NetApp cDOT: Add Intra-Vserver migration support

Add migration support in the cDOT driver.

Change-Id: I72e045b1c978b752f38cc3018cb2a7084e3f3e36
Implements: blueprint netapp-cdot-driver-optimized-migration
This commit is contained in:
Goutham Pacha Ravi 2016-08-21 15:23:35 -04:00 committed by Goutham Pacha Ravi
parent e8caebb457
commit 20e89b9691
16 changed files with 1184 additions and 17 deletions

View File

@ -141,7 +141,7 @@ _global_opt_lists = [
manila.share.drivers.netapp.options.netapp_transport_opts,
manila.share.drivers.netapp.options.netapp_basicauth_opts,
manila.share.drivers.netapp.options.netapp_provisioning_opts,
manila.share.drivers.netapp.options.netapp_replication_opts,
manila.share.drivers.netapp.options.netapp_data_motion_opts,
manila.share.drivers.nexenta.options.nexenta_connection_opts,
manila.share.drivers.nexenta.options.nexenta_dataset_opts,
manila.share.drivers.nexenta.options.nexenta_nfs_opts,

View File

@ -38,6 +38,8 @@ EVOLUMEOFFLINE = '13042'
EINTERNALERROR = '13114'
EDUPLICATEENTRY = '13130'
EVOLNOTCLONE = '13170'
EVOLMOVE_CANNOT_MOVE_TO_CFO = '13633'
EAGGRDOESNOTEXIST = '14420'
EVOL_NOT_MOUNTED = '14716'
ESIS_CLONE_NOT_LICENSED = '14956'
EOBJECTNOTFOUND = '15661'

View File

@ -36,6 +36,12 @@ LOG = log.getLogger(__name__)
DELETED_PREFIX = 'deleted_manila_'
DEFAULT_IPSPACE = 'Default'
DEFAULT_MAX_PAGE_LENGTH = 50
# Maps the driver-facing cut-over action names accepted by
# start_volume_move() to the corresponding ZAPI 'cutover-action'
# values for the 'volume-move-start' API.
CUTOVER_ACTION_MAP = {
    'defer': 'defer_on_failure',
    'abort': 'abort_on_failure',
    'force': 'force',
    'wait': 'wait',
}
class NetAppCmodeClient(client_base.NetAppBaseClient):
@ -3257,3 +3263,117 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
has_snapmirrors = False
return has_snapmirrors
@na_utils.trace
def start_volume_move(self, volume_name, vserver, destination_aggregate,
                      cutover_action='wait'):
    """Moves a FlexVol across Vserver aggregates.

    Requires cluster-scoped credentials.

    :param volume_name: Name of the FlexVol to be moved.
    :param vserver: Vserver that owns the FlexVol.
    :param destination_aggregate: Name of the destination aggregate.
    :param cutover_action: One of 'force', 'defer', 'abort' or 'wait';
        see _send_volume_move_request for the semantics of each value.
    """
    self._send_volume_move_request(
        volume_name, vserver, destination_aggregate,
        cutover_action=cutover_action)
@na_utils.trace
def check_volume_move(self, volume_name, vserver, destination_aggregate):
    """Checks whether a FlexVol can be moved across Vserver aggregates.

    Validation only; no data is copied (the request is sent with
    'perform-validation-only'). Requires cluster-scoped credentials.
    """
    self._send_volume_move_request(
        volume_name, vserver, destination_aggregate, validation_only=True)
@na_utils.trace
def _send_volume_move_request(self, volume_name, vserver,
                              destination_aggregate,
                              cutover_action='wait',
                              validation_only=False):
    """Send request to check if vol move is possible, or start it.

    :param volume_name: Name of the FlexVol to be moved.
    :param vserver: Vserver that owns the FlexVol.
    :param destination_aggregate: Name of the destination aggregate
    :param cutover_action: can have one of ['force', 'defer', 'abort',
        'wait']. 'force' will force a cutover despite errors (causing
        possible client disruptions), 'wait' will wait for cutover to be
        triggered manually. 'abort' will rollback move on errors on
        cutover, 'defer' will attempt a cutover, but wait for manual
        intervention in case of errors.
    :param validation_only: If set to True, only validates if the volume
        move is possible, does not trigger data copy.
    :raises: KeyError if cutover_action is not one of the values above.
    """
    api_args = {
        'source-volume': volume_name,
        'vserver': vserver,
        'dest-aggr': destination_aggregate,
        # Translate the driver-facing action name to its ZAPI value.
        'cutover-action': CUTOVER_ACTION_MAP[cutover_action],
    }
    if validation_only:
        api_args['perform-validation-only'] = 'true'
    self.send_request('volume-move-start', api_args)
@na_utils.trace
def abort_volume_move(self, volume_name, vserver):
    """Aborts an existing volume move operation."""
    # Identify the move by its source volume and owning Vserver.
    self.send_request('volume-move-trigger-abort', {
        'source-volume': volume_name,
        'vserver': vserver,
    })
@na_utils.trace
def trigger_volume_move_cutover(self, volume_name, vserver, force=True):
    """Triggers the cut-over for a volume in data motion."""
    # ZAPI expects the boolean as a lowercase string.
    force_flag = 'true' if force else 'false'
    request_args = {
        'force': force_flag,
        'source-volume': volume_name,
        'vserver': vserver,
    }
    self.send_request('volume-move-trigger-cutover', request_args)
@na_utils.trace
def get_volume_move_status(self, volume_name, vserver):
    """Gets the current state of a volume move operation."""
    # Fields requested from, and extracted out of, the API response.
    desired_fields = (
        'percent-complete',
        'estimated-completion-time',
        'state',
        'details',
        'cutover-action',
        'phase',
    )
    api_args = {
        'query': {
            'volume-move-info': {
                'volume': volume_name,
                'vserver': vserver,
            },
        },
        'desired-attributes': {
            'volume-move-info': {field: None for field in desired_fields},
        },
    }
    result = self.send_iter_request('volume-move-get-iter', api_args)

    if not self._has_records(result):
        msg = _("Volume %(vol)s in Vserver %(server)s is not part of any "
                "data motion operations.")
        msg_args = {'vol': volume_name, 'server': vserver}
        raise exception.NetAppException(msg % msg_args)

    # Fall back to empty elements so missing children read as None.
    attributes_list = result.get_child_by_name(
        'attributes-list') or netapp_api.NaElement('none')
    volume_move_info = attributes_list.get_child_by_name(
        'volume-move-info') or netapp_api.NaElement('none')

    return {field: volume_move_info.get_child_content(field)
            for field in desired_fields}

View File

@ -44,8 +44,9 @@ def get_backend_configuration(backend_name):
config_stanzas = CONF.list_all_sections()
if backend_name not in config_stanzas:
msg = _("Could not find backend stanza %(backend_name)s in "
"configuration which is required for replication with "
"the backend. Available stanzas are %(stanzas)s")
"configuration which is required for replication or migration "
"workflows with the source backend. Available stanzas are "
"%(stanzas)s")
params = {
"stanzas": config_stanzas,
"backend_name": backend_name,
@ -60,7 +61,7 @@ def get_backend_configuration(backend_name):
config.append_config_values(na_opts.netapp_transport_opts)
config.append_config_values(na_opts.netapp_support_opts)
config.append_config_values(na_opts.netapp_provisioning_opts)
config.append_config_values(na_opts.netapp_replication_opts)
config.append_config_values(na_opts.netapp_data_motion_opts)
return config

View File

@ -164,3 +164,53 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
share_replica, replica_snapshots,
replica_snapshot, share_server=None):
raise NotImplementedError()
def migration_check_compatibility(self, context, source_share,
                                  destination_share, share_server=None,
                                  destination_share_server=None):
    """Delegates the compatibility check to the cDOT library."""
    return self.library.migration_check_compatibility(
        context, source_share, destination_share,
        share_server=share_server,
        destination_share_server=destination_share_server)
def migration_start(self, context, source_share, destination_share,
                    source_snapshots, snapshot_mappings,
                    share_server=None, destination_share_server=None):
    """Delegates starting the migration to the cDOT library."""
    return self.library.migration_start(
        context, source_share, destination_share,
        source_snapshots, snapshot_mappings,
        share_server=share_server,
        destination_share_server=destination_share_server)
def migration_continue(self, context, source_share, destination_share,
                       source_snapshots, snapshot_mappings,
                       share_server=None, destination_share_server=None):
    """Delegates the migration progress check to the cDOT library."""
    return self.library.migration_continue(
        context, source_share, destination_share,
        source_snapshots, snapshot_mappings, share_server=share_server,
        destination_share_server=destination_share_server)
def migration_get_progress(self, context, source_share,
                           destination_share, source_snapshots,
                           snapshot_mappings, share_server=None,
                           destination_share_server=None):
    """Delegates the progress report to the cDOT library."""
    return self.library.migration_get_progress(
        context, source_share, destination_share,
        source_snapshots, snapshot_mappings, share_server=share_server,
        destination_share_server=destination_share_server)
def migration_cancel(self, context, source_share, destination_share,
                     source_snapshots, snapshot_mappings,
                     share_server=None, destination_share_server=None):
    """Delegates cancelling the migration to the cDOT library."""
    return self.library.migration_cancel(
        context, source_share, destination_share,
        source_snapshots, snapshot_mappings, share_server=share_server,
        destination_share_server=destination_share_server)
def migration_complete(self, context, source_share, destination_share,
                       source_snapshots, snapshot_mappings,
                       share_server=None, destination_share_server=None):
    """Delegates completing the migration to the cDOT library."""
    return self.library.migration_complete(
        context, source_share, destination_share,
        source_snapshots, snapshot_mappings, share_server=share_server,
        destination_share_server=destination_share_server)

View File

@ -179,3 +179,52 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
return self.library.update_replicated_snapshot(
replica_list, share_replica, replica_snapshots, replica_snapshot,
share_server=share_server)
def migration_check_compatibility(self, context, source_share,
                                  destination_share, share_server=None,
                                  destination_share_server=None):
    """Delegates the compatibility check to the cDOT library."""
    return self.library.migration_check_compatibility(
        context, source_share, destination_share,
        share_server=share_server,
        destination_share_server=destination_share_server)
def migration_start(self, context, source_share, destination_share,
                    source_snapshots, snapshot_mappings,
                    share_server=None, destination_share_server=None):
    """Delegates starting the migration to the cDOT library."""
    return self.library.migration_start(
        context, source_share, destination_share,
        source_snapshots, snapshot_mappings, share_server=share_server,
        destination_share_server=destination_share_server)
def migration_continue(self, context, source_share, destination_share,
                       source_snapshots, snapshot_mappings,
                       share_server=None, destination_share_server=None):
    """Delegates the migration progress check to the cDOT library."""
    return self.library.migration_continue(
        context, source_share, destination_share,
        source_snapshots, snapshot_mappings, share_server=share_server,
        destination_share_server=destination_share_server)
def migration_get_progress(self, context, source_share,
                           destination_share, source_snapshots,
                           snapshot_mappings, share_server=None,
                           destination_share_server=None):
    """Delegates the progress report to the cDOT library."""
    return self.library.migration_get_progress(
        context, source_share, destination_share,
        source_snapshots, snapshot_mappings, share_server=share_server,
        destination_share_server=destination_share_server)
def migration_cancel(self, context, source_share, destination_share,
                     source_snapshots, snapshot_mappings,
                     share_server=None, destination_share_server=None):
    """Delegates cancelling the migration to the cDOT library."""
    return self.library.migration_cancel(
        context, source_share, destination_share,
        source_snapshots, snapshot_mappings, share_server=share_server,
        destination_share_server=destination_share_server)
def migration_complete(self, context, source_share, destination_share,
                       source_snapshots, snapshot_mappings,
                       share_server=None, destination_share_server=None):
    """Delegates completing the migration to the cDOT library."""
    return self.library.migration_complete(
        context, source_share, destination_share,
        source_snapshots, snapshot_mappings, share_server=share_server,
        destination_share_server=destination_share_server)

View File

@ -44,6 +44,7 @@ from manila.share.drivers.netapp import options as na_opts
from manila.share.drivers.netapp import utils as na_utils
from manila.share import share_types
from manila.share import utils as share_utils
from manila import utils as manila_utils
LOG = log.getLogger(__name__)
CONF = cfg.CONF
@ -96,7 +97,7 @@ class NetAppCmodeFileStorageLibrary(object):
self.configuration.append_config_values(
na_opts.netapp_provisioning_opts)
self.configuration.append_config_values(
na_opts.netapp_replication_opts)
na_opts.netapp_data_motion_opts)
self._licenses = []
self._client = None
@ -675,7 +676,8 @@ class NetAppCmodeFileStorageLibrary(object):
vserver_client.delete_volume(share_name)
@na_utils.trace
def _create_export(self, share, share_server, vserver, vserver_client):
def _create_export(self, share, share_server, vserver, vserver_client,
clear_current_export_policy=True):
"""Creates NAS storage."""
helper = self._get_helper(share)
helper.set_client(vserver_client)
@ -695,7 +697,9 @@ class NetAppCmodeFileStorageLibrary(object):
share, share_server, interfaces)
# Create the share and get a callback for generating export locations
callback = helper.create_share(share, share_name)
callback = helper.create_share(
share, share_name,
clear_current_export_policy=clear_current_export_policy)
# Generate export locations using addresses, metadata and callback
export_locations = [
@ -1618,3 +1622,275 @@ class NetAppCmodeFileStorageLibrary(object):
except netapp_api.NaApiError as e:
if e.code != netapp_api.EOBJECTNOTFOUND:
raise
def _check_destination_vserver_for_vol_move(self, source_share,
                                            source_vserver,
                                            dest_share_server):
    """Raise unless source and destination Vservers are the same."""
    try:
        dest_vserver, __ = self._get_vserver(
            share_server=dest_share_server)
    except exception.InvalidParameterValue:
        # No Vserver could be derived from the destination share server.
        dest_vserver = None

    if source_vserver == dest_vserver:
        return

    msg_args = {
        'shr': source_share['id'],
        'src': source_vserver,
        'dest': dest_vserver,
    }
    msg = _("Cannot migrate %(shr)s efficiently from source "
            "VServer %(src)s to destination VServer %(dest)s.")
    raise exception.NetAppException(msg % msg_args)
def migration_check_compatibility(self, context, source_share,
                                  destination_share, share_server=None,
                                  destination_share_server=None):
    """Checks compatibility between self.host and destination host.

    Reports whether an efficient (intra-cluster volume move based)
    migration is possible between the source and destination hosts.
    Never raises: any failure during the checks results in an
    all-False compatibility report.
    """
    # We need cluster creds to perform an intra-cluster data motion
    compatible = False
    destination_host = destination_share['host']

    if self._have_cluster_creds:
        try:
            backend = share_utils.extract_host(
                destination_host, level='backend_name')
            # Raises if the destination backend has no config stanza.
            data_motion.get_backend_configuration(backend)

            source_vserver, __ = self._get_vserver(
                share_server=share_server)
            share_volume = self._get_backend_share_name(
                source_share['id'])
            destination_aggregate = share_utils.extract_host(
                destination_host, level='pool')

            # Volume moves are only possible within the same Vserver.
            self._check_destination_vserver_for_vol_move(
                source_share, source_vserver, destination_share_server)

            # Ask ONTAP to validate the move without starting it.
            self._client.check_volume_move(
                share_volume, source_vserver, destination_aggregate)
        except Exception:
            # Deliberately broad: any failure above simply means an
            # efficient migration is not possible for this pair.
            msg = _LE("Cannot migrate share %(shr)s efficiently between "
                      "%(src)s and %(dest)s.")
            msg_args = {
                'shr': source_share['id'],
                'src': source_share['host'],
                'dest': destination_host,
            }
            LOG.exception(msg, msg_args)
        else:
            compatible = True
    else:
        msg = _LW("Cluster credentials have not been configured "
                  "with this share driver. Cannot perform volume move "
                  "operations.")
        LOG.warning(msg)

    # All reported capabilities follow directly from compatibility.
    compatibility = {
        'compatible': compatible,
        'writable': compatible,
        'nondisruptive': compatible,
        'preserve_metadata': compatible,
        'preserve_snapshots': compatible,
    }

    return compatibility
def migration_start(self, context, source_share, destination_share,
                    source_snapshots, snapshot_mappings,
                    share_server=None, destination_share_server=None):
    """Begins data motion from source_share to destination_share."""
    # Intra-cluster migration
    vserver, __ = self._get_vserver(share_server=share_server)
    volume_name = self._get_backend_share_name(source_share['id'])
    dest_aggregate = share_utils.extract_host(
        destination_share['host'], level='pool')

    self._client.start_volume_move(volume_name, vserver, dest_aggregate)

    log_args = {
        'shr': source_share['id'],
        'src': source_share['host'],
        'dest': destination_share['host'],
    }
    LOG.info(_LI("Began volume move operation of share %(shr)s from "
                 "%(src)s to %(dest)s."), log_args)
def _get_volume_move_status(self, source_share, share_server):
    """Query ONTAP for the move status of the share's backing volume."""
    vserver, __ = self._get_vserver(share_server=share_server)
    volume_name = self._get_backend_share_name(source_share['id'])
    return self._client.get_volume_move_status(volume_name, vserver)
def migration_continue(self, context, source_share, destination_share,
                       source_snapshots, snapshot_mappings,
                       share_server=None, destination_share_server=None):
    """Check progress of migration, try to repair data motion errors.

    :returns: True when the volume move has reached a phase considered
        complete (including the deferred cut-over phases, which only
        await a cut-over trigger), else False.
    :raises: NetAppException if the volume move has failed.
    """
    status = self._get_volume_move_status(source_share, share_server)

    # Deferred cut-over phases count as complete: volume move copies
    # the remaining data before cut-over anyway.
    completed_phases = (
        'cutover_hard_deferred', 'cutover_soft_deferred', 'completed')

    move_phase = status['phase'].lower()
    if move_phase == 'failed':
        msg_args = {
            'shr': source_share['id'],
            'reason': status['details'],
        }
        msg = _("Volume move operation for share %(shr)s failed. Reason: "
                "%(reason)s") % msg_args
        # Not inside an exception handler, so use LOG.error rather than
        # LOG.exception (which would log a spurious 'NoneType: None'
        # traceback here).
        LOG.error(msg)
        raise exception.NetAppException(msg)
    elif move_phase in completed_phases:
        return True

    return False
def migration_get_progress(self, context, source_share,
                           destination_share, source_snapshots,
                           snapshot_mappings, share_server=None,
                           destination_share_server=None):
    """Return detailed progress of the migration in progress.

    :returns: dict with 'total_progress' (percent), 'state',
        'estimated_completion_time', 'phase' and 'details', taken from
        the ONTAP volume-move status.
    """
    status = self._get_volume_move_status(source_share, share_server)

    # NOTE (gouthamr): If the volume move is waiting for a manual
    # intervention to cut-over, the copy is done with respect to the
    # user. Volume move copies the rest of the data before cut-over anyway.
    if status['phase'] in ('cutover_hard_deferred',
                           'cutover_soft_deferred'):
        status['percent-complete'] = 100

    msg = _LI("Volume move status for share %(share)s: (State) %(state)s. "
              "(Phase) %(phase)s. Details: %(details)s")
    msg_args = {
        'state': status['state'],
        'details': status['details'],
        'share': source_share['id'],
        'phase': status['phase'],
    }
    LOG.info(msg, msg_args)

    return {
        # Default to 0 when ONTAP does not report a percentage.
        'total_progress': status['percent-complete'] or 0,
        'state': status['state'],
        'estimated_completion_time': status['estimated-completion-time'],
        'phase': status['phase'],
        'details': status['details'],
    }
def migration_cancel(self, context, source_share, destination_share,
                     source_snapshots, snapshot_mappings,
                     share_server=None, destination_share_server=None):
    """Abort an ongoing migration.

    Best-effort: if no volume move status can be retrieved (e.g. the
    move no longer exists), the cancellation is silently skipped.
    """
    vserver, vserver_client = self._get_vserver(share_server=share_server)
    share_volume = self._get_backend_share_name(source_share['id'])

    try:
        # Only used to verify a move exists before attempting the abort.
        self._get_volume_move_status(source_share, share_server)
    except exception.NetAppException:
        LOG.exception(_LE("Could not get volume move status."))
        return

    self._client.abort_volume_move(share_volume, vserver)

    msg = _LI("Share volume move operation for share %(shr)s from host "
              "%(src)s to %(dest)s was successfully aborted.")
    msg_args = {
        'shr': source_share['id'],
        'src': source_share['host'],
        'dest': destination_share['host'],
    }
    LOG.info(msg, msg_args)
def migration_complete(self, context, source_share, destination_share,
                       source_snapshots, snapshot_mappings,
                       share_server=None, destination_share_server=None):
    """Initiate the cutover to destination share after move is complete.

    :returns: dict with 'export_locations' for the destination share
        and 'snapshot_updates' mapping destination snapshot IDs to the
        provider locations carried over from the source snapshots.
    :raises: NetAppException if the volume move is in a state from
        which it cannot be completed.
    """
    vserver, vserver_client = self._get_vserver(share_server=share_server)
    share_volume = self._get_backend_share_name(source_share['id'])

    status = self._get_volume_move_status(source_share, share_server)
    move_phase = status['phase'].lower()
    if move_phase == 'completed':
        LOG.debug("Volume move operation was already successfully "
                  "completed for share %(shr)s.",
                  {'shr': source_share['id']})
    elif move_phase in ('cutover_hard_deferred', 'cutover_soft_deferred'):
        # Data copy is done; trigger the deferred cut-over and wait for
        # ONTAP to report the move as completed.
        self._client.trigger_volume_move_cutover(share_volume, vserver)
        self._wait_for_cutover_completion(
            source_share, share_server)
    else:
        msg_args = {
            'shr': source_share['id'],
            'status': status['state'],
            'phase': status['phase'],
            'details': status['details'],
        }
        msg = _("Cannot complete volume move operation for share %(shr)s. "
                "Current volume move status: %(status)s, phase: "
                "%(phase)s. Details: %(details)s") % msg_args
        # Not inside an exception handler, so use LOG.error rather than
        # LOG.exception (which would log a spurious 'NoneType: None'
        # traceback here).
        LOG.error(msg)
        raise exception.NetAppException(msg)

    # Rename the moved volume to match the destination share instance.
    new_share_volume_name = self._get_backend_share_name(
        destination_share['id'])
    vserver_client.set_volume_name(share_volume, new_share_volume_name)

    msg = _LI("Volume move operation for share %(shr)s has completed "
              "successfully. Share has been moved from %(src)s to "
              "%(dest)s.")
    msg_args = {
        'shr': source_share['id'],
        'src': source_share['host'],
        'dest': destination_share['host'],
    }
    LOG.info(msg, msg_args)

    # NOTE(gouthamr): For nondisruptive migration, current export
    # policy will not be cleared, the export policy will be renamed to
    # match the name of the share.
    export_locations = self._create_export(
        destination_share, share_server, vserver, vserver_client,
        clear_current_export_policy=False)

    # Carry provider locations from source snapshots over to their
    # destination counterparts.
    src_snaps_dict = {s['id']: s for s in source_snapshots}
    snapshot_updates = {}
    for source_snap_id, destination_snap in snapshot_mappings.items():
        p_location = src_snaps_dict[source_snap_id]['provider_location']
        snapshot_updates.update(
            {destination_snap['id']: {'provider_location': p_location}})

    return {
        'export_locations': export_locations,
        'snapshot_updates': snapshot_updates,
    }
def _wait_for_cutover_completion(self, source_share, share_server):
    """Poll until the volume move reports the 'completed' phase.

    Polls every 5 seconds for up to
    netapp_volume_move_cutover_timeout seconds.

    :raises: NetAppException when retries are exhausted before the
        move completes.
    """
    # Use integer arithmetic: '/' yields a float under Python 3 and a
    # fractional retry count for timeouts below the 5 second poll
    # interval. Always retry at least once.
    retries = max(
        self.configuration.netapp_volume_move_cutover_timeout // 5, 1)

    @manila_utils.retry(exception.ShareBusyException, interval=5,
                        retries=retries, backoff_rate=1)
    def check_move_completion():
        status = self._get_volume_move_status(source_share, share_server)
        if status['phase'].lower() != 'completed':
            msg_args = {
                'shr': source_share['id'],
                'phs': status['phase'],
            }
            msg = _('Volume move operation for share %(shr)s is not '
                    'complete. Current Phase: %(phs)s. '
                    'Retrying.') % msg_args
            LOG.warning(msg)
            raise exception.ShareBusyException(reason=msg)

    try:
        check_move_completion()
    except exception.ShareBusyException:
        msg = _("Volume move operation did not complete after cut-over "
                "was triggered. Retries exhausted. Not retrying.")
        raise exception.NetAppException(message=msg)

View File

@ -28,10 +28,12 @@ class NetAppCmodeCIFSHelper(base.NetAppBaseHelper):
"""NetApp cDOT CIFS protocol helper class."""
@na_utils.trace
def create_share(self, share, share_name):
def create_share(self, share, share_name,
clear_current_export_policy=True):
"""Creates CIFS share on Data ONTAP Vserver."""
self._client.create_cifs_share(share_name)
self._client.remove_cifs_share_access(share_name, 'Everyone')
if clear_current_export_policy:
self._client.remove_cifs_share_access(share_name, 'Everyone')
# Return a callback that may be used for generating export paths
# for this share.

View File

@ -35,9 +35,11 @@ class NetAppCmodeNFSHelper(base.NetAppBaseHelper):
"""NetApp cDOT NFS protocol helper class."""
@na_utils.trace
def create_share(self, share, share_name):
def create_share(self, share, share_name,
clear_current_export_policy=True):
"""Creates NFS share."""
self._client.clear_nfs_export_policy_for_volume(share_name)
if clear_current_export_policy:
self._client.clear_nfs_export_policy_for_volume(share_name)
self._ensure_export_policy(share, share_name)
export_path = self._client.get_volume_junction_path(share_name)

View File

@ -113,13 +113,19 @@ netapp_support_opts = [
'trace info is written to the debug logs. Values '
'include method and api.')), ]
netapp_replication_opts = [
# Options governing data motion (replication and migration) workflows.
netapp_data_motion_opts = [
    cfg.IntOpt('netapp_snapmirror_quiesce_timeout',
               min=0,
               default=3600,  # One Hour
               help='The maximum time in seconds to wait for existing '
                    'snapmirror transfers to complete before aborting when '
                    'promoting a replica.'),
    cfg.IntOpt('netapp_volume_move_cutover_timeout',
               min=0,
               default=3600,  # One Hour
               help='The maximum time in seconds to wait for the completion '
                    'of a volume move operation after the cutover '
                    'was triggered.'), ]
CONF = cfg.CONF
CONF.register_opts(netapp_proxy_opts)
@ -128,4 +134,4 @@ CONF.register_opts(netapp_transport_opts)
CONF.register_opts(netapp_basicauth_opts)
CONF.register_opts(netapp_provisioning_opts)
CONF.register_opts(netapp_support_opts)
CONF.register_opts(netapp_replication_opts)
CONF.register_opts(netapp_data_motion_opts)

View File

@ -2115,6 +2115,27 @@ SNAPMIRROR_INITIALIZE_RESULT = etree.XML("""
</results>
""")
VOLUME_MOVE_GET_ITER_RESULT = etree.XML("""
<results status="passed">
<attributes-list>
<volume-move-info>
<cutover-action>retry_on_failure</cutover-action>
<details>Cutover Completed::Volume move job finishing move</details>
<estimated-completion-time>1481919246</estimated-completion-time>
<percent-complete>82</percent-complete>
<phase>finishing</phase>
<state>healthy</state>
<volume>%(volume)s</volume>
<vserver>%(vserver)s</vserver>
</volume-move-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'volume': SHARE_NAME,
'vserver': VSERVER_NAME,
})
PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS = [
'SETUP', 'PRE_P0', 'P0_SNAP_DEL', 'P1_CLEAN', 'P1_QUOTA', 'IPU_DISK_ADD',
'P2V_INOFILE', 'P2V_INO_PUB', 'P2V_INO_PRI', 'P2V_FSINFO', 'P2V_DLOG1',

View File

@ -5538,3 +5538,124 @@ class NetAppClientCmodeTestCase(test.TestCase):
mock_get_snapmirrors_call.assert_has_calls(
expected_get_snapmirrors_calls)
self.assertTrue(mock_exc_log.called)
@ddt.data('start_volume_move', 'check_volume_move')
def test_volume_move_method(self, method_name):
    """Both APIs send volume-move-start; 'check' adds validation-only."""
    method = getattr(self.client, method_name)
    self.mock_object(self.client, 'send_request')

    retval = method(fake.SHARE_NAME, fake.VSERVER_NAME,
                    fake.SHARE_AGGREGATE_NAME)

    expected_api_args = {
        'source-volume': fake.SHARE_NAME,
        'vserver': fake.VSERVER_NAME,
        'dest-aggr': fake.SHARE_AGGREGATE_NAME,
        'cutover-action': 'wait',
    }
    if method_name.startswith('check'):
        expected_api_args['perform-validation-only'] = 'true'

    self.assertIsNone(retval)
    self.client.send_request.assert_called_once_with(
        'volume-move-start', expected_api_args)
def test_abort_volume_move(self):
    """abort_volume_move sends volume-move-trigger-abort."""
    self.mock_object(self.client, 'send_request')

    retval = self.client.abort_volume_move(
        fake.SHARE_NAME, fake.VSERVER_NAME)

    expected_api_args = {
        'source-volume': fake.SHARE_NAME,
        'vserver': fake.VSERVER_NAME,
    }
    self.assertIsNone(retval)
    self.client.send_request.assert_called_once_with(
        'volume-move-trigger-abort', expected_api_args)
@ddt.data(True, False)
def test_trigger_volume_move_cutover_force(self, forced):
    """The 'force' flag is serialized as a lowercase string for ZAPI."""
    self.mock_object(self.client, 'send_request')

    retval = self.client.trigger_volume_move_cutover(
        fake.SHARE_NAME, fake.VSERVER_NAME, force=forced)

    expected_api_args = {
        'source-volume': fake.SHARE_NAME,
        'vserver': fake.VSERVER_NAME,
        'force': 'true' if forced else 'false',
    }
    self.assertIsNone(retval)
    self.client.send_request.assert_called_once_with(
        'volume-move-trigger-cutover', expected_api_args)
def test_get_volume_move_status_no_records(self):
    """A NetAppException is raised when no move records are found."""
    self.mock_object(self.client, 'send_iter_request')
    self.mock_object(self.client, '_has_records',
                     mock.Mock(return_value=False))

    self.assertRaises(exception.NetAppException,
                      self.client.get_volume_move_status,
                      fake.SHARE_NAME, fake.VSERVER_NAME)

    expected_api_args = {
        'query': {
            'volume-move-info': {
                'volume': fake.SHARE_NAME,
                'vserver': fake.VSERVER_NAME,
            },
        },
        'desired-attributes': {
            'volume-move-info': {
                'percent-complete': None,
                'estimated-completion-time': None,
                'state': None,
                'details': None,
                'cutover-action': None,
                'phase': None,
            },
        },
    }
    self.client.send_iter_request.assert_called_once_with(
        'volume-move-get-iter', expected_api_args)
def test_get_volume_move_status(self):
    """Status fields are parsed out of the volume-move-get-iter reply."""
    move_status = netapp_api.NaElement(fake.VOLUME_MOVE_GET_ITER_RESULT)
    self.mock_object(self.client, 'send_iter_request',
                     mock.Mock(return_value=move_status))

    actual_status_info = self.client.get_volume_move_status(
        fake.SHARE_NAME, fake.VSERVER_NAME)

    expected_api_args = {
        'query': {
            'volume-move-info': {
                'volume': fake.SHARE_NAME,
                'vserver': fake.VSERVER_NAME,
            },
        },
        'desired-attributes': {
            'volume-move-info': {
                'percent-complete': None,
                'estimated-completion-time': None,
                'state': None,
                'details': None,
                'cutover-action': None,
                'phase': None,
            },
        },
    }
    # Values match the canned fake.VOLUME_MOVE_GET_ITER_RESULT payload.
    expected_status_info = {
        'percent-complete': '82',
        'estimated-completion-time': '1481919246',
        'state': 'healthy',
        'details': 'Cutover Completed::Volume move job finishing move',
        'cutover-action': 'retry_on_failure',
        'phase': 'finishing',
    }
    self.assertDictMatch(expected_status_info, actual_status_info)
    self.client.send_iter_request.assert_called_once_with(
        'volume-move-get-iter', expected_api_args)

View File

@ -51,7 +51,7 @@ class NetAppCDOTDataMotionTestCase(test.TestCase):
self.config.append_config_values(na_opts.netapp_transport_opts)
self.config.append_config_values(na_opts.netapp_support_opts)
self.config.append_config_values(na_opts.netapp_provisioning_opts)
self.config.append_config_values(na_opts.netapp_replication_opts)
self.config.append_config_values(na_opts.netapp_data_motion_opts)
CONF.set_override("share_backend_name", self.backend,
group=self.backend, enforce_type=True)
CONF.set_override("netapp_transport_type", "https",
@ -138,7 +138,7 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
config.append_config_values(na_opts.netapp_transport_opts)
config.append_config_values(na_opts.netapp_support_opts)
config.append_config_values(na_opts.netapp_provisioning_opts)
config.append_config_values(na_opts.netapp_replication_opts)
config.append_config_values(na_opts.netapp_data_motion_opts)
self.mock_object(data_motion, "get_backend_configuration",
mock.Mock(return_value=config))

View File

@ -45,6 +45,7 @@ from manila.share import utils as share_utils
from manila import test
from manila.tests import fake_share
from manila.tests.share.drivers.netapp.dataontap import fakes as fake
from manila.tests import utils
def fake_replica(**kwargs):
@ -1075,7 +1076,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock_get_export_addresses_with_metadata.assert_called_once_with(
fake.SHARE, fake.SHARE_SERVER, fake.LIFS)
protocol_helper.create_share.assert_called_once_with(
fake.SHARE, fake.SHARE_NAME)
fake.SHARE, fake.SHARE_NAME, clear_current_export_policy=True)
def test_create_export_lifs_not_found(self):
@ -3619,3 +3620,463 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
replica_list, self.fake_replica, [fake_snapshot], fake_snapshot)
self.assertIsNone(model_update)
def test_migration_check_compatibility_no_cluster_credentials(self):
    """Without cluster creds, compatibility is all-False and no checks run."""
    self.library._have_cluster_creds = False
    self.mock_object(data_motion, 'get_backend_configuration')
    mock_warning_log = self.mock_object(lib_base.LOG, 'warning')

    migration_compatibility = self.library.migration_check_compatibility(
        self.context, fake_share.fake_share_instance(),
        fake_share.fake_share_instance(), share_server=None,
        destination_share_server=fake.SHARE_SERVER)

    expected_compatibility = {
        'compatible': False,
        'writable': False,
        'nondisruptive': False,
        'preserve_metadata': False,
        'preserve_snapshots': False,
    }
    self.assertDictMatch(expected_compatibility, migration_compatibility)
    mock_warning_log.assert_called_once()
    self.assertFalse(data_motion.get_backend_configuration.called)
def test_migration_check_compatibility_destination_not_configured(self):
    """A missing destination backend stanza yields an all-False report."""
    self.library._have_cluster_creds = True
    self.mock_object(self.library, '_get_backend_share_name',
                     mock.Mock(return_value=fake.SHARE_NAME))
    self.mock_object(
        data_motion, 'get_backend_configuration',
        mock.Mock(side_effect=exception.BadConfigurationException))
    self.mock_object(self.library, '_get_vserver')
    mock_exception_log = self.mock_object(lib_base.LOG, 'exception')
    self.mock_object(share_utils, 'extract_host', mock.Mock(
        return_value='destination_backend'))
    mock_vserver_compatibility_check = self.mock_object(
        self.library, '_check_destination_vserver_for_vol_move')

    migration_compatibility = self.library.migration_check_compatibility(
        self.context, fake_share.fake_share_instance(),
        fake_share.fake_share_instance(), share_server=fake.SHARE_SERVER,
        destination_share_server=None)

    expected_compatibility = {
        'compatible': False,
        'writable': False,
        'nondisruptive': False,
        'preserve_metadata': False,
        'preserve_snapshots': False,
    }
    self.assertDictMatch(expected_compatibility, migration_compatibility)
    mock_exception_log.assert_called_once()
    data_motion.get_backend_configuration.assert_called_once_with(
        'destination_backend')
    # The config failure short-circuits the later checks.
    self.assertFalse(mock_vserver_compatibility_check.called)
    self.assertFalse(self.library._get_vserver.called)
@ddt.data(
    utils.annotated(
        'dest_share_server_not_expected',
        (('src_vserver', None), exception.InvalidParameterValue)),
    utils.annotated(
        'src_share_server_not_expected',
        (exception.InvalidParameterValue, ('dest_vserver', None))))
def test_migration_check_compatibility_errors(self, side_effects):
    """A Vserver lookup failure on either side yields an all-False report."""
    self.library._have_cluster_creds = True
    self.mock_object(self.library, '_get_backend_share_name',
                     mock.Mock(return_value=fake.SHARE_NAME))
    self.mock_object(data_motion, 'get_backend_configuration')
    self.mock_object(self.library, '_get_vserver',
                     mock.Mock(side_effect=side_effects))
    mock_exception_log = self.mock_object(lib_base.LOG, 'exception')
    self.mock_object(share_utils, 'extract_host', mock.Mock(
        return_value='destination_backend'))
    mock_compatibility_check = self.mock_object(
        self.client, 'check_volume_move')

    migration_compatibility = self.library.migration_check_compatibility(
        self.context, fake_share.fake_share_instance(),
        fake_share.fake_share_instance(), share_server=fake.SHARE_SERVER,
        destination_share_server=None)

    expected_compatibility = {
        'compatible': False,
        'writable': False,
        'nondisruptive': False,
        'preserve_metadata': False,
        'preserve_snapshots': False,
    }
    self.assertDictMatch(expected_compatibility, migration_compatibility)
    mock_exception_log.assert_called_once()
    data_motion.get_backend_configuration.assert_called_once_with(
        'destination_backend')
    # The Vserver error prevents the ONTAP-side move validation.
    self.assertFalse(mock_compatibility_check.called)
    def test_migration_check_compatibility_incompatible_vservers(self):
        """Shares in different Vservers cannot use intra-Vserver move.

        When the source and destination resolve to different Vservers,
        the driver logs the error and returns an all-False compatibility
        dict without asking the client whether the volume can move.
        """
        self.library._have_cluster_creds = True
        self.mock_object(self.library, '_get_backend_share_name',
                         mock.Mock(return_value=fake.SHARE_NAME))
        self.mock_object(data_motion, 'get_backend_configuration')
        mock_exception_log = self.mock_object(lib_base.LOG, 'exception')
        # Two different vservers: one for the source, one for the dest.
        get_vserver_returns = [
            (fake.VSERVER1, mock.Mock()),
            (fake.VSERVER2, mock.Mock()),
        ]
        self.mock_object(self.library, '_get_vserver',
                         mock.Mock(side_effect=get_vserver_returns))
        self.mock_object(share_utils, 'extract_host', mock.Mock(
            side_effect=['destination_backend', 'destination_pool']))
        mock_move_check = self.mock_object(self.client, 'check_volume_move')
        migration_compatibility = self.library.migration_check_compatibility(
            self.context, fake_share.fake_share_instance(),
            fake_share.fake_share_instance(), share_server=fake.SHARE_SERVER,
            destination_share_server='dst_srv')
        expected_compatibility = {
            'compatible': False,
            'writable': False,
            'nondisruptive': False,
            'preserve_metadata': False,
            'preserve_snapshots': False,
        }
        self.assertDictMatch(expected_compatibility, migration_compatibility)
        mock_exception_log.assert_called_once()
        data_motion.get_backend_configuration.assert_called_once_with(
            'destination_backend')
        self.assertFalse(mock_move_check.called)
        self.library._get_vserver.assert_has_calls(
            [mock.call(share_server=fake.SHARE_SERVER),
             mock.call(share_server='dst_srv')])
    def test_migration_check_compatibility_client_error(self):
        """An API error from the feasibility check denies compatibility.

        If check_volume_move raises NaApiError, the driver logs the
        exception and returns an all-False compatibility dict; the check
        itself must still have been attempted with the share's volume,
        source Vserver and destination pool.
        """
        self.library._have_cluster_creds = True
        self.mock_object(self.library, '_get_backend_share_name',
                         mock.Mock(return_value=fake.SHARE_NAME))
        mock_exception_log = self.mock_object(lib_base.LOG, 'exception')
        self.mock_object(data_motion, 'get_backend_configuration')
        self.mock_object(self.library, '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
        self.mock_object(share_utils, 'extract_host', mock.Mock(
            side_effect=['destination_backend', 'destination_pool']))
        mock_move_check = self.mock_object(
            self.client, 'check_volume_move',
            mock.Mock(side_effect=netapp_api.NaApiError))
        migration_compatibility = self.library.migration_check_compatibility(
            self.context, fake_share.fake_share_instance(),
            fake_share.fake_share_instance(), share_server=fake.SHARE_SERVER,
            destination_share_server='dst_srv')
        expected_compatibility = {
            'compatible': False,
            'writable': False,
            'nondisruptive': False,
            'preserve_metadata': False,
            'preserve_snapshots': False,
        }
        self.assertDictMatch(expected_compatibility, migration_compatibility)
        mock_exception_log.assert_called_once()
        data_motion.get_backend_configuration.assert_called_once_with(
            'destination_backend')
        mock_move_check.assert_called_once_with(
            fake.SHARE_NAME, fake.VSERVER1, 'destination_pool')
        self.library._get_vserver.assert_has_calls(
            [mock.call(share_server=fake.SHARE_SERVER),
             mock.call(share_server='dst_srv')])
def test_migration_check_compatibility(self):
self.library._have_cluster_creds = True
self.mock_object(self.library, '_get_backend_share_name',
mock.Mock(return_value=fake.SHARE_NAME))
self.mock_object(data_motion, 'get_backend_configuration')
self.mock_object(self.library, '_get_vserver',
mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
self.mock_object(share_utils, 'extract_host', mock.Mock(
side_effect=['destination_backend', 'destination_pool']))
mock_move_check = self.mock_object(self.client, 'check_volume_move')
migration_compatibility = self.library.migration_check_compatibility(
self.context, fake_share.fake_share_instance(),
fake_share.fake_share_instance(), share_server=fake.SHARE_SERVER,
destination_share_server='dst_srv')
expected_compatibility = {
'compatible': True,
'writable': True,
'nondisruptive': True,
'preserve_metadata': True,
'preserve_snapshots': True,
}
self.assertDictMatch(expected_compatibility, migration_compatibility)
data_motion.get_backend_configuration.assert_called_once_with(
'destination_backend')
mock_move_check.assert_called_once_with(
fake.SHARE_NAME, fake.VSERVER1, 'destination_pool')
self.library._get_vserver.assert_has_calls(
[mock.call(share_server=fake.SHARE_SERVER),
mock.call(share_server='dst_srv')])
def test_migration_start(self):
mock_info_log = self.mock_object(lib_base.LOG, 'info')
source_snapshots = mock.Mock()
snapshot_mappings = mock.Mock()
self.mock_object(self.library, '_get_vserver',
mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
self.mock_object(self.library, '_get_backend_share_name',
mock.Mock(return_value=fake.SHARE_NAME))
self.mock_object(share_utils, 'extract_host',
mock.Mock(return_value='destination_pool'))
mock_move = self.mock_object(self.client, 'start_volume_move')
retval = self.library.migration_start(
self.context, fake_share.fake_share_instance(),
fake_share.fake_share_instance(),
source_snapshots, snapshot_mappings,
share_server=fake.SHARE_SERVER, destination_share_server='dst_srv')
self.assertIsNone(retval)
self.assertTrue(mock_info_log.called)
mock_move.assert_called_once_with(
fake.SHARE_NAME, fake.VSERVER1, 'destination_pool')
    def test_migration_continue_volume_move_failed(self):
        """A move whose status phase is 'failed' raises NetAppException.

        migration_continue queries the volume-move job status once; a
        'failed' phase is logged and converted into a NetAppException for
        the share manager.
        """
        source_snapshots = mock.Mock()
        snapshot_mappings = mock.Mock()
        mock_exception_log = self.mock_object(lib_base.LOG, 'exception')
        self.mock_object(self.library, '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
        self.mock_object(self.library, '_get_backend_share_name',
                         mock.Mock(return_value=fake.SHARE_NAME))
        mock_status_check = self.mock_object(
            self.client, 'get_volume_move_status',
            mock.Mock(return_value={'phase': 'failed', 'details': 'unknown'}))
        self.assertRaises(exception.NetAppException,
                          self.library.migration_continue,
                          self.context, fake_share.fake_share_instance(),
                          fake_share.fake_share_instance(),
                          source_snapshots, snapshot_mappings,
                          share_server=None, destination_share_server=None)
        mock_status_check.assert_called_once_with(
            fake.SHARE_NAME, fake.VSERVER1)
        mock_exception_log.assert_called_once()
@ddt.data({'phase': 'Queued', 'completed': False},
{'phase': 'Finishing', 'completed': False},
{'phase': 'cutover_hard_deferred', 'completed': True},
{'phase': 'cutover_soft_deferred', 'completed': True},
{'phase': 'completed', 'completed': True})
@ddt.unpack
def test_migration_continue(self, phase, completed):
source_snapshots = mock.Mock()
snapshot_mappings = mock.Mock()
self.mock_object(self.library, '_get_vserver',
mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
self.mock_object(self.library, '_get_backend_share_name',
mock.Mock(return_value=fake.SHARE_NAME))
self.mock_object(self.client, 'get_volume_move_status',
mock.Mock(return_value={'phase': phase}))
migration_completed = self.library.migration_continue(
self.context, fake_share.fake_share_instance(),
fake_share.fake_share_instance(), source_snapshots,
snapshot_mappings, share_server=fake.SHARE_SERVER,
destination_share_server='dst_srv')
self.assertEqual(completed, migration_completed)
@ddt.data('cutover_hard_deferred', 'cutover_soft_deferred',
'Queued', 'Replicating')
def test_migration_get_progress_at_phase(self, phase):
source_snapshots = mock.Mock()
snapshot_mappings = mock.Mock()
mock_info_log = self.mock_object(lib_base.LOG, 'info')
status = {
'state': 'healthy',
'details': '%s:: Volume move job in progress' % phase,
'phase': phase,
'estimated-completion-time': '1481919246',
'percent-complete': 80,
}
self.mock_object(self.library, '_get_vserver',
mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
self.mock_object(self.library, '_get_backend_share_name',
mock.Mock(return_value=fake.SHARE_NAME))
self.mock_object(self.client, 'get_volume_move_status',
mock.Mock(return_value=status))
migration_progress = self.library.migration_get_progress(
self.context, fake_share.fake_share_instance(),
source_snapshots, snapshot_mappings,
fake_share.fake_share_instance(), share_server=fake.SHARE_SERVER,
destination_share_server='dst_srv')
expected_progress = {
'total_progress': 100 if phase.startswith('cutover') else 80,
'state': 'healthy',
'estimated_completion_time': '1481919246',
'details': '%s:: Volume move job in progress' % phase,
'phase': phase,
}
self.assertDictMatch(expected_progress, migration_progress)
mock_info_log.assert_called_once()
    @ddt.data(utils.annotated('already_canceled', (True, )),
              utils.annotated('not_canceled_yet', (False, )))
    def test_migration_cancel(self, already_canceled):
        """Cancel aborts the move only when one is still in progress.

        If querying the move status raises NetAppException (no move
        found, e.g. it was already canceled), the driver logs the
        exception and skips the abort; otherwise it aborts the move and
        logs the cancellation. Either way migration_cancel returns None.
        """
        source_snapshots = mock.Mock()
        snapshot_mappings = mock.Mock()
        # Unwrap the 1-tuple produced by utils.annotated.
        already_canceled = already_canceled[0]
        mock_exception_log = self.mock_object(lib_base.LOG, 'exception')
        mock_info_log = self.mock_object(lib_base.LOG, 'info')
        vol_move_side_effect = (exception.NetAppException
                               if already_canceled else None)
        self.mock_object(self.library, '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
        self.mock_object(self.library, '_get_backend_share_name',
                         mock.Mock(return_value=fake.SHARE_NAME))
        self.mock_object(self.client, 'abort_volume_move')
        self.mock_object(self.client, 'get_volume_move_status',
                         mock.Mock(side_effect=vol_move_side_effect))
        retval = self.library.migration_cancel(
            self.context, fake_share.fake_share_instance(),
            fake_share.fake_share_instance(), source_snapshots,
            snapshot_mappings, share_server=fake.SHARE_SERVER,
            destination_share_server='dst_srv')
        self.assertIsNone(retval)
        if already_canceled:
            mock_exception_log.assert_called_once()
        else:
            mock_info_log.assert_called_once()
        self.assertEqual(not already_canceled,
                         self.client.abort_volume_move.called)
def test_migration_complete_invalid_phase(self):
source_snapshots = mock.Mock()
snapshot_mappings = mock.Mock()
status = {
'state': 'healthy',
'phase': 'Replicating',
'details': 'Replicating:: Volume move operation is in progress.',
}
mock_exception_log = self.mock_object(lib_base.LOG, 'exception')
vserver_client = mock.Mock()
self.mock_object(
self.library, '_get_vserver',
mock.Mock(return_value=(fake.VSERVER1, vserver_client)))
self.mock_object(
self.library, '_get_backend_share_name',
mock.Mock(side_effect=[fake.SHARE_NAME, 'new_share_name']))
self.mock_object(self.library, '_get_volume_move_status',
mock.Mock(return_value=status))
self.mock_object(self.library, '_create_export')
self.assertRaises(
exception.NetAppException, self.library.migration_complete,
self.context, fake_share.fake_share_instance(),
fake_share.fake_share_instance, source_snapshots,
snapshot_mappings, share_server=fake.SHARE_SERVER,
destination_share_server='dst_srv')
self.assertFalse(vserver_client.set_volume_name.called)
self.assertFalse(self.library._create_export.called)
mock_exception_log.assert_called_once()
    def test_migration_complete_timeout(self):
        """A cutover that never completes within the timeout raises.

        With netapp_volume_move_cutover_timeout set to 15 seconds and the
        move never reaching 'completed', the driver polls the status,
        warning on each incomplete attempt (three polls fit in the window
        here — presumably one every 5 seconds; confirm against the
        library's poll interval), then raises NetAppException without
        renaming the volume or creating exports.
        """
        source_snapshots = mock.Mock()
        snapshot_mappings = mock.Mock()
        self.library.configuration.netapp_volume_move_cutover_timeout = 15
        # Status sequence: cutover is triggered but the move stays stuck
        # in 'Finishing' and never reports 'completed'.
        vol_move_side_effects = [
            {'phase': 'cutover_hard_deferred'},
            {'phase': 'Cutover'},
            {'phase': 'Finishing'},
            {'phase': 'Finishing'},
        ]
        self.mock_object(time, 'sleep')
        mock_warning_log = self.mock_object(lib_base.LOG, 'warning')
        vserver_client = mock.Mock()
        self.mock_object(
            self.library, '_get_vserver',
            mock.Mock(return_value=(fake.VSERVER1, vserver_client)))
        self.mock_object(
            self.library, '_get_backend_share_name',
            mock.Mock(side_effect=[fake.SHARE_NAME, 'new_share_name']))
        self.mock_object(self.library, '_get_volume_move_status', mock.Mock(
            side_effect=vol_move_side_effects))
        self.mock_object(self.library, '_create_export')
        src_share = fake_share.fake_share_instance(id='source-share-instance')
        dest_share = fake_share.fake_share_instance(id='dest-share-instance')
        self.assertRaises(
            exception.NetAppException, self.library.migration_complete,
            self.context, src_share, dest_share, source_snapshots,
            snapshot_mappings, share_server=fake.SHARE_SERVER,
            destination_share_server='dst_srv')
        self.assertFalse(vserver_client.set_volume_name.called)
        self.assertFalse(self.library._create_export.called)
        self.assertEqual(3, mock_warning_log.call_count)
    @ddt.data('cutover_hard_deferred', 'cutover_soft_deferred', 'completed')
    def test_migration_complete(self, phase):
        """Completing a move renames the volume and rebuilds exports.

        Cutover-deferred phases make the driver trigger cutover and poll
        until the move reports 'completed' (four status checks, warning
        on each not-yet-complete poll); an already-'completed' move needs
        a single status check. Either way the volume is renamed to the
        destination share name, exports are re-created without clearing
        the current export policy, and destination snapshots inherit the
        source snapshots' provider_location.
        """
        snap = fake_share.fake_snapshot_instance(
            id='src-snapshot', provider_location='test-src-provider-location')
        dest_snap = fake_share.fake_snapshot_instance(id='dest-snapshot',
                                                      as_primitive=True)
        source_snapshots = [snap]
        snapshot_mappings = {snap['id']: dest_snap}
        self.library.configuration.netapp_volume_move_cutover_timeout = 15
        # Status sequence seen when cutover was deferred: the move passes
        # through Cutover/Finishing before reaching 'completed'.
        vol_move_side_effects = [
            {'phase': phase},
            {'phase': 'Cutover'},
            {'phase': 'Finishing'},
            {'phase': 'completed'},
        ]
        self.mock_object(time, 'sleep')
        mock_debug_log = self.mock_object(lib_base.LOG, 'debug')
        mock_info_log = self.mock_object(lib_base.LOG, 'info')
        mock_warning_log = self.mock_object(lib_base.LOG, 'warning')
        vserver_client = mock.Mock()
        self.mock_object(
            self.library, '_get_vserver',
            mock.Mock(return_value=(fake.VSERVER1, vserver_client)))
        self.mock_object(
            self.library, '_get_backend_share_name',
            mock.Mock(side_effect=[fake.SHARE_NAME, 'new_share_name']))
        self.mock_object(self.library, '_create_export', mock.Mock(
            return_value=fake.NFS_EXPORTS))
        mock_move_status_check = self.mock_object(
            self.library, '_get_volume_move_status',
            mock.Mock(side_effect=vol_move_side_effects))
        src_share = fake_share.fake_share_instance(id='source-share-instance')
        dest_share = fake_share.fake_share_instance(id='dest-share-instance')
        data_updates = self.library.migration_complete(
            self.context, src_share, dest_share, source_snapshots,
            snapshot_mappings, share_server=fake.SHARE_SERVER,
            destination_share_server='dst_srv')
        self.assertEqual(fake.NFS_EXPORTS, data_updates['export_locations'])
        expected_dest_snap_updates = {
            'provider_location': snap['provider_location'],
        }
        self.assertIn(dest_snap['id'], data_updates['snapshot_updates'])
        self.assertEqual(expected_dest_snap_updates,
                         data_updates['snapshot_updates'][dest_snap['id']])
        vserver_client.set_volume_name.assert_called_once_with(
            fake.SHARE_NAME, 'new_share_name')
        self.library._create_export.assert_called_once_with(
            dest_share, fake.SHARE_SERVER, fake.VSERVER1, vserver_client,
            clear_current_export_policy=False)
        mock_info_log.assert_called_once()
        if phase != 'completed':
            self.assertEqual(2, mock_warning_log.call_count)
            self.assertFalse(mock_debug_log.called)
            self.assertEqual(4, mock_move_status_check.call_count)
        else:
            self.assertFalse(mock_warning_log.called)
            mock_debug_log.assert_called_once()
            mock_move_status_check.assert_called_once()

View File

@ -26,6 +26,57 @@ from manila import utils
CONF = cfg.CONF
class NamedBinaryStr(six.binary_type):
    """Wrapper for six.binary_type to facilitate overriding __name__.

    ddt derives test-case names from the data object's __name__;
    built-in byte strings cannot take new attributes, so annotated()
    wraps them in this subclass first.
    """
class NamedUnicodeStr(six.text_type):
    """Unicode string look-alike to facilitate overriding __name__.

    Instances carry the wrapped value in _value so __str__/encode can
    reproduce it, while still allowing a custom __name__ attribute for
    ddt test naming (plain text instances cannot take new attributes).
    """
    def __init__(self, value):
        # Mirror of the string content; __str__ and encode read this.
        self._value = value
    def __str__(self):
        return self._value
    def encode(self, enc):
        return self._value.encode(enc)
    def __format__(self, formatstr):
        """Workaround for ddt bug.
        DDT will always call __format__ even when __name__ exists,
        which blows up for Unicode strings under Py2.
        """
        return ''
class NamedDict(dict):
    """Wrapper for dict to facilitate overriding __name__.

    Used by annotated() so ddt can name test cases after dict inputs.
    """
class NamedTuple(tuple):
    """Wrapper for tuple to facilitate overriding __name__.

    Used by annotated() so ddt can name test cases after tuple inputs.
    (Docstring fixed: it previously claimed to wrap a dict.)
    """
def annotated(test_name, test_input):
    """Return *test_input* wrapped so ddt names the case *test_name*.

    The input is rewrapped in a subclass that accepts attribute
    assignment (dict, text, tuple, or byte-string flavor, checked in
    that order), and its __name__ is set to the given test name.
    """
    if isinstance(test_input, dict):
        named = NamedDict(test_input)
    elif isinstance(test_input, six.text_type):
        named = NamedUnicodeStr(test_input)
    elif isinstance(test_input, tuple):
        named = NamedTuple(test_input)
    else:
        named = NamedBinaryStr(test_input)
    named.__name__ = test_name
    return named
def get_test_admin_context():
    """Return an admin RequestContext for use in tests."""
    return context.get_admin_context()

View File

@ -0,0 +1,5 @@
---
features:
  - Driver-assisted migration support has been added to the NetApp cDOT
    driver to efficiently and nondisruptively migrate shares within a
    Vserver while preserving share data, snapshots and metadata.