[NetApp] Improve create share from snapshot functionality

This patch improves the create share from snapshot operation to
accept new destinations, which can be different pools or
back ends.

Change-Id: Id3b3d5860d6325f368cbebfe7f97c98d64554d72
Douglas Viroel 2020-02-10 13:49:32 +00:00
parent ba57e90d45
commit 63867a3ba9
12 changed files with 1433 additions and 61 deletions
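At a high level, the patched driver now picks one of three paths, depending on where the new share lands relative to its parent. A minimal editorial sketch of the decision logic follows (the function name and flat arguments are simplifications, not code from the patch):

# Editorial sketch, not part of the patch: simplified decision flow of the
# new create_share_from_snapshot behavior.
def choose_creation_path(parent_share, share, src_cluster, dest_cluster,
                         have_cluster_creds):
    if parent_share['host'] == share['host']:
        # Same pool: classic volume clone, answered synchronously.
        return 'clone_in_place'
    if src_cluster != dest_cluster or not have_cluster_creds:
        # Different clusters (or no cluster credentials): clone on the
        # source and replicate to the destination via SnapMirror.
        return 'snapmirror_data_copying'
    # Same cluster but a different pool or vserver: clone, split the
    # clone, then rehost and/or move the volume.
    return 'splitting_volume_clone'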


@ -2244,6 +2244,53 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
return
raise
@na_utils.trace
def check_volume_clone_split_completed(self, volume_name):
"""Check if volume clone split operation already finished"""
return self.get_volume_clone_parent_snaphot(volume_name) is None
@na_utils.trace
def get_volume_clone_parent_snaphot(self, volume_name):
"""Gets volume's clone parent.
Return the snapshot name of a volume's clone parent, or None if it
doesn't exist.
"""
api_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'name': volume_name
}
}
},
'desired-attributes': {
'volume-attributes': {
'volume-clone-attributes': {
'volume-clone-parent-attributes': {
'snapshot-name': ''
}
}
}
}
}
result = self.send_iter_request('volume-get-iter', api_args)
if not self._has_records(result):
return None
attributes_list = result.get_child_by_name(
'attributes-list') or netapp_api.NaElement('none')
volume_attributes = attributes_list.get_child_by_name(
'volume-attributes') or netapp_api.NaElement('none')
vol_clone_attrs = volume_attributes.get_child_by_name(
'volume-clone-attributes') or netapp_api.NaElement('none')
vol_clone_parent_atts = vol_clone_attrs.get_child_by_name(
'volume-clone-parent-attributes') or netapp_api.NaElement(
'none')
snapshot_name = vol_clone_parent_atts.get_child_content(
'snapshot-name')
return snapshot_name
@na_utils.trace
def get_clone_children_for_snapshot(self, volume_name, snapshot_name):
"""Returns volumes that are keeping a snapshot locked."""
@ -3964,3 +4011,19 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
return {
'ipv6-enabled': ipv6_enabled,
}
@na_utils.trace
def rehost_volume(self, volume_name, vserver, destination_vserver):
"""Rehosts a volume from one Vserver into another Vserver.
:param volume_name: Name of the FlexVol to be rehosted.
:param vserver: Source Vserver name to which target volume belongs.
:param destination_vserver: Destination Vserver name where target
volume must reside after successful volume rehost operation.
"""
api_args = {
'volume': volume_name,
'vserver': vserver,
'destination-vserver': destination_vserver,
}
self.send_request('volume-rehost', api_args)
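For illustration only, the new client method maps directly onto the 'volume-rehost' ZAPI call; the volume and vserver names below are hypothetical:

# Editorial sketch: rehost volume 'share_abc123' from vserver 'vs_src' to
# 'vs_dst', where client is a NetAppCmodeClient instance.
client.rehost_volume('share_abc123', 'vs_src', 'vs_dst')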


@ -283,3 +283,6 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
def unmanage_server(self, server_details, security_services=None):
return self.library.unmanage_server(server_details, security_services)
def get_share_status(self, share_instance, share_server=None):
return self.library.get_share_status(share_instance, share_server)


@ -280,3 +280,6 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
def unmanage_server(self, server_details, security_services=None):
raise NotImplementedError
def get_share_status(self, share_instance, share_server=None):
return self.library.get_share_status(share_instance, share_server)


@ -62,6 +62,11 @@ class NetAppCmodeFileStorageLibrary(object):
DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70'
DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization'
# Internal states when dealing with data motion
STATE_SPLITTING_VOLUME_CLONE = 'splitting_volume_clone'
STATE_MOVING_VOLUME = 'moving_volume'
STATE_SNAPMIRROR_DATA_COPYING = 'snapmirror_data_copying'
# Maps NetApp qualified extra specs keys to corresponding backend API
# client library argument keywords. When we expose more backend
# capabilities here, we will add them to this map.
@ -487,11 +492,278 @@ class NetAppCmodeFileStorageLibrary(object):
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None, parent_share=None):
"""Creates new share from snapshot."""
# TODO(dviroel) return progress info in asynchronous answers
if parent_share['host'] == share['host']:
src_vserver, src_vserver_client = self._get_vserver(
share_server=share_server)
# Creating a new share from snapshot in the source share's pool
self._allocate_container_from_snapshot(
share, snapshot, src_vserver, src_vserver_client)
return self._create_export(share, share_server, src_vserver,
src_vserver_client)
parent_share_server = {}
if parent_share['share_server'] is not None:
# Get only the information needed by Data Motion
ss_keys = ['id', 'identifier', 'backend_details', 'host']
for key in ss_keys:
parent_share_server[key] = (
parent_share['share_server'].get(key))
# Information to be saved in the private_storage, which will be
# retrieved later in order to continue with the share creation flow
src_share_instance = {
'id': share['id'],
'host': parent_share.get('host'),
'share_server': parent_share_server or None
}
# NOTE(dviroel): Data Motion functions access the share's
# 'share_server' attribute to get vserver information.
dest_share = copy.deepcopy(share.to_dict())
dest_share['share_server'] = (share_server.to_dict()
if share_server else None)
dm_session = data_motion.DataMotionSession()
# Source host info
__, src_vserver, src_backend = (
dm_session.get_backend_info_for_share(parent_share))
src_vserver_client = data_motion.get_client_for_backend(
src_backend, vserver_name=src_vserver)
src_cluster_name = src_vserver_client.get_cluster_name()
# Destination host info
dest_vserver, dest_vserver_client = self._get_vserver(share_server)
dest_cluster_name = dest_vserver_client.get_cluster_name()
try:
if (src_cluster_name != dest_cluster_name or
not self._have_cluster_creds):
# 1. Create a clone on the source. We don't need to split the
# clone in order to replicate data
self._allocate_container_from_snapshot(
dest_share, snapshot, src_vserver, src_vserver_client,
split=False)
# 2. Create a replica in destination host
self._allocate_container(
dest_share, dest_vserver, dest_vserver_client,
replica=True)
# 3. Initialize snapmirror relationship with cloned share.
src_share_instance['replica_state'] = (
constants.REPLICA_STATE_ACTIVE)
dm_session.create_snapmirror(src_share_instance, dest_share)
# The snapmirror data copy can take some time to complete,
# so we'll answer this call asynchronously
state = self.STATE_SNAPMIRROR_DATA_COPYING
else:
# NOTE(dviroel): there's a need to split the cloned share from
# its parent in order to move it to a different aggregate or
# vserver
self._allocate_container_from_snapshot(
dest_share, snapshot, src_vserver,
src_vserver_client, split=True)
# The split volume clone operation can take some time to
# complete, so we'll answer the call asynchronously
state = self.STATE_SPLITTING_VOLUME_CLONE
except Exception:
# If the share exists on the source vserver, we need to delete
# it, since it's a temporary share not managed by the system
dm_session.delete_snapmirror(src_share_instance, dest_share)
self._delete_share(src_share_instance, src_vserver_client,
remove_export=False)
msg = _('Could not create share %(share_id)s from snapshot '
'%(snapshot_id)s in the destination host %(dest_host)s.')
msg_args = {'share_id': dest_share['id'],
'snapshot_id': snapshot['id'],
'dest_host': dest_share['host']}
raise exception.NetAppException(msg % msg_args)
# Store source share info on private storage using destination share id
src_share_instance['internal_state'] = state
src_share_instance['status'] = constants.STATUS_ACTIVE
self.private_storage.update(dest_share['id'], {
'source_share': json.dumps(src_share_instance)
})
return {
'status': constants.STATUS_CREATING_FROM_SNAPSHOT,
}
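Because the driver now answers with STATUS_CREATING_FROM_SNAPSHOT instead of blocking, creation completes asynchronously and manila is expected to poll the driver until a terminal status is reached. A minimal editorial sketch of that contract (the actual polling loop lives in manila core, not in this patch):

# Editorial sketch: how the asynchronous answer is expected to be consumed.
update = {'status': constants.STATUS_CREATING_FROM_SNAPSHOT}
while update['status'] == constants.STATUS_CREATING_FROM_SNAPSHOT:
    # get_share_status is the driver entry point added by this patch.
    update = driver.get_share_status(share, share_server)
# On success, update carries STATUS_AVAILABLE plus 'export_locations'.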
def _update_create_from_snapshot_status(self, share, share_server=None):
# TODO(dviroel) return progress info in asynchronous answers
# If the share is being created from a snapshot and copying data in
# the background, check whether the operation has finished and
# trigger new operations if necessary.
source_share_str = self.private_storage.get(share['id'],
'source_share')
if source_share_str is None:
msg = _('Could not update share %(share_id)s status due to invalid'
' internal state. Aborting share creation.')
msg_args = {'share_id': share['id']}
LOG.error(msg, msg_args)
return {'status': constants.STATUS_ERROR}
try:
# Check if the current operation has finished and continue to move the
# source share towards its destination
return self._create_from_snapshot_continue(share, share_server)
except Exception:
# Delete everything associated with the temporary clone created
# on the source host.
source_share = json.loads(source_share_str)
dm_session = data_motion.DataMotionSession()
dm_session.delete_snapmirror(source_share, share)
__, src_vserver, src_backend = (
dm_session.get_backend_info_for_share(source_share))
src_vserver_client = data_motion.get_client_for_backend(
src_backend, vserver_name=src_vserver)
self._delete_share(source_share, src_vserver_client,
remove_export=False)
# Delete private storage info
self.private_storage.delete(share['id'])
msg = _('Could not complete share %(share_id)s creation due to an '
'internal error.')
msg_args = {'share_id': share['id']}
LOG.error(msg, msg_args)
return {'status': constants.STATUS_ERROR}
def _create_from_snapshot_continue(self, share, share_server=None):
return_values = {
'status': constants.STATUS_CREATING_FROM_SNAPSHOT
}
apply_qos_on_dest = False
# Data motion session used to extract host info and manage snapmirrors
dm_session = data_motion.DataMotionSession()
# Get info from private storage
src_share_str = self.private_storage.get(share['id'], 'source_share')
src_share = json.loads(src_share_str)
current_state = src_share['internal_state']
share['share_server'] = share_server
# Source host info
__, src_vserver, src_backend = (
dm_session.get_backend_info_for_share(src_share))
src_aggr = share_utils.extract_host(src_share['host'], level='pool')
src_vserver_client = data_motion.get_client_for_backend(
src_backend, vserver_name=src_vserver)
# Destination host info
dest_vserver, dest_vserver_client = self._get_vserver(share_server)
dest_aggr = share_utils.extract_host(share['host'], level='pool')
if current_state == self.STATE_SPLITTING_VOLUME_CLONE:
if self._check_volume_clone_split_completed(
src_share, src_vserver_client):
# Rehost volume if source and destination are hosted in
# different vservers
if src_vserver != dest_vserver:
# NOTE(dviroel): some volume policies, policy rules and
# configurations are lost from the source volume after the
# rehost operation.
qos_policy_for_share = (
self._get_backend_qos_policy_group_name(share['id']))
src_vserver_client.mark_qos_policy_group_for_deletion(
qos_policy_for_share)
# Apply QoS on destination share
apply_qos_on_dest = True
self._rehost_and_mount_volume(
share, src_vserver, src_vserver_client,
dest_vserver, dest_vserver_client)
# Move the share to the expected aggregate
if src_aggr != dest_aggr:
# Move volume and 'defer' the cutover. If it fails, the
# share will be deleted afterwards
self._move_volume_after_splitting(
src_share, share, share_server, cutover_action='defer')
# Moving a volume can take a long time, so we'll answer
# asynchronously
current_state = self.STATE_MOVING_VOLUME
else:
return_values['status'] = constants.STATUS_AVAILABLE
elif current_state == self.STATE_MOVING_VOLUME:
if self._check_volume_move_completed(share, share_server):
if src_vserver != dest_vserver:
# NOTE(dviroel): at this point we have already rehosted the
# share, but QoS wasn't applied yet, since the share was being
# moved between aggregates
apply_qos_on_dest = True
return_values['status'] = constants.STATUS_AVAILABLE
elif current_state == self.STATE_SNAPMIRROR_DATA_COPYING:
replica_state = self.update_replica_state(
None, # no context is needed
[src_share],
share,
[], # access_rules
[], # snapshot list
share_server)
if replica_state in [None, constants.STATUS_ERROR]:
msg = _("Destination share has failed on replicating data "
"from source share.")
LOG.exception(msg)
raise exception.NetAppException(msg)
elif replica_state == constants.REPLICA_STATE_IN_SYNC:
try:
# 1. Start an update to try to get a last minute
# transfer before we quiesce and break
dm_session.update_snapmirror(src_share, share)
except exception.StorageCommunicationException:
# Ignore any errors since the current source replica
# may be unreachable
pass
# 2. Break SnapMirror
# NOTE(dviroel): if breaking or deleting the snapmirror
# relationship fails, we won't be able to delete the share.
dm_session.break_snapmirror(src_share, share)
dm_session.delete_snapmirror(src_share, share)
# 3. Delete the source volume
self._delete_share(src_share, src_vserver_client,
remove_export=False)
share_name = self._get_backend_share_name(src_share['id'])
# 4. Set filesys-size-fixed to false
dest_vserver_client.set_volume_filesys_size_fixed(
share_name, filesys_size_fixed=False)
apply_qos_on_dest = True
return_values['status'] = constants.STATUS_AVAILABLE
else:
# Delete this share from private storage since we'll abort this
# operation.
self.private_storage.delete(share['id'])
msg_args = {
'state': current_state,
'id': share['id'],
}
msg = _("Caught an unexpected internal state '%(state)s' for "
"share %(id)s. Aborting operation.") % msg_args
LOG.exception(msg)
raise exception.NetAppException(msg)
if return_values['status'] == constants.STATUS_AVAILABLE:
if apply_qos_on_dest:
extra_specs = share_types.get_extra_specs_from_share(share)
provisioning_options = self._get_provisioning_options(
extra_specs)
qos_policy_group_name = (
self._modify_or_create_qos_for_existing_share(
share, extra_specs, dest_vserver, dest_vserver_client))
if qos_policy_group_name:
provisioning_options['qos_policy_group'] = (
qos_policy_group_name)
share_name = self._get_backend_share_name(share['id'])
# Modify volume to match extra specs
dest_vserver_client.modify_volume(
dest_aggr, share_name, **provisioning_options)
self.private_storage.delete(share['id'])
return_values['export_locations'] = self._create_export(
share, share_server, dest_vserver, dest_vserver_client,
clear_current_export_policy=False)
else:
new_src_share = copy.deepcopy(src_share)
new_src_share['internal_state'] = current_state
self.private_storage.update(share['id'], {
'source_share': json.dumps(new_src_share)
})
return return_values
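To summarize the method above (an editorial aid, not code from the patch): each internal state stored in private_storage maps to the work that must finish before the share can become available.

# Editorial sketch: transitions handled by _create_from_snapshot_continue.
# The string keys match the STATE_* constants defined earlier in this class.
NEXT_STEP = {
    'splitting_volume_clone': 'when the split finishes: rehost if vservers '
                              'differ, then move if aggregates differ',
    'moving_volume': 'when the volume move finishes: apply QoS and export',
    'snapmirror_data_copying': 'when in sync: update, break and delete the '
                               'mirror, then delete the source clone',
}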
@na_utils.trace
def _allocate_container(self, share, vserver, vserver_client,
@ -506,7 +778,7 @@ class NetAppCmodeFileStorageLibrary(object):
raise exception.InvalidHost(reason=msg)
provisioning_options = self._get_provisioning_options_for_share(
share, vserver, vserver_client=vserver_client, replica=replica)
if replica:
# If this volume is intended to be a replication destination,
@ -694,17 +966,19 @@ class NetAppCmodeFileStorageLibrary(object):
int(qos_specs['maxbpspergib']) * int(share_size))
@na_utils.trace
def _create_qos_policy_group(self, share, vserver, qos_specs,
vserver_client=None):
max_throughput = self._get_max_throughput(share['size'], qos_specs)
qos_policy_group_name = self._get_backend_qos_policy_group_name(
share['id'])
client = vserver_client or self._client
client.qos_policy_group_create(qos_policy_group_name, vserver,
max_throughput=max_throughput)
return qos_policy_group_name
@na_utils.trace
def _get_provisioning_options_for_share(
self, share, vserver, vserver_client=None, replica=False):
"""Return provisioning options from a share.
Starting with a share, this method gets the extra specs, rationalizes
@ -719,7 +993,7 @@ class NetAppCmodeFileStorageLibrary(object):
qos_specs = self._get_normalized_qos_specs(extra_specs)
if qos_specs and not replica:
qos_policy_group = self._create_qos_policy_group(
share, vserver, qos_specs, vserver_client)
provisioning_options['qos_policy_group'] = qos_policy_group
return provisioning_options
@ -766,7 +1040,7 @@ class NetAppCmodeFileStorageLibrary(object):
@na_utils.trace
def _allocate_container_from_snapshot(
self, share, snapshot, vserver, vserver_client,
snapshot_name_func=_get_backend_snapshot_name, split=None):
"""Clones existing share."""
share_name = self._get_backend_share_name(share['id'])
parent_share_name = self._get_backend_share_name(snapshot['share_id'])
@ -776,14 +1050,17 @@ class NetAppCmodeFileStorageLibrary(object):
parent_snapshot_name = snapshot['provider_location']
provisioning_options = self._get_provisioning_options_for_share(
share, vserver, vserver_client=vserver_client)
hide_snapdir = provisioning_options.pop('hide_snapdir')
if split is not None:
provisioning_options['split'] = split
LOG.debug('Creating share from snapshot %s', snapshot['id'])
vserver_client.create_volume_clone(
share_name, parent_share_name, parent_snapshot_name,
**provisioning_options)
if share['size'] > snapshot['size']:
vserver_client.set_volume_size(share_name, share['size'])
@ -795,6 +1072,20 @@ class NetAppCmodeFileStorageLibrary(object):
def _share_exists(self, share_name, vserver_client):
return vserver_client.volume_exists(share_name)
@na_utils.trace
def _delete_share(self, share, vserver_client, remove_export=True):
share_name = self._get_backend_share_name(share['id'])
if self._share_exists(share_name, vserver_client):
if remove_export:
self._remove_export(share, vserver_client)
self._deallocate_container(share_name, vserver_client)
qos_policy_for_share = self._get_backend_qos_policy_group_name(
share['id'])
vserver_client.mark_qos_policy_group_for_deletion(
qos_policy_for_share)
else:
LOG.info("Share %s does not exist.", share['id'])
@na_utils.trace
def delete_share(self, context, share, share_server=None):
"""Deletes share."""
@ -809,17 +1100,7 @@ class NetAppCmodeFileStorageLibrary(object):
"will proceed anyway. Error: %(error)s", "will proceed anyway. Error: %(error)s",
{'share': share['id'], 'error': error}) {'share': share['id'], 'error': error})
return return
self._delete_share(share, vserver_client)
@na_utils.trace
def _deallocate_container(self, share_name, vserver_client):
@ -2061,10 +2342,42 @@ class NetAppCmodeFileStorageLibrary(object):
return compatibility
def _move_volume_after_splitting(self, source_share, destination_share,
share_server=None, cutover_action='wait'):
retries = (self.configuration.netapp_start_volume_move_timeout / 5
or 1)
@manila_utils.retry(exception.ShareBusyException, interval=5,
retries=retries, backoff_rate=1)
def try_move_volume():
try:
self._move_volume(source_share, destination_share,
share_server, cutover_action)
except netapp_api.NaApiError as e:
undergoing_split = 'undergoing a clone split'
msg_args = {'id': source_share['id']}
if (e.code == netapp_api.EAPIERROR and
undergoing_split in e.message):
msg = _('The volume %(id)s is undergoing a clone split '
'operation. Will retry the operation.') % msg_args
LOG.warning(msg)
raise exception.ShareBusyException(reason=msg)
else:
msg = _("Unable to perform move operation for the volume "
"%(id)s. Caught an unexpected error. Not "
"retrying.") % msg_args
raise exception.NetAppException(message=msg)
try:
try_move_volume()
except exception.ShareBusyException:
msg_args = {'id': source_share['id']}
msg = _("Unable to perform move operation for the volume %(id)s "
"because a clone split operation is still in progress. "
"Retries exhausted. Not retrying.") % msg_args
raise exception.NetAppException(message=msg)
def _move_volume(self, source_share, destination_share, share_server=None,
cutover_action='wait'):
# Intra-cluster migration
vserver, vserver_client = self._get_vserver(share_server=share_server)
share_volume = self._get_backend_share_name(source_share['id'])
@ -2082,6 +2395,7 @@ class NetAppCmodeFileStorageLibrary(object):
share_volume,
vserver,
destination_aggregate,
cutover_action=cutover_action,
encrypt_destination=encrypt_dest)
msg = ("Began volume move operation of share %(shr)s from %(src)s "
@ -2093,12 +2407,22 @@ class NetAppCmodeFileStorageLibrary(object):
}
LOG.info(msg, msg_args)
def migration_start(self, context, source_share, destination_share,
source_snapshots, snapshot_mappings,
share_server=None, destination_share_server=None):
"""Begins data motion from source_share to destination_share."""
self._move_volume(source_share, destination_share, share_server)
def _get_volume_move_status(self, source_share, share_server):
vserver, vserver_client = self._get_vserver(share_server=share_server)
share_volume = self._get_backend_share_name(source_share['id'])
status = self._client.get_volume_move_status(share_volume, vserver)
return status
def _check_volume_clone_split_completed(self, share, vserver_client):
share_volume = self._get_backend_share_name(share['id'])
return vserver_client.check_volume_clone_split_completed(share_volume)
def _get_dest_flexvol_encryption_value(self, destination_share):
dest_share_type_encrypted_val = share_types.get_share_type_extra_specs(
destination_share['share_type_id'],
@ -2108,10 +2432,8 @@ class NetAppCmodeFileStorageLibrary(object):
return encrypt_destination
def _check_volume_move_completed(self, source_share, share_server):
"""Check progress of volume move operation."""
status = self._get_volume_move_status(source_share, share_server)
completed_phases = (
'cutover_hard_deferred', 'cutover_soft_deferred', 'completed')
@ -2131,11 +2453,13 @@ class NetAppCmodeFileStorageLibrary(object):
return False
def migration_continue(self, context, source_share, destination_share,
source_snapshots, snapshot_mappings,
share_server=None, destination_share_server=None):
"""Check progress of migration, try to repair data motion errors."""
return self._check_volume_move_completed(source_share, share_server)
def _get_volume_move_progress(self, source_share, share_server):
status = self._get_volume_move_status(source_share, share_server)
# NOTE (gouthamr): If the volume move is waiting for a manual
@ -2163,6 +2487,13 @@ class NetAppCmodeFileStorageLibrary(object):
'details': status['details'],
}
def migration_get_progress(self, context, source_share,
destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
"""Return detailed progress of the migration in progress."""
return self._get_volume_move_progress(source_share, share_server)
def migration_cancel(self, context, source_share, destination_share,
source_snapshots, snapshot_mappings,
share_server=None, destination_share_server=None):
@ -2342,7 +2673,8 @@ class NetAppCmodeFileStorageLibrary(object):
LOG.debug("No existing QoS policy group found for " LOG.debug("No existing QoS policy group found for "
"volume. Creating a new one with name %s.", "volume. Creating a new one with name %s.",
qos_policy_group_name) qos_policy_group_name)
self._create_qos_policy_group(share_obj, vserver, qos_specs,
vserver_client=vserver_client)
return qos_policy_group_name
def _wait_for_cutover_completion(self, source_share, share_server):
@ -2389,3 +2721,33 @@ class NetAppCmodeFileStorageLibrary(object):
share_name = self._get_backend_share_name(share['id'])
self._apply_snapdir_visibility(
hide_snapdir, share_name, vserver_client)
def get_share_status(self, share, share_server=None):
if share['status'] == constants.STATUS_CREATING_FROM_SNAPSHOT:
return self._update_create_from_snapshot_status(share,
share_server)
else:
LOG.warning("Caught an unexpected share status '%s' during share "
"status update routine. Skipping.", share['status'])
def volume_rehost(self, share, src_vserver, dest_vserver):
volume_name = self._get_backend_share_name(share['id'])
msg = ("Rehosting volume of share %(shr)s from vserver %(src)s "
"to vserver %(dest)s.")
msg_args = {
'shr': share['id'],
'src': src_vserver,
'dest': dest_vserver,
}
LOG.info(msg, msg_args)
self._client.rehost_volume(volume_name, src_vserver, dest_vserver)
def _rehost_and_mount_volume(self, share, src_vserver, src_vserver_client,
dest_vserver, dest_vserver_client):
volume_name = self._get_backend_share_name(share['id'])
# Unmount volume in the source vserver:
src_vserver_client.unmount_volume(volume_name)
# Rehost the volume
self.volume_rehost(share, src_vserver, dest_vserver)
# Mount the volume on the destination vserver
dest_vserver_client.mount_volume(volume_name)


@ -20,6 +20,7 @@ variant creates Data ONTAP storage virtual machines (i.e. 'vservers')
as needed to provision shares.
"""
import copy
import re
from oslo_log import log
@ -553,3 +554,52 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
def _delete_vserver_peer(self, vserver, peer_vserver):
self._client.delete_vserver_peer(vserver, peer_vserver)
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None, parent_share=None):
# NOTE(dviroel): If both parent and child shares are in the same host,
# they belong to the same cluster, and we can skip all the processing
# below.
if parent_share['host'] != share['host']:
# 1. Retrieve source and destination vservers from source and
# destination shares
new_share = copy.deepcopy(share.to_dict())
new_share['share_server'] = share_server.to_dict()
dm_session = data_motion.DataMotionSession()
src_vserver = dm_session.get_vserver_from_share(parent_share)
dest_vserver = dm_session.get_vserver_from_share(new_share)
# 2. Retrieve the source share host's client and cluster name
src_share_host = share_utils.extract_host(
parent_share['host'], level='backend_name')
src_share_client = data_motion.get_client_for_backend(
src_share_host, vserver_name=src_vserver)
# Cluster name is needed for setting up the vserver peering
src_share_cluster_name = src_share_client.get_cluster_name()
# 3. Retrieve new share host's client
dest_share_host = share_utils.extract_host(
new_share['host'], level='backend_name')
dest_share_client = data_motion.get_client_for_backend(
dest_share_host, vserver_name=dest_vserver)
dest_share_cluster_name = dest_share_client.get_cluster_name()
# If the source and destination shares are placed in different
# clusters, we'll need both vservers peered.
if src_share_cluster_name != dest_share_cluster_name:
if not self._get_vserver_peers(dest_vserver, src_vserver):
# 3.1. Request vserver peer creation from the destination
# share's host to the source share's host
dest_share_client.create_vserver_peer(
dest_vserver, src_vserver,
peer_cluster_name=src_share_cluster_name)
# 3.2. Accept the vserver peering using the source share
# host's client
src_share_client.accept_vserver_peer(src_vserver,
dest_vserver)
return (super(NetAppCmodeMultiSVMFileStorageLibrary, self)
.create_share_from_snapshot(
context, share, snapshot, share_server=share_server,
parent_share=parent_share))
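For illustration, the cross-cluster prerequisite established above reduces to a two-step handshake (vserver and cluster names are hypothetical):

# Editorial sketch: the vserver peering handshake used by the flow above.
dest_share_client.create_vserver_peer(
    'vs_dst', 'vs_src', peer_cluster_name='cluster_src')  # request from destination
src_share_client.accept_vserver_peer('vs_src', 'vs_dst')  # accept on source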


@ -150,7 +150,13 @@ netapp_data_motion_opts = [
default=3600, # One Hour,
help='The maximum time in seconds to wait for the completion '
'of a volume move operation after the cutover '
'was triggered.'),
cfg.IntOpt('netapp_start_volume_move_timeout',
min=0,
default=3600, # One Hour,
help='The maximum time in seconds to wait for the completion '
'of a volume clone split operation in order to start a '
'volume move.'), ]
CONF = cfg.CONF
CONF.register_opts(netapp_proxy_opts)
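As an illustration, the new option would sit next to the existing data-motion timeouts in the backend stanza of manila.conf (the stanza name is hypothetical; 3600 seconds is the default):

[netapp_backend]
# Wait up to two hours for the clone split to finish before giving up on
# starting the volume move.
netapp_start_volume_move_timeout = 7200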


@ -2195,6 +2195,46 @@ VOLUME_GET_ITER_CLONE_CHILDREN_RESPONSE = etree.XML("""
'clone2': CLONE_CHILD_2,
})
VOLUME_GET_ITER_PARENT_SNAP_EMPTY_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<name>%(name)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'vserver': VSERVER_NAME,
'name': SHARE_NAME,
})
VOLUME_GET_ITER_PARENT_SNAP_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-clone-attributes>
<volume-clone-parent-attributes>
<snapshot-name>%(snapshot_name)s</snapshot-name>
</volume-clone-parent-attributes>
</volume-clone-attributes>
<volume-id-attributes>
<name>%(name)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'snapshot_name': SNAPSHOT_NAME,
'vserver': VSERVER_NAME,
'name': SHARE_NAME,
})
SIS_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>


@ -6698,3 +6698,72 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.assertEqual(fake.CLUSTER_NAME, result)
self.client.send_request.assert_called_once_with(
'cluster-identity-get', api_args, enable_tunneling=False)
@ddt.data('fake_snapshot_name', None)
def test_check_volume_clone_split_completed(self, get_clone_parent):
volume_name = fake.SHARE_NAME
mock_get_vol_clone_parent = self.mock_object(
self.client, 'get_volume_clone_parent_snaphot',
mock.Mock(return_value=get_clone_parent))
result = self.client.check_volume_clone_split_completed(volume_name)
mock_get_vol_clone_parent.assert_called_once_with(volume_name)
expected_result = get_clone_parent is None
self.assertEqual(expected_result, result)
def test_rehost_volume(self):
volume_name = fake.SHARE_NAME
vserver = fake.VSERVER_NAME
dest_vserver = fake.VSERVER_NAME_2
api_args = {
'volume': volume_name,
'vserver': vserver,
'destination-vserver': dest_vserver,
}
self.mock_object(self.client, 'send_request')
self.client.rehost_volume(volume_name, vserver, dest_vserver)
self.client.send_request.assert_called_once_with('volume-rehost',
api_args)
@ddt.data(
{'fake_api_response': fake.VOLUME_GET_ITER_PARENT_SNAP_EMPTY_RESPONSE,
'expected_snapshot_name': None},
{'fake_api_response': fake.VOLUME_GET_ITER_PARENT_SNAP_RESPONSE,
'expected_snapshot_name': fake.SNAPSHOT_NAME},
{'fake_api_response': fake.NO_RECORDS_RESPONSE,
'expected_snapshot_name': None})
@ddt.unpack
def test_get_volume_clone_parent_snaphot(self, fake_api_response,
expected_snapshot_name):
api_response = netapp_api.NaElement(fake_api_response)
self.mock_object(self.client,
'send_iter_request',
mock.Mock(return_value=api_response))
result = self.client.get_volume_clone_parent_snaphot(fake.SHARE_NAME)
expected_api_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'name': fake.SHARE_NAME
}
}
},
'desired-attributes': {
'volume-attributes': {
'volume-clone-attributes': {
'volume-clone-parent-attributes': {
'snapshot-name': ''
}
}
}
}
}
self.client.send_iter_request.assert_called_once_with(
'volume-get-iter', expected_api_args)
self.assertEqual(expected_snapshot_name, result)


@ -677,7 +677,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.context,
fake.SHARE,
fake.SNAPSHOT,
share_server=fake.SHARE_SERVER,
parent_share=fake.SHARE)
mock_allocate_container_from_snapshot.assert_called_once_with(
fake.SHARE,
@ -690,6 +691,516 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
vserver_client)
self.assertEqual('fake_export_location', result)
def _setup_mocks_for_create_share_from_snapshot(
self, allocate_attr=None, dest_cluster=fake.CLUSTER_NAME):
class FakeDBObj(dict):
def to_dict(self):
return self
if allocate_attr is None:
allocate_attr = mock.Mock()
self.src_vserver_client = mock.Mock()
self.mock_dm_session = mock.Mock()
self.fake_share = FakeDBObj(fake.SHARE)
self.fake_share_server = FakeDBObj(fake.SHARE_SERVER)
self.mock_dm_constr = self.mock_object(
data_motion, "DataMotionSession",
mock.Mock(return_value=self.mock_dm_session))
self.mock_dm_backend = self.mock_object(
self.mock_dm_session, 'get_backend_info_for_share',
mock.Mock(return_value=(None,
fake.VSERVER1, fake.BACKEND_NAME)))
self.mock_dm_get_src_client = self.mock_object(
data_motion, 'get_client_for_backend',
mock.Mock(return_value=self.src_vserver_client))
self.mock_get_src_cluster = self.mock_object(
self.src_vserver_client, 'get_cluster_name',
mock.Mock(return_value=fake.CLUSTER_NAME))
self.dest_vserver_client = mock.Mock()
self.mock_get_vserver = self.mock_object(
self.library, '_get_vserver',
mock.Mock(return_value=(fake.VSERVER2, self.dest_vserver_client)))
self.mock_get_dest_cluster = self.mock_object(
self.dest_vserver_client, 'get_cluster_name',
mock.Mock(return_value=dest_cluster))
self.mock_allocate_container_from_snapshot = self.mock_object(
self.library, '_allocate_container_from_snapshot', allocate_attr)
self.mock_allocate_container = self.mock_object(
self.library, '_allocate_container')
self.mock_dm_create_snapmirror = self.mock_object(
self.mock_dm_session, 'create_snapmirror')
self.mock_storage_update = self.mock_object(
self.library.private_storage, 'update')
self.mock_object(self.library, '_have_cluster_creds',
mock.Mock(return_value=True))
# Parent share on MANILA_HOST_2
self.parent_share = copy.copy(fake.SHARE)
self.parent_share['share_server'] = fake.SHARE_SERVER_2
self.parent_share['host'] = fake.MANILA_HOST_NAME_2
self.parent_share_server = {}
ss_keys = ['id', 'identifier', 'backend_details', 'host']
for key in ss_keys:
self.parent_share_server[key] = (
self.parent_share['share_server'].get(key, None))
self.temp_src_share = {
'id': self.fake_share['id'],
'host': self.parent_share['host'],
'share_server': self.parent_share_server or None
}
@ddt.data(fake.CLUSTER_NAME, fake.CLUSTER_NAME_2)
def test_create_share_from_snapshot_another_host(self, dest_cluster):
self._setup_mocks_for_create_share_from_snapshot(
dest_cluster=dest_cluster)
result = self.library.create_share_from_snapshot(
self.context,
self.fake_share,
fake.SNAPSHOT,
share_server=self.fake_share_server,
parent_share=self.parent_share)
self.fake_share['share_server'] = self.fake_share_server
self.mock_dm_constr.assert_called_once()
self.mock_dm_backend.assert_called_once_with(self.parent_share)
self.mock_dm_get_src_client.assert_called_once_with(
fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
self.mock_get_src_cluster.assert_called_once()
self.mock_get_vserver.assert_called_once_with(self.fake_share_server)
self.mock_get_dest_cluster.assert_called_once()
if dest_cluster != fake.CLUSTER_NAME:
self.mock_allocate_container_from_snapshot.assert_called_once_with(
self.fake_share, fake.SNAPSHOT, fake.VSERVER1,
self.src_vserver_client, split=False)
self.mock_allocate_container.assert_called_once_with(
self.fake_share, fake.VSERVER2,
self.dest_vserver_client, replica=True)
self.mock_dm_create_snapmirror.assert_called_once()
self.temp_src_share['replica_state'] = (
constants.REPLICA_STATE_ACTIVE)
state = self.library.STATE_SNAPMIRROR_DATA_COPYING
else:
self.mock_allocate_container_from_snapshot.assert_called_once_with(
self.fake_share, fake.SNAPSHOT, fake.VSERVER1,
self.src_vserver_client, split=True)
state = self.library.STATE_SPLITTING_VOLUME_CLONE
self.temp_src_share['internal_state'] = state
self.temp_src_share['status'] = constants.STATUS_ACTIVE
str_temp_src_share = json.dumps(self.temp_src_share)
self.mock_storage_update.assert_called_once_with(
self.fake_share['id'], {
'source_share': str_temp_src_share
})
expected_return = {'status': constants.STATUS_CREATING_FROM_SNAPSHOT}
self.assertEqual(expected_return, result)
def test_create_share_from_snapshot_another_host_driver_error(self):
self._setup_mocks_for_create_share_from_snapshot(
allocate_attr=mock.Mock(side_effect=exception.NetAppException))
mock_delete_snapmirror = self.mock_object(
self.mock_dm_session, 'delete_snapmirror')
mock_get_backend_shr_name = self.mock_object(
self.library, '_get_backend_share_name',
mock.Mock(return_value=fake.SHARE_NAME))
mock_share_exists = self.mock_object(
self.library, '_share_exists',
mock.Mock(return_value=True))
mock_deallocate_container = self.mock_object(
self.library, '_deallocate_container')
self.assertRaises(exception.NetAppException,
self.library.create_share_from_snapshot,
self.context,
self.fake_share,
fake.SNAPSHOT,
share_server=self.fake_share_server,
parent_share=self.parent_share)
self.fake_share['share_server'] = self.fake_share_server
self.mock_dm_constr.assert_called_once()
self.mock_dm_backend.assert_called_once_with(self.parent_share)
self.mock_dm_get_src_client.assert_called_once_with(
fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
self.mock_get_src_cluster.assert_called_once()
self.mock_get_vserver.assert_called_once_with(self.fake_share_server)
self.mock_get_dest_cluster.assert_called_once()
self.mock_allocate_container_from_snapshot.assert_called_once_with(
self.fake_share, fake.SNAPSHOT, fake.VSERVER1,
self.src_vserver_client, split=True)
mock_delete_snapmirror.assert_called_once_with(self.temp_src_share,
self.fake_share)
mock_get_backend_shr_name.assert_called_once_with(
self.fake_share['id'])
mock_share_exists.assert_called_once_with(fake.SHARE_NAME,
self.src_vserver_client)
mock_deallocate_container.assert_called_once_with(
fake.SHARE_NAME, self.src_vserver_client)
def test__update_create_from_snapshot_status(self):
fake_result = mock.Mock()
mock_pvt_storage_get = self.mock_object(
self.library.private_storage, 'get',
mock.Mock(return_value=fake.SHARE))
mock__create_continue = self.mock_object(
self.library, '_create_from_snapshot_continue',
mock.Mock(return_value=fake_result))
result = self.library._update_create_from_snapshot_status(
fake.SHARE, fake.SHARE_SERVER)
mock_pvt_storage_get.assert_called_once_with(fake.SHARE['id'],
'source_share')
mock__create_continue.assert_called_once_with(fake.SHARE,
fake.SHARE_SERVER)
self.assertEqual(fake_result, result)
def test__update_create_from_snapshot_status_missing_source_share(self):
mock_pvt_storage_get = self.mock_object(
self.library.private_storage, 'get',
mock.Mock(return_value=None))
expected_result = {'status': constants.STATUS_ERROR}
result = self.library._update_create_from_snapshot_status(
fake.SHARE, fake.SHARE_SERVER)
mock_pvt_storage_get.assert_called_once_with(fake.SHARE['id'],
'source_share')
self.assertEqual(expected_result, result)
def test__update_create_from_snapshot_status_driver_error(self):
fake_src_share = {
'id': fake.SHARE['id'],
'host': fake.SHARE['host'],
'internal_state': 'fake_internal_state',
}
copy_fake_src_share = copy.deepcopy(fake_src_share)
src_vserver_client = mock.Mock()
mock_dm_session = mock.Mock()
mock_pvt_storage_get = self.mock_object(
self.library.private_storage, 'get',
mock.Mock(return_value=json.dumps(copy_fake_src_share)))
mock__create_continue = self.mock_object(
self.library, '_create_from_snapshot_continue',
mock.Mock(side_effect=exception.NetAppException))
mock_dm_constr = self.mock_object(
data_motion, "DataMotionSession",
mock.Mock(return_value=mock_dm_session))
mock_delete_snapmirror = self.mock_object(
mock_dm_session, 'delete_snapmirror')
mock_dm_backend = self.mock_object(
mock_dm_session, 'get_backend_info_for_share',
mock.Mock(return_value=(None,
fake.VSERVER1, fake.BACKEND_NAME)))
mock_dm_get_src_client = self.mock_object(
data_motion, 'get_client_for_backend',
mock.Mock(return_value=src_vserver_client))
mock_get_backend_shr_name = self.mock_object(
self.library, '_get_backend_share_name',
mock.Mock(return_value=fake.SHARE_NAME))
mock_share_exists = self.mock_object(
self.library, '_share_exists',
mock.Mock(return_value=True))
mock_deallocate_container = self.mock_object(
self.library, '_deallocate_container')
mock_pvt_storage_delete = self.mock_object(
self.library.private_storage, 'delete')
result = self.library._update_create_from_snapshot_status(
fake.SHARE, fake.SHARE_SERVER)
expected_result = {'status': constants.STATUS_ERROR}
mock_pvt_storage_get.assert_called_once_with(fake.SHARE['id'],
'source_share')
mock__create_continue.assert_called_once_with(fake.SHARE,
fake.SHARE_SERVER)
mock_dm_constr.assert_called_once()
mock_delete_snapmirror.assert_called_once_with(fake_src_share,
fake.SHARE)
mock_dm_backend.assert_called_once_with(fake_src_share)
mock_dm_get_src_client.assert_called_once_with(
fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
mock_get_backend_shr_name.assert_called_once_with(fake_src_share['id'])
mock_share_exists.assert_called_once_with(fake.SHARE_NAME,
src_vserver_client)
mock_deallocate_container.assert_called_once_with(fake.SHARE_NAME,
src_vserver_client)
mock_pvt_storage_delete.assert_called_once_with(fake.SHARE['id'])
self.assertEqual(expected_result, result)
def _setup_mocks_for_create_from_snapshot_continue(
self, src_host=fake.MANILA_HOST_NAME,
dest_host=fake.MANILA_HOST_NAME, split_completed_result=True,
move_completed_result=True, share_internal_state='fake_state',
replica_state='in_sync'):
self.fake_export_location = 'fake_export_location'
self.fake_src_share = {
'id': fake.SHARE['id'],
'host': src_host,
'internal_state': share_internal_state,
}
self.copy_fake_src_share = copy.deepcopy(self.fake_src_share)
src_pool = src_host.split('#')[1]
dest_pool = dest_host.split('#')[1]
self.src_vserver_client = mock.Mock()
self.dest_vserver_client = mock.Mock()
self.mock_dm_session = mock.Mock()
self.mock_dm_constr = self.mock_object(
data_motion, "DataMotionSession",
mock.Mock(return_value=self.mock_dm_session))
self.mock_pvt_storage_get = self.mock_object(
self.library.private_storage, 'get',
mock.Mock(return_value=json.dumps(self.copy_fake_src_share)))
self.mock_dm_backend = self.mock_object(
self.mock_dm_session, 'get_backend_info_for_share',
mock.Mock(return_value=(None,
fake.VSERVER1, fake.BACKEND_NAME)))
self.mock_extract_host = self.mock_object(
share_utils, 'extract_host',
mock.Mock(side_effect=[src_pool, dest_pool]))
self.mock_dm_get_src_client = self.mock_object(
data_motion, 'get_client_for_backend',
mock.Mock(return_value=self.src_vserver_client))
self.mock_get_vserver = self.mock_object(
self.library, '_get_vserver',
mock.Mock(return_value=(fake.VSERVER2, self.dest_vserver_client)))
self.mock_split_completed = self.mock_object(
self.library, '_check_volume_clone_split_completed',
mock.Mock(return_value=split_completed_result))
self.mock_rehost_vol = self.mock_object(
self.library, '_rehost_and_mount_volume')
self.mock_move_vol = self.mock_object(self.library,
'_move_volume_after_splitting')
self.mock_move_completed = self.mock_object(
self.library, '_check_volume_move_completed',
mock.Mock(return_value=move_completed_result))
self.mock_update_rep_state = self.mock_object(
self.library, 'update_replica_state',
mock.Mock(return_value=replica_state)
)
self.mock_update_snapmirror = self.mock_object(
self.mock_dm_session, 'update_snapmirror')
self.mock_break_snapmirror = self.mock_object(
self.mock_dm_session, 'break_snapmirror')
self.mock_delete_snapmirror = self.mock_object(
self.mock_dm_session, 'delete_snapmirror')
self.mock_get_backend_shr_name = self.mock_object(
self.library, '_get_backend_share_name',
mock.Mock(return_value=fake.SHARE_NAME))
self.mock__delete_share = self.mock_object(self.library,
'_delete_share')
self.mock_set_vol_size_fixes = self.mock_object(
self.dest_vserver_client, 'set_volume_filesys_size_fixed')
self.mock_create_export = self.mock_object(
self.library, '_create_export',
mock.Mock(return_value=self.fake_export_location))
self.mock_pvt_storage_update = self.mock_object(
self.library.private_storage, 'update')
self.mock_pvt_storage_delete = self.mock_object(
self.library.private_storage, 'delete')
self.mock_get_extra_specs_qos = self.mock_object(
share_types, 'get_extra_specs_from_share',
mock.Mock(return_value=fake.EXTRA_SPEC_WITH_QOS))
self.mock__get_provisioning_opts = self.mock_object(
self.library, '_get_provisioning_options',
mock.Mock(return_value=copy.deepcopy(fake.PROVISIONING_OPTIONS))
)
self.mock_modify_create_qos = self.mock_object(
self.library, '_modify_or_create_qos_for_existing_share',
mock.Mock(return_value=fake.QOS_POLICY_GROUP_NAME))
self.mock_modify_vol = self.mock_object(self.dest_vserver_client,
'modify_volume')
self.mock_get_backend_qos_name = self.mock_object(
self.library, '_get_backend_qos_policy_group_name',
mock.Mock(return_value=fake.QOS_POLICY_GROUP_NAME))
self.mock_mark_qos_deletion = self.mock_object(
self.src_vserver_client, 'mark_qos_policy_group_for_deletion')
@ddt.data(fake.MANILA_HOST_NAME, fake.MANILA_HOST_NAME_2)
def test__create_from_snapshot_continue_state_splitting(self, src_host):
self._setup_mocks_for_create_from_snapshot_continue(
src_host=src_host,
share_internal_state=self.library.STATE_SPLITTING_VOLUME_CLONE)
result = self.library._create_from_snapshot_continue(fake.SHARE,
fake.SHARE_SERVER)
fake.SHARE['share_server'] = fake.SHARE_SERVER
self.mock_pvt_storage_get.assert_called_once_with(fake.SHARE['id'],
'source_share')
self.mock_dm_backend.assert_called_once_with(self.fake_src_share)
self.mock_extract_host.assert_has_calls([
mock.call(self.fake_src_share['host'], level='pool'),
mock.call(fake.SHARE['host'], level='pool'),
])
self.mock_dm_get_src_client.assert_called_once_with(
fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
self.mock_get_vserver.assert_called_once_with(fake.SHARE_SERVER)
self.mock_split_completed.assert_called_once_with(
self.fake_src_share, self.src_vserver_client)
self.mock_get_backend_qos_name.assert_called_once_with(fake.SHARE_ID)
self.mock_mark_qos_deletion.assert_called_once_with(
fake.QOS_POLICY_GROUP_NAME)
self.mock_rehost_vol.assert_called_once_with(
fake.SHARE, fake.VSERVER1, self.src_vserver_client,
fake.VSERVER2, self.dest_vserver_client)
if src_host != fake.MANILA_HOST_NAME:
expected_result = {
'status': constants.STATUS_CREATING_FROM_SNAPSHOT
}
self.mock_move_vol.assert_called_once_with(
self.fake_src_share, fake.SHARE, fake.SHARE_SERVER,
cutover_action='defer')
self.fake_src_share['internal_state'] = (
self.library.STATE_MOVING_VOLUME)
self.mock_pvt_storage_update.assert_called_once_with(
fake.SHARE['id'],
{'source_share': json.dumps(self.fake_src_share)}
)
self.assertEqual(expected_result, result)
else:
self.mock_get_extra_specs_qos.assert_called_once_with(fake.SHARE)
self.mock__get_provisioning_opts.assert_called_once_with(
fake.EXTRA_SPEC_WITH_QOS)
self.mock_modify_create_qos.assert_called_once_with(
fake.SHARE, fake.EXTRA_SPEC_WITH_QOS, fake.VSERVER2,
self.dest_vserver_client)
self.mock_get_backend_shr_name.assert_called_once_with(
fake.SHARE_ID)
self.mock_modify_vol.assert_called_once_with(
fake.POOL_NAME, fake.SHARE_NAME,
**fake.PROVISIONING_OPTIONS_WITH_QOS)
self.mock_pvt_storage_delete.assert_called_once_with(
fake.SHARE['id'])
self.mock_create_export.assert_called_once_with(
fake.SHARE, fake.SHARE_SERVER, fake.VSERVER2,
self.dest_vserver_client, clear_current_export_policy=False)
expected_result = {
'status': constants.STATUS_AVAILABLE,
'export_locations': self.fake_export_location,
}
self.assertEqual(expected_result, result)
@ddt.data(True, False)
def test__create_from_snapshot_continue_state_moving(self, move_completed):
self._setup_mocks_for_create_from_snapshot_continue(
share_internal_state=self.library.STATE_MOVING_VOLUME,
move_completed_result=move_completed)
result = self.library._create_from_snapshot_continue(fake.SHARE,
fake.SHARE_SERVER)
expect_result = {
'status': constants.STATUS_CREATING_FROM_SNAPSHOT
}
fake.SHARE['share_server'] = fake.SHARE_SERVER
self.mock_pvt_storage_get.assert_called_once_with(fake.SHARE['id'],
'source_share')
self.mock_dm_backend.assert_called_once_with(self.fake_src_share)
self.mock_extract_host.assert_has_calls([
mock.call(self.fake_src_share['host'], level='pool'),
mock.call(fake.SHARE['host'], level='pool'),
])
self.mock_dm_get_src_client.assert_called_once_with(
fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
self.mock_get_vserver.assert_called_once_with(fake.SHARE_SERVER)
self.mock_move_completed.assert_called_once_with(
fake.SHARE, fake.SHARE_SERVER)
if move_completed:
expect_result['status'] = constants.STATUS_AVAILABLE
self.mock_pvt_storage_delete.assert_called_once_with(
fake.SHARE['id'])
self.mock_create_export.assert_called_once_with(
fake.SHARE, fake.SHARE_SERVER, fake.VSERVER2,
self.dest_vserver_client, clear_current_export_policy=False)
expect_result['export_locations'] = self.fake_export_location
self.assertEqual(expect_result, result)
else:
self.mock_pvt_storage_update.assert_called_once_with(
fake.SHARE['id'],
{'source_share': json.dumps(self.fake_src_share)}
)
self.assertEqual(expect_result, result)
@ddt.data('in_sync', 'out_of_sync')
def test__create_from_snapshot_continue_state_snapmirror(self,
replica_state):
self._setup_mocks_for_create_from_snapshot_continue(
share_internal_state=self.library.STATE_SNAPMIRROR_DATA_COPYING,
replica_state=replica_state)
result = self.library._create_from_snapshot_continue(fake.SHARE,
fake.SHARE_SERVER)
expect_result = {
'status': constants.STATUS_CREATING_FROM_SNAPSHOT
}
fake.SHARE['share_server'] = fake.SHARE_SERVER
self.mock_pvt_storage_get.assert_called_once_with(fake.SHARE['id'],
'source_share')
self.mock_dm_backend.assert_called_once_with(self.fake_src_share)
self.mock_extract_host.assert_has_calls([
mock.call(self.fake_src_share['host'], level='pool'),
mock.call(fake.SHARE['host'], level='pool'),
])
self.mock_dm_get_src_client.assert_called_once_with(
fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
self.mock_get_vserver.assert_called_once_with(fake.SHARE_SERVER)
self.mock_update_rep_state.assert_called_once_with(
None, [self.fake_src_share], fake.SHARE, [], [], fake.SHARE_SERVER)
if replica_state == constants.REPLICA_STATE_IN_SYNC:
self.mock_update_snapmirror.assert_called_once_with(
self.fake_src_share, fake.SHARE)
self.mock_break_snapmirror.assert_called_once_with(
self.fake_src_share, fake.SHARE)
self.mock_delete_snapmirror.assert_called_once_with(
self.fake_src_share, fake.SHARE)
self.mock_get_backend_shr_name.assert_has_calls(
[mock.call(self.fake_src_share['id']),
mock.call(fake.SHARE_ID)])
self.mock__delete_share.assert_called_once_with(
self.fake_src_share, self.src_vserver_client,
remove_export=False)
self.mock_set_vol_size_fixes.assert_called_once_with(
fake.SHARE_NAME, filesys_size_fixed=False)
self.mock_get_extra_specs_qos.assert_called_once_with(fake.SHARE)
self.mock__get_provisioning_opts.assert_called_once_with(
fake.EXTRA_SPEC_WITH_QOS)
self.mock_modify_create_qos.assert_called_once_with(
fake.SHARE, fake.EXTRA_SPEC_WITH_QOS, fake.VSERVER2,
self.dest_vserver_client)
self.mock_modify_vol.assert_called_once_with(
fake.POOL_NAME, fake.SHARE_NAME,
**fake.PROVISIONING_OPTIONS_WITH_QOS)
expect_result['status'] = constants.STATUS_AVAILABLE
self.mock_pvt_storage_delete.assert_called_once_with(
fake.SHARE['id'])
self.mock_create_export.assert_called_once_with(
fake.SHARE, fake.SHARE_SERVER, fake.VSERVER2,
self.dest_vserver_client, clear_current_export_policy=False)
expect_result['export_locations'] = self.fake_export_location
self.assertEqual(expect_result, result)
elif replica_state not in [constants.STATUS_ERROR, None]:
self.mock_pvt_storage_update.assert_called_once_with(
fake.SHARE['id'],
{'source_share': json.dumps(self.fake_src_share)}
)
self.assertEqual(expect_result, result)

def test__create_from_snapshot_continue_state_unknown(self):
self._setup_mocks_for_create_from_snapshot_continue(
share_internal_state='unknown_state')
self.assertRaises(exception.NetAppException,
self.library._create_from_snapshot_continue,
fake.SHARE,
fake.SHARE_SERVER)
self.mock_pvt_storage_delete.assert_called_once_with(fake.SHARE_ID)
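# Editor's sketch: a condensed view of the continuation flow the tests
# above pin down. This is a hypothetical simplification -- the state
# constants match the ones referenced by the mocks, but the helper names
# _check_volume_move_completed and _sync_and_cleanup_snapmirror are
# illustrative stand-ins, not the driver's actual private API.
import json

from manila.common import constants
from manila import exception


def create_from_snapshot_continue_sketch(library, share, dest_vserver,
                                         dest_client, share_server=None):
    status = {'status': constants.STATUS_CREATING_FROM_SNAPSHOT}
    source_share = json.loads(
        library.private_storage.get(share['id'], 'source_share'))
    state = source_share.get('internal_state')
    if state == library.STATE_MOVING_VOLUME:
        # Volume move path: poll until the move reports completion.
        finished = library._check_volume_move_completed(share, share_server)
    elif state == library.STATE_SNAPMIRROR_DATA_COPYING:
        # SnapMirror path: once the replica is 'in_sync', update, break
        # and delete the mirror, then drop the temporary source share.
        finished = library._sync_and_cleanup_snapmirror(source_share, share)
    else:
        # Unknown state: drop the tracking entry and fail loudly, as the
        # unknown-state test above asserts.
        library.private_storage.delete(share['id'])
        raise exception.NetAppException(
            'Unexpected internal state %s while creating share from '
            'snapshot.' % state)
    if finished:
        library.private_storage.delete(share['id'])
        status['status'] = constants.STATUS_AVAILABLE
        status['export_locations'] = library._create_export(
            share, share_server, dest_vserver, dest_client,
            clear_current_export_policy=False)
    else:
        # Not done yet: persist progress and report 'creating' again.
        library.private_storage.update(
            share['id'], {'source_share': json.dumps(source_share)})
    return status
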
@ddt.data(False, True)
def test_allocate_container(self, hide_snapdir):

@@ -709,7 +1220,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
        vserver_client)
    mock_get_provisioning_opts.assert_called_once_with(
-       fake.SHARE_INSTANCE, fake.VSERVER1, replica=False)
+       fake.SHARE_INSTANCE, fake.VSERVER1, vserver_client=vserver_client,
+       replica=False)
    vserver_client.create_volume.assert_called_once_with(
        fake.POOL_NAME, fake.SHARE_NAME, fake.SHARE['size'],

@@ -745,7 +1257,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
        vserver_client, replica=True)
    mock_get_provisioning_opts.assert_called_once_with(
-       fake.SHARE_INSTANCE, fake.VSERVER1, replica=True)
+       fake.SHARE_INSTANCE, fake.VSERVER1, vserver_client=vserver_client,
+       replica=True)
    vserver_client.create_volume.assert_called_once_with(
        fake.POOL_NAME, fake.SHARE_NAME, fake.SHARE['size'],

@@ -842,6 +1355,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
def test_get_provisioning_options_for_share(self, extra_specs, is_replica):
    qos = True if fake.QOS_EXTRA_SPEC in extra_specs else False
+   vserver_client = mock.Mock()
    mock_get_extra_specs_from_share = self.mock_object(
        share_types, 'get_extra_specs_from_share',
        mock.Mock(return_value=extra_specs))

@@ -861,7 +1375,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
        return_value=fake.QOS_POLICY_GROUP_NAME))
    result = self.library._get_provisioning_options_for_share(
-       fake.SHARE_INSTANCE, fake.VSERVER1, replica=is_replica)
+       fake.SHARE_INSTANCE, fake.VSERVER1, vserver_client=vserver_client,
+       replica=is_replica)
    if qos and is_replica:
        expected_provisioning_opts = fake.PROVISIONING_OPTIONS

@@ -870,7 +1385,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
        expected_provisioning_opts = fake.PROVISIONING_OPTIONS_WITH_QOS
        mock_create_qos_policy_group.assert_called_once_with(
            fake.SHARE_INSTANCE, fake.VSERVER1,
-           {fake.QOS_NORMALIZED_SPEC: 3000})
+           {fake.QOS_NORMALIZED_SPEC: 3000}, vserver_client)
    self.assertEqual(expected_provisioning_opts, result)
    mock_get_extra_specs_from_share.assert_called_once_with(

@@ -1053,14 +1568,15 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
        fake.AGGREGATES[1],
        fake.EXTRA_SPEC)

-@ddt.data({'provider_location': None, 'size': 50, 'hide_snapdir': True},
+@ddt.data({'provider_location': None, 'size': 50, 'hide_snapdir': True,
+           'split': None},
          {'provider_location': 'fake_location', 'size': 30,
-          'hide_snapdir': False},
+          'hide_snapdir': False, 'split': True},
          {'provider_location': 'fake_location', 'size': 20,
-          'hide_snapdir': True})
+          'hide_snapdir': True, 'split': False})
@ddt.unpack
def test_allocate_container_from_snapshot(
-       self, provider_location, size, hide_snapdir):
+       self, provider_location, size, hide_snapdir, split):
    provisioning_options = copy.deepcopy(fake.PROVISIONING_OPTIONS)
    provisioning_options['hide_snapdir'] = hide_snapdir

@@ -1070,6 +1586,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
    vserver = fake.VSERVER1
    vserver_client = mock.Mock()
    original_snapshot_size = 20
+   expected_split_op = split or fake.PROVISIONING_OPTIONS['split']
    fake_share_inst = copy.deepcopy(fake.SHARE_INSTANCE)
    fake_share_inst['size'] = size

@@ -1089,12 +1606,12 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
    parent_snapshot_name = self.library._get_backend_snapshot_name(
        fake_snapshot['id']) if not provider_location else 'fake_location'
    mock_get_provisioning_opts.assert_called_once_with(
-       fake_share_inst, fake.VSERVER1)
+       fake_share_inst, fake.VSERVER1, vserver_client=vserver_client)
    vserver_client.create_volume_clone.assert_called_once_with(
        share_name, parent_share_name, parent_snapshot_name,
        thin_provisioned=True, snapshot_policy='default',
-       language='en-US', dedup_enabled=True, split=True, encrypt=False,
-       compression_enabled=False, max_files=5000)
+       language='en-US', dedup_enabled=True, split=expected_split_op,
+       encrypt=False, compression_enabled=False, max_files=5000)
    if size > original_snapshot_size:
        vserver_client.set_volume_size.assert_called_once_with(
            share_name, size)

@@ -1150,7 +1667,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
    mock_remove_export.assert_called_once_with(fake.SHARE, vserver_client)
    mock_deallocate_container.assert_called_once_with(share_name,
                                                      vserver_client)
-   (self.library._client.mark_qos_policy_group_for_deletion
+   (vserver_client.mark_qos_policy_group_for_deletion
        .assert_called_once_with(qos_policy_name))
    self.assertEqual(0, lib_base.LOG.info.call_count)

@@ -4555,7 +5072,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
    self.assertTrue(mock_info_log.called)
    mock_move.assert_called_once_with(
        fake.SHARE_NAME, fake.VSERVER1, 'destination_pool',
-       encrypt_destination=False)
+       cutover_action='wait', encrypt_destination=False)

def test_migration_start_encrypted_destination(self):
    mock_info_log = self.mock_object(lib_base.LOG, 'info')

@@ -4581,7 +5098,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
    self.assertTrue(mock_info_log.called)
    mock_move.assert_called_once_with(
        fake.SHARE_NAME, fake.VSERVER1, 'destination_pool',
-       encrypt_destination=True)
+       cutover_action='wait', encrypt_destination=True)

def test_migration_continue_volume_move_failed(self):
    source_snapshots = mock.Mock()

@@ -4881,7 +5398,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
    self.assertEqual(qos_policy_name, retval)
    self.library._client.qos_policy_group_modify.assert_not_called()
    self.library._create_qos_policy_group.assert_called_once_with(
-       share_obj, fake.VSERVER1, {'maxiops': '3000'})
+       share_obj, fake.VSERVER1, {'maxiops': '3000'},
+       vserver_client=vserver_client)

@ddt.data(utils.annotated('volume_has_shared_qos_policy', (2, )),
          utils.annotated('volume_has_nonshared_qos_policy', (1, )))

@@ -4920,7 +5438,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
        'id': fake.SHARE['id'],
    }
    mock_create_qos_policy.assert_called_once_with(
-       share_obj, fake.VSERVER1, {'maxiops': '3000'})
+       share_obj, fake.VSERVER1, {'maxiops': '3000'},
+       vserver_client=vserver_client)
    self.library._client.qos_policy_group_modify.assert_not_called()
    self.library._client.qos_policy_group_rename.assert_not_called()

@@ -5072,3 +5591,131 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
        mock.call('share_s_2', True),
        mock.call('share_s_3', True),
    ])

def test__check_volume_clone_split_completed(self):
vserver_client = mock.Mock()
mock_share_name = self.mock_object(
self.library, '_get_backend_share_name',
mock.Mock(return_value=fake.SHARE_NAME))
vserver_client.check_volume_clone_split_completed.return_value = (
fake.CDOT_SNAPSHOT_BUSY_SNAPMIRROR)
self.library._check_volume_clone_split_completed(fake.SHARE,
vserver_client)
mock_share_name.assert_called_once_with(fake.SHARE_ID)
check_call = vserver_client.check_volume_clone_split_completed
check_call.assert_called_once_with(fake.SHARE_NAME)

@ddt.data(constants.STATUS_ACTIVE, constants.STATUS_CREATING_FROM_SNAPSHOT)
def test_get_share_status(self, status):
mock_update_from_snap = self.mock_object(
self.library, '_update_create_from_snapshot_status')
fake.SHARE['status'] = status
self.library.get_share_status(fake.SHARE, fake.SHARE_SERVER)
if status == constants.STATUS_CREATING_FROM_SNAPSHOT:
mock_update_from_snap.assert_called_once_with(fake.SHARE,
fake.SHARE_SERVER)
else:
mock_update_from_snap.assert_not_called()
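# Editor's sketch of the dispatch this test implies: only shares that are
# still being created from a snapshot get their status recomputed, via the
# _update_create_from_snapshot_status helper mocked above. Assumes a
# 'library' object exposing that helper; purely illustrative.
from manila.common import constants


def get_share_status_sketch(library, share, share_server=None):
    if share['status'] == constants.STATUS_CREATING_FROM_SNAPSHOT:
        return library._update_create_from_snapshot_status(
            share, share_server)
    # Any other status is left for the share manager to handle as usual.
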

def test_volume_rehost(self):
mock_share_name = self.mock_object(
self.library, '_get_backend_share_name',
mock.Mock(return_value=fake.SHARE_NAME))
mock_rehost = self.mock_object(self.client, 'rehost_volume')
self.library.volume_rehost(fake.SHARE, fake.VSERVER1, fake.VSERVER2)
mock_share_name.assert_called_once_with(fake.SHARE_ID)
mock_rehost.assert_called_once_with(fake.SHARE_NAME, fake.VSERVER1,
fake.VSERVER2)

def test__rehost_and_mount_volume(self):
mock_share_name = self.mock_object(
self.library, '_get_backend_share_name',
mock.Mock(return_value=fake.SHARE_NAME))
mock_rehost = self.mock_object(self.library, 'volume_rehost',
mock.Mock())
src_vserver_client = mock.Mock()
mock_unmount = self.mock_object(src_vserver_client, 'unmount_volume')
dst_vserver_client = mock.Mock()
mock_mount = self.mock_object(dst_vserver_client, 'mount_volume')
self.library._rehost_and_mount_volume(
fake.SHARE, fake.VSERVER1, src_vserver_client, fake.VSERVER2,
dst_vserver_client)
mock_share_name.assert_called_once_with(fake.SHARE_ID)
mock_unmount.assert_called_once_with(fake.SHARE_NAME)
mock_rehost.assert_called_once_with(fake.SHARE, fake.VSERVER1,
fake.VSERVER2)
mock_mount.assert_called_once_with(fake.SHARE_NAME)
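# Editor's sketch of the ordering the assertions above encode: the FlexVol
# must be unmounted from the source Vserver's namespace before the rehost,
# and mounted again on the destination afterwards. Names are taken from the
# mocks; the function itself is an illustrative stand-in.
def rehost_and_mount_volume_sketch(library, share, src_vserver, src_client,
                                   dest_vserver, dest_client):
    volume_name = library._get_backend_share_name(share['id'])
    src_client.unmount_volume(volume_name)
    library.volume_rehost(share, src_vserver, dest_vserver)
    dest_client.mount_volume(volume_name)
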

def test__move_volume_after_splitting(self):
src_share = fake_share.fake_share_instance(id='source-share-instance')
dest_share = fake_share.fake_share_instance(id='dest-share-instance')
cutover_action = 'defer'
self.library.configuration.netapp_start_volume_move_timeout = 15
self.mock_object(time, 'sleep')
mock_warning_log = self.mock_object(lib_base.LOG, 'warning')
mock_vol_move = self.mock_object(self.library, '_move_volume')
self.library._move_volume_after_splitting(
src_share, dest_share, share_server=fake.SHARE_SERVER,
cutover_action=cutover_action)
mock_vol_move.assert_called_once_with(src_share, dest_share,
fake.SHARE_SERVER,
cutover_action)
self.assertEqual(0, mock_warning_log.call_count)

def test__move_volume_after_splitting_timeout(self):
src_share = fake_share.fake_share_instance(id='source-share-instance')
dest_share = fake_share.fake_share_instance(id='dest-share-instance')
self.library.configuration.netapp_start_volume_move_timeout = 15
cutover_action = 'defer'
self.mock_object(time, 'sleep')
mock_warning_log = self.mock_object(lib_base.LOG, 'warning')
undergoing_split_op_msg = (
'The volume is undergoing a clone split operation.')
na_api_error = netapp_api.NaApiError(code=netapp_api.EAPIERROR,
message=undergoing_split_op_msg)
mock_move_vol = self.mock_object(
self.library, '_move_volume', mock.Mock(side_effect=na_api_error))
self.assertRaises(exception.NetAppException,
self.library._move_volume_after_splitting,
src_share, dest_share,
share_server=fake.SHARE_SERVER,
cutover_action=cutover_action)
self.assertEqual(3, mock_move_vol.call_count)
self.assertEqual(3, mock_warning_log.call_count)

def test__move_volume_after_splitting_api_not_found(self):
src_share = fake_share.fake_share_instance(id='source-share-instance')
dest_share = fake_share.fake_share_instance(id='dest-share-instance')
self.library.configuration.netapp_start_volume_move_timeout = 15
cutover_action = 'defer'
self.mock_object(time, 'sleep')
mock_warning_log = self.mock_object(lib_base.LOG, 'warning')
na_api_error = netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND)
mock_move_vol = self.mock_object(
self.library, '_move_volume', mock.Mock(side_effect=na_api_error))
self.assertRaises(exception.NetAppException,
self.library._move_volume_after_splitting,
src_share, dest_share,
share_server=fake.SHARE_SERVER,
cutover_action=cutover_action)
mock_move_vol.assert_called_once_with(src_share, dest_share,
fake.SHARE_SERVER,
cutover_action)
mock_warning_log.assert_not_called()
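# Editor's sketch of the retry behaviour the three tests above encode:
# retry the move while ONTAP reports the clone split still running, warn on
# each retry, fail fast on unrelated API errors, and raise NetAppException
# once the netapp_start_volume_move_timeout budget is spent. The 5-second
# interval is an assumption chosen so a 15-second timeout yields the three
# attempts asserted above.
import time

from oslo_log import log

from manila import exception
from manila.share.drivers.netapp.dataontap.client import api as netapp_api

LOG = log.getLogger(__name__)


def move_volume_after_splitting_sketch(library, source_share, dest_share,
                                       share_server=None,
                                       cutover_action='defer'):
    timeout = library.configuration.netapp_start_volume_move_timeout
    interval = 5
    for _ in range(int(timeout / interval)):
        try:
            return library._move_volume(source_share, dest_share,
                                        share_server, cutover_action)
        except netapp_api.NaApiError as e:
            undergoing_split = (e.code == netapp_api.EAPIERROR and
                                'undergoing a clone split' in e.message)
            if not undergoing_split:
                # Unrelated API failure: do not keep retrying.
                break
            LOG.warning('Volume move is blocked by an ongoing clone split '
                        'operation. Retrying.')
            time.sleep(interval)
    raise exception.NetAppException('Volume move operation could not be '
                                    'started.')
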

View File

@@ -1108,3 +1108,108 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
    self.library._client.delete_vserver_peer.assert_called_once_with(
        self.fake_vserver, self.fake_new_vserver_name
    )

def test_create_share_from_snapshot(self):
fake_parent_share = copy.deepcopy(fake.SHARE)
fake_parent_share['id'] = fake.SHARE_ID2
mock_create_from_snap = self.mock_object(
lib_base.NetAppCmodeFileStorageLibrary,
'create_share_from_snapshot')
self.library.create_share_from_snapshot(
None, fake.SHARE, fake.SNAPSHOT, share_server=fake.SHARE_SERVER,
parent_share=fake_parent_share)
mock_create_from_snap.assert_called_once_with(
None, fake.SHARE, fake.SNAPSHOT, share_server=fake.SHARE_SERVER,
parent_share=fake_parent_share
)

@ddt.data(
{'src_cluster_name': fake.CLUSTER_NAME,
'dest_cluster_name': fake.CLUSTER_NAME, 'has_vserver_peers': None},
{'src_cluster_name': fake.CLUSTER_NAME,
'dest_cluster_name': fake.CLUSTER_NAME_2, 'has_vserver_peers': False},
{'src_cluster_name': fake.CLUSTER_NAME,
'dest_cluster_name': fake.CLUSTER_NAME_2, 'has_vserver_peers': True}
)
@ddt.unpack
def test_create_share_from_snapshot_different_hosts(self, src_cluster_name,
dest_cluster_name,
has_vserver_peers):
class FakeDBObj(dict):
def to_dict(self):
return self
fake_parent_share = copy.deepcopy(fake.SHARE)
fake_parent_share['id'] = fake.SHARE_ID2
fake_parent_share['host'] = fake.MANILA_HOST_NAME_2
fake_share = FakeDBObj(fake.SHARE)
fake_share_server = FakeDBObj(fake.SHARE_SERVER)
src_vserver = fake.VSERVER2
dest_vserver = fake.VSERVER1
src_backend = fake.BACKEND_NAME
dest_backend = fake.BACKEND_NAME_2
mock_dm_session = mock.Mock()
mock_dm_constr = self.mock_object(
data_motion, "DataMotionSession",
mock.Mock(return_value=mock_dm_session))
mock_get_vserver = self.mock_object(
mock_dm_session, 'get_vserver_from_share',
mock.Mock(side_effect=[src_vserver, dest_vserver]))
src_vserver_client = mock.Mock()
dest_vserver_client = mock.Mock()
mock_extract_host = self.mock_object(
share_utils, 'extract_host',
mock.Mock(side_effect=[src_backend, dest_backend]))
mock_dm_get_client = self.mock_object(
data_motion, 'get_client_for_backend',
mock.Mock(side_effect=[src_vserver_client, dest_vserver_client]))
mock_get_src_cluster_name = self.mock_object(
src_vserver_client, 'get_cluster_name',
mock.Mock(return_value=src_cluster_name))
mock_get_dest_cluster_name = self.mock_object(
dest_vserver_client, 'get_cluster_name',
mock.Mock(return_value=dest_cluster_name))
mock_get_vserver_peers = self.mock_object(
self.library, '_get_vserver_peers',
mock.Mock(return_value=has_vserver_peers))
mock_create_vserver_peer = self.mock_object(dest_vserver_client,
'create_vserver_peer')
mock_accept_peer = self.mock_object(src_vserver_client,
'accept_vserver_peer')
mock_create_from_snap = self.mock_object(
lib_base.NetAppCmodeFileStorageLibrary,
'create_share_from_snapshot')
self.library.create_share_from_snapshot(
None, fake_share, fake.SNAPSHOT, share_server=fake_share_server,
parent_share=fake_parent_share)
internal_share = copy.deepcopy(fake.SHARE)
internal_share['share_server'] = copy.deepcopy(fake.SHARE_SERVER)
mock_dm_constr.assert_called_once()
mock_get_vserver.assert_has_calls([mock.call(fake_parent_share),
mock.call(internal_share)])
mock_extract_host.assert_has_calls([
mock.call(fake_parent_share['host'], level='backend_name'),
mock.call(internal_share['host'], level='backend_name')])
mock_dm_get_client.assert_has_calls([
mock.call(src_backend, vserver_name=src_vserver),
mock.call(dest_backend, vserver_name=dest_vserver)
])
mock_get_src_cluster_name.assert_called_once()
mock_get_dest_cluster_name.assert_called_once()
if src_cluster_name != dest_cluster_name:
mock_get_vserver_peers.assert_called_once_with(dest_vserver,
src_vserver)
if not has_vserver_peers:
mock_create_vserver_peer.assert_called_once_with(
dest_vserver, src_vserver,
peer_cluster_name=src_cluster_name)
mock_accept_peer.assert_called_once_with(src_vserver,
dest_vserver)
mock_create_from_snap.assert_called_once_with(
None, fake.SHARE, fake.SNAPSHOT, share_server=fake.SHARE_SERVER,
parent_share=fake_parent_share)
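# Editor's sketch of the peering pre-check exercised above (a hypothetical
# condensation of the multi-SVM override): Vserver peering is only set up
# when source and destination live on different clusters and no peer
# relationship exists yet; intra-cluster requests fall straight through to
# the base implementation.
def ensure_vservers_peered_sketch(library, src_client, dest_client,
                                  src_vserver, dest_vserver):
    src_cluster = src_client.get_cluster_name()
    dest_cluster = dest_client.get_cluster_name()
    if src_cluster == dest_cluster:
        return
    if not library._get_vserver_peers(dest_vserver, src_vserver):
        dest_client.create_vserver_peer(
            dest_vserver, src_vserver, peer_cluster_name=src_cluster)
        src_client.accept_vserver_peer(src_vserver, dest_vserver)
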

View File

@@ -18,12 +18,15 @@ import copy
from manila.common import constants
import manila.tests.share.drivers.netapp.fakes as na_fakes

+CLUSTER_NAME = 'fake_cluster'
+CLUSTER_NAME_2 = 'fake_cluster_2'
BACKEND_NAME = 'fake_backend_name'
+BACKEND_NAME_2 = 'fake_backend_name_2'
DRIVER_NAME = 'fake_driver_name'
APP_VERSION = 'fake_app_vsersion'
HOST_NAME = 'fake_host'
POOL_NAME = 'fake_pool'
+POOL_NAME_2 = 'fake_pool_2'
VSERVER1 = 'fake_vserver_1'
VSERVER2 = 'fake_vserver_2'
LICENSES = ('base', 'cifs', 'fcp', 'flexclone', 'iscsi', 'nfs', 'snapmirror',

@@ -73,6 +76,10 @@ MTU = 1234
DEFAULT_MTU = 1500
MANILA_HOST_NAME = '%(host)s@%(backend)s#%(pool)s' % {
    'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME}
+MANILA_HOST_NAME_2 = '%(host)s@%(backend)s#%(pool)s' % {
+    'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME_2}
+MANILA_HOST_NAME_3 = '%(host)s@%(backend)s#%(pool)s' % {
+    'host': HOST_NAME, 'backend': BACKEND_NAME_2, 'pool': POOL_NAME_2}
QOS_EXTRA_SPEC = 'netapp:maxiops'
QOS_SIZE_DEPENDENT_EXTRA_SPEC = 'netapp:maxbpspergib'
QOS_NORMALIZED_SPEC = 'maxiops'
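
# Editor's note: the MANILA_HOST_NAME_* fakes follow manila's
# host@backend#pool format, which share_utils.extract_host (used throughout
# the tests above) decomposes; the values in the comments mirror what the
# tests mock for each level.
from manila.share import utils as share_utils

host = 'fake_host@fake_backend_name#fake_pool_2'  # MANILA_HOST_NAME_2
share_utils.extract_host(host, level='pool')          # 'fake_pool_2'
share_utils.extract_host(host, level='backend_name')  # 'fake_backend_name'
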
@@ -365,6 +372,16 @@ SHARE_SERVER = {
                            ADMIN_NETWORK_ALLOCATIONS),
}

+SHARE_SERVER_2 = {
+    'id': 'fake_id_2',
+    'share_network_id': 'c5b3a865-56d0-4d88-abe5-879965e099c9',
+    'backend_details': {
+        'vserver_name': VSERVER2
+    },
+    'network_allocations': (USER_NETWORK_ALLOCATIONS +
+                            ADMIN_NETWORK_ALLOCATIONS),
+}

VSERVER_PEER = [{
    'vserver': VSERVER1,
    'peer-vserver': VSERVER2,

View File

@@ -0,0 +1,7 @@
---
features:
- |
The NetApp driver now supports efficiently creating new shares from
snapshots in pools or back ends different from those of the source
share. For this functionality to work across different back ends,
replication must be enabled and configured accordingly.
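
As a concrete illustration of that requirement, the two NetApp back ends
involved would typically be placed in the same replication domain in
manila.conf. This is a hypothetical sketch: the section names, Vserver
names and domain value below are placeholders, not values from this patch.

[netapp1]
share_driver = manila.share.drivers.netapp.common.NetAppDriver
netapp_storage_family = ontap_cluster
netapp_vserver = fake_vserver_1
replication_domain = replication_domain_1

[netapp2]
share_driver = manila.share.drivers.netapp.common.NetAppDriver
netapp_storage_family = ontap_cluster
netapp_vserver = fake_vserver_2
replication_domain = replication_domain_1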