NetApp: Support share revert to snapshot

This commit adds support for the revert-to-snapshot feature to the
NetApp cDOT drivers for both normal and replicated shares.

Implements: blueprint netapp-cdot-share-revert-to-snapshot
Change-Id: Ia939eba03b3db9cbba0cc6c16184578e8c8893d1

parent f4aed13f86
commit 794e9a74fc
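
For orientation, here is a minimal sketch (not part of the diff itself) of the
call sequence this change enables, using only methods added or modified below.
The names vserver_client, dm_session, active_replica, non_active_replicas,
share_name and snapshot_name are illustrative stand-ins for the objects used
in the library code:

    # Non-replicated share: revert the FlexVol in place.
    vserver_client.restore_snapshot(share_name, snapshot_name)

    # Replicated share: break mirrors without remounting, remove SnapMirror
    # snapshots that would block the restore, revert, then resync.
    for replica in non_active_replicas:
        dm_session.break_snapmirror(active_replica, replica, mount=False)
    for sm_snapshot in vserver_client.list_snapmirror_snapshots(share_name):
        vserver_client.delete_snapshot(share_name, sm_snapshot,
                                       ignore_owners=True)
    vserver_client.restore_snapshot(share_name, snapshot_name)
    for replica in non_active_replicas:
        dm_session.resync_snapmirror(active_replica, replica)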
@@ -39,7 +39,7 @@ Mapping of share drivers and share features support
+----------------------------------------+-----------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+--------------------+
| Generic (Cinder as back-end) | J | K | L | L | J | J | M | \- |
+----------------------------------------+-----------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+--------------------+
-| NetApp Clustered Data ONTAP | J | L | L | L | J | J | N | \- |
+| NetApp Clustered Data ONTAP | J | L | L | L | J | J | N | O |
+----------------------------------------+-----------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+--------------------+
| EMC VNX | J | \- | \- | \- | J | J | \- | \- |
+----------------------------------------+-----------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+--------------------+

@@ -214,7 +214,7 @@ More information: :ref:`capabilities_and_extra_specs`
+----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+
| Generic (Cinder as back-end) | J | K | \- | \- | \- | L | \- | J | \- |
+----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+
-| NetApp Clustered Data ONTAP | J | K | M | M | M | L | \- | J | \- |
+| NetApp Clustered Data ONTAP | J | K | M | M | M | L | \- | J | O |
+----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+
| EMC VNX | J | \- | \- | \- | \- | L | \- | J | \- |
+----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+
@@ -2114,6 +2114,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
            },
            'desired-attributes': {
                'snapshot-info': {
                    'access-time': None,
                    'name': None,
                    'volume': None,
                    'busy': None,

@@ -2159,6 +2160,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):

        snapshot_info = snapshot_info_list[0]
        snapshot = {
            'access-time': snapshot_info.get_child_content('access-time'),
            'name': snapshot_info.get_child_content('name'),
            'volume': snapshot_info.get_child_content('volume'),
            'busy': strutils.bool_from_string(

@@ -2184,9 +2186,26 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
        self.send_request('snapshot-rename', api_args)

    @na_utils.trace
-    def delete_snapshot(self, volume_name, snapshot_name):
+    def restore_snapshot(self, volume_name, snapshot_name):
        """Reverts a volume to the specified snapshot."""
        api_args = {
            'volume': volume_name,
            'snapshot': snapshot_name,
        }
        self.send_request('snapshot-restore-volume', api_args)

    @na_utils.trace
    def delete_snapshot(self, volume_name, snapshot_name, ignore_owners=False):
        """Deletes a volume snapshot."""
-        api_args = {'volume': volume_name, 'snapshot': snapshot_name}

        ignore_owners = ('true' if strutils.bool_from_string(ignore_owners)
                         else 'false')

        api_args = {
            'volume': volume_name,
            'snapshot': snapshot_name,
            'ignore-owners': ignore_owners,
        }
        self.send_request('snapshot-delete', api_args)

    @na_utils.trace

@@ -3264,6 +3283,28 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):

        return has_snapmirrors

    def list_snapmirror_snapshots(self, volume_name, newer_than=None):
        """Gets SnapMirror snapshots on a volume."""
        api_args = {
            'query': {
                'snapshot-info': {
                    'dependency': 'snapmirror',
                    'volume': volume_name,
                },
            },
        }
        if newer_than:
            api_args['query']['snapshot-info'][
                'access-time'] = '>' + newer_than

        result = self.send_iter_request('snapshot-get-iter', api_args)

        attributes_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')

        return [snapshot_info.get_child_content('name')
                for snapshot_info in attributes_list.get_children()]

    @na_utils.trace
    def start_volume_move(self, volume_name, vserver, destination_aggregate,
                          cutover_action='wait'):
@@ -278,7 +278,7 @@ class DataMotionSession(object):
                                          dest_volume_name,
                                          clear_checkpoint=False)

-    def break_snapmirror(self, source_share_obj, dest_share_obj):
+    def break_snapmirror(self, source_share_obj, dest_share_obj, mount=True):
        """Breaks SnapMirror relationship.

        1. Quiesce any ongoing snapmirror transfers

@@ -304,6 +304,7 @@ class DataMotionSession(object):
                                           dest_volume_name)

        # 3. Mount the destination volume and create a junction path
        if mount:
            dest_client.mount_volume(dest_volume_name)

    def resync_snapmirror(self, source_share_obj, dest_share_obj):
@@ -55,6 +55,9 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
    def create_snapshot(self, context, snapshot, **kwargs):
        return self.library.create_snapshot(context, snapshot, **kwargs)

    def revert_to_snapshot(self, context, snapshot, **kwargs):
        return self.library.revert_to_snapshot(context, snapshot, **kwargs)

    def delete_share(self, context, share, **kwargs):
        self.library.delete_share(context, share, **kwargs)

@@ -165,6 +168,11 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
                                          replica_snapshot, share_server=None):
        raise NotImplementedError()

    def revert_to_replicated_snapshot(self, context, active_replica,
                                      replica_list, active_replica_snapshot,
                                      replica_snapshots, share_server=None):
        raise NotImplementedError()

    def migration_check_compatibility(self, context, source_share,
                                      destination_share, share_server=None,
                                      destination_share_server=None):
@@ -55,6 +55,9 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
    def create_snapshot(self, context, snapshot, **kwargs):
        return self.library.create_snapshot(context, snapshot, **kwargs)

    def revert_to_snapshot(self, context, snapshot, **kwargs):
        return self.library.revert_to_snapshot(context, snapshot, **kwargs)

    def delete_share(self, context, share, **kwargs):
        self.library.delete_share(context, share, **kwargs)

@@ -180,6 +183,13 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
            replica_list, share_replica, replica_snapshots, replica_snapshot,
            share_server=share_server)

    def revert_to_replicated_snapshot(self, context, active_replica,
                                      replica_list, active_replica_snapshot,
                                      replica_snapshots, **kwargs):
        return self.library.revert_to_replicated_snapshot(
            context, active_replica, replica_list, active_replica_snapshot,
            replica_snapshots, **kwargs)

    def migration_check_compatibility(self, context, source_share,
                                      destination_share, share_server=None,
                                      destination_share_server=None):
@@ -304,6 +304,9 @@ class NetAppCmodeFileStorageLibrary(object):
                'dedupe': [True, False],
                'compression': [True, False],
                'thin_provisioning': [True, False],
                'snapshot_support': True,
                'create_share_from_snapshot_support': True,
                'revert_to_snapshot_support': True,
            }

            # Add storage service catalog data.

@@ -792,6 +795,14 @@ class NetAppCmodeFileStorageLibrary(object):
        vserver_client.create_snapshot(share_name, snapshot_name)
        return {'provider_location': snapshot_name}

    def revert_to_snapshot(self, context, snapshot, share_server=None):
        """Reverts a share (in place) to the specified snapshot."""
        vserver, vserver_client = self._get_vserver(share_server=share_server)
        share_name = self._get_backend_share_name(snapshot['share_id'])
        snapshot_name = self._get_backend_snapshot_name(snapshot['id'])
        LOG.debug('Restoring snapshot %s', snapshot_name)
        vserver_client.restore_snapshot(share_name, snapshot_name)

    @na_utils.trace
    def delete_snapshot(self, context, snapshot, share_server=None,
                        snapshot_name=None):

@@ -1257,6 +1268,11 @@ class NetAppCmodeFileStorageLibrary(object):
            if r['replica_state'] == constants.REPLICA_STATE_ACTIVE:
                return r

    def _find_nonactive_replicas(self, replica_list):
        """Returns a list of all except the active replica."""
        return [replica for replica in replica_list
                if replica['replica_state'] != constants.REPLICA_STATE_ACTIVE]

    def create_replica(self, context, replica_list, new_replica,
                       access_rules, share_snapshots, share_server=None):
        """Creates the new replica on this backend and sets up SnapMirror."""

@@ -1324,6 +1340,12 @@ class NetAppCmodeFileStorageLibrary(object):
            msg_args = {'share_name': share_name, 'vserver': vserver}
            raise exception.ShareResourceNotFound(msg % msg_args)

        # NOTE(cknight): The SnapMirror may have been intentionally broken by
        # a revert-to-snapshot operation, in which case this method should not
        # attempt to change anything.
        if active_replica['status'] == constants.STATUS_REVERTING:
            return None

        dm_session = data_motion.DataMotionSession()
        try:
            snapmirrors = dm_session.get_snapmirrors(active_replica, replica)

@@ -1623,6 +1645,50 @@ class NetAppCmodeFileStorageLibrary(object):
            if e.code != netapp_api.EOBJECTNOTFOUND:
                raise

    def revert_to_replicated_snapshot(self, context, active_replica,
                                      replica_list, active_replica_snapshot,
                                      replica_snapshots, share_server=None):
        """Reverts a replicated share (in place) to the specified snapshot."""
        vserver, vserver_client = self._get_vserver(share_server=share_server)
        share_name = self._get_backend_share_name(
            active_replica_snapshot['share_id'])
        snapshot_name = self._get_backend_snapshot_name(
            active_replica_snapshot['id'])
        LOG.debug('Restoring snapshot %s', snapshot_name)

        dm_session = data_motion.DataMotionSession()
        non_active_replica_list = self._find_nonactive_replicas(replica_list)

        # Ensure source snapshot exists
        vserver_client.get_snapshot(share_name, snapshot_name)

        # Break all mirrors
        for replica in non_active_replica_list:
            try:
                dm_session.break_snapmirror(
                    active_replica, replica, mount=False)
            except netapp_api.NaApiError as e:
                if e.code != netapp_api.EOBJECTNOTFOUND:
                    raise

        # Delete source SnapMirror snapshots that will prevent a snap restore
        snapmirror_snapshot_names = vserver_client.list_snapmirror_snapshots(
            share_name)
        for snapmirror_snapshot_name in snapmirror_snapshot_names:
            vserver_client.delete_snapshot(
                share_name, snapmirror_snapshot_name, ignore_owners=True)

        # Restore source snapshot of interest
        vserver_client.restore_snapshot(share_name, snapshot_name)

        # Reestablish mirrors
        for replica in non_active_replica_list:
            try:
                dm_session.resync_snapmirror(active_replica, replica)
            except netapp_api.NaApiError as e:
                if e.code != netapp_api.EOBJECTNOTFOUND:
                    raise

    def _check_destination_vserver_for_vol_move(self, source_share,
                                                source_vserver,
                                                dest_share_server):
@@ -1395,10 +1395,12 @@ VOLUME_MODIFY_ITER_ERROR_RESPONSE = etree.XML("""
  </results>
""" % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME})

SNAPSHOT_ACCESS_TIME = '1466640058'
SNAPSHOT_GET_ITER_NOT_BUSY_RESPONSE = etree.XML("""
  <results status="passed">
    <attributes-list>
      <snapshot-info>
        <access-time>%(access_time)s</access-time>
        <busy>false</busy>
        <name>%(snap)s</name>
        <volume>%(volume)s</volume>

@@ -1407,12 +1409,18 @@ SNAPSHOT_GET_ITER_NOT_BUSY_RESPONSE = etree.XML("""
    </attributes-list>
    <num-records>1</num-records>
  </results>
-""" % {'snap': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'vserver': VSERVER_NAME})
""" % {
    'access_time': SNAPSHOT_ACCESS_TIME,
    'snap': SNAPSHOT_NAME,
    'volume': SHARE_NAME,
    'vserver': VSERVER_NAME,
})

SNAPSHOT_GET_ITER_BUSY_RESPONSE = etree.XML("""
  <results status="passed">
    <attributes-list>
      <snapshot-info>
        <access-time>%(access_time)s</access-time>
        <busy>true</busy>
        <name>%(snap)s</name>
        <volume>%(volume)s</volume>

@@ -1426,7 +1434,12 @@ SNAPSHOT_GET_ITER_BUSY_RESPONSE = etree.XML("""
    </attributes-list>
    <num-records>1</num-records>
  </results>
-""" % {'snap': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'vserver': VSERVER_NAME})
""" % {
    'access_time': SNAPSHOT_ACCESS_TIME,
    'snap': SNAPSHOT_NAME,
    'volume': SHARE_NAME,
    'vserver': VSERVER_NAME,
})

SNAPSHOT_GET_ITER_NOT_UNIQUE_RESPONSE = etree.XML("""
  <results status="passed">

@@ -1513,6 +1526,24 @@ SNAPSHOT_GET_ITER_DELETED_RESPONSE = etree.XML("""
    'vserver': VSERVER_NAME,
})

SNAPSHOT_GET_ITER_SNAPMIRROR_RESPONSE = etree.XML("""
  <results status="passed">
    <attributes-list>
      <snapshot-info>
        <name>%(snap)s</name>
        <volume>%(volume)s</volume>
        <vserver>%(vserver)s</vserver>
      </snapshot-info>
    </attributes-list>
    <num-records>1</num-records>
  </results>

""" % {
    'snap': SNAPSHOT_NAME,
    'volume': SHARE_NAME,
    'vserver': VSERVER_NAME,
})

CIFS_SHARE_ACCESS_CONTROL_GET_ITER = etree.XML("""
  <results status="passed">
    <attributes-list>
@@ -3570,6 +3570,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
    @ddt.data({
        'mock_return': fake.SNAPSHOT_GET_ITER_NOT_BUSY_RESPONSE,
        'expected': {
            'access-time': fake.SNAPSHOT_ACCESS_TIME,
            'name': fake.SNAPSHOT_NAME,
            'volume': fake.SHARE_NAME,
            'busy': False,

@@ -3578,6 +3579,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
    }, {
        'mock_return': fake.SNAPSHOT_GET_ITER_BUSY_RESPONSE,
        'expected': {
            'access-time': fake.SNAPSHOT_ACCESS_TIME,
            'name': fake.SNAPSHOT_NAME,
            'volume': fake.SHARE_NAME,
            'busy': True,

@@ -3603,6 +3605,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
            },
            'desired-attributes': {
                'snapshot-info': {
                    'access-time': None,
                    'name': None,
                    'volume': None,
                    'busy': None,

@@ -3658,15 +3661,32 @@ class NetAppClientCmodeTestCase(test.TestCase):
        self.client.send_request.assert_has_calls([
            mock.call('snapshot-rename', snapshot_rename_args)])

-    def test_delete_snapshot(self):
+    def test_restore_snapshot(self):

        self.mock_object(self.client, 'send_request')

-        self.client.delete_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME)
        self.client.restore_snapshot(fake.SHARE_NAME,
                                     fake.SNAPSHOT_NAME)

        snapshot_restore_args = {
            'volume': fake.SHARE_NAME,
            'snapshot': fake.SNAPSHOT_NAME,
        }
        self.client.send_request.assert_has_calls([
            mock.call('snapshot-restore-volume', snapshot_restore_args)])

    @ddt.data(True, False)
    def test_delete_snapshot(self, ignore_owners):

        self.mock_object(self.client, 'send_request')

        self.client.delete_snapshot(
            fake.SHARE_NAME, fake.SNAPSHOT_NAME, ignore_owners=ignore_owners)

        snapshot_delete_args = {
            'volume': fake.SHARE_NAME,
-            'snapshot': fake.SNAPSHOT_NAME
            'snapshot': fake.SNAPSHOT_NAME,
            'ignore-owners': 'true' if ignore_owners else 'false',
        }

        self.client.send_request.assert_has_calls([

@@ -5539,6 +5559,35 @@ class NetAppClientCmodeTestCase(test.TestCase):
                                 expected_get_snapmirrors_calls)
        self.assertTrue(mock_exc_log.called)

    @ddt.data(None, '12345')
    def test_list_snapmirror_snapshots(self, newer_than):

        api_response = netapp_api.NaElement(
            fake.SNAPSHOT_GET_ITER_SNAPMIRROR_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         mock.Mock(return_value=api_response))

        result = self.client.list_snapmirror_snapshots(fake.SHARE_NAME,
                                                       newer_than=newer_than)

        snapshot_get_iter_args = {
            'query': {
                'snapshot-info': {
                    'dependency': 'snapmirror',
                    'volume': fake.SHARE_NAME,
                },
            },
        }
        if newer_than:
            snapshot_get_iter_args['query']['snapshot-info']['access-time'] = (
                '>' + newer_than)
        self.client.send_iter_request.assert_has_calls([
            mock.call('snapshot-get-iter', snapshot_get_iter_args)])

        expected = [fake.SNAPSHOT_NAME]
        self.assertEqual(expected, result)

    @ddt.data('start_volume_move', 'check_volume_move')
    def test_volume_move_method(self, method_name):
@@ -350,6 +350,22 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
        self.mock_dest_client.mount_volume.assert_called_once_with(
            self.fake_dest_vol_name)

    def test_break_snapmirror_no_mount(self):
        self.mock_object(self.dm_session, 'quiesce_then_abort')

        self.dm_session.break_snapmirror(self.fake_src_share,
                                         self.fake_dest_share,
                                         mount=False)

        self.mock_dest_client.break_snapmirror.assert_called_once_with(
            self.source_vserver, self.fake_src_vol_name,
            self.dest_vserver, self.fake_dest_vol_name)

        self.dm_session.quiesce_then_abort.assert_called_once_with(
            self.fake_src_share, self.fake_dest_share)

        self.assertFalse(self.mock_dest_client.mount_volume.called)

    def test_break_snapmirror_wait_for_quiesced(self):
        self.mock_object(self.dm_session, 'quiesce_then_abort')
@@ -1205,6 +1205,25 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
                                                                snapshot_name)
        self.assertEqual(snapshot_name, model_update['provider_location'])

    def test_revert_to_snapshot(self):

        vserver_client = mock.Mock()
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))

        result = self.library.revert_to_snapshot(
            self.context, fake.SNAPSHOT, share_server=fake.SHARE_SERVER)

        self.assertIsNone(result)
        share_name = self.library._get_backend_share_name(
            fake.SNAPSHOT['share_id'])
        snapshot_name = self.library._get_backend_snapshot_name(
            fake.SNAPSHOT['id'])
        vserver_client.restore_snapshot.assert_called_once_with(share_name,
                                                                snapshot_name)

    def test_delete_snapshot(self):

        vserver_client = mock.Mock()

@@ -2480,6 +2499,26 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
        self.assertFalse(self.mock_dm_session.create_snapmirror.called)
        self.assertEqual(constants.STATUS_OUT_OF_SYNC, result)

    def test_update_replica_state_share_reverting_to_snapshot(self):
        vserver_client = mock.Mock()
        self.mock_object(vserver_client, 'volume_exists',
                         mock.Mock(return_value=True))
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.get_snapmirrors = mock.Mock(return_value=[])

        replica = copy.deepcopy(fake.SHARE)
        replica['status'] = constants.STATUS_REVERTING

        result = self.library.update_replica_state(
            None, [replica], replica, None, [], share_server=None)

        self.assertFalse(self.mock_dm_session.get_snapmirrors.called)
        self.assertFalse(self.mock_dm_session.create_snapmirror.called)
        self.assertIsNone(result)

    def test_update_replica_state_no_snapmirror_create_failed(self):
        vserver_client = mock.Mock()
        self.mock_object(vserver_client, 'volume_exists',
@@ -3621,6 +3660,221 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):

        self.assertIsNone(model_update)

    def _get_fake_replicas_and_snapshots(self):

        fake_replica_3 = copy.deepcopy(self.fake_replica_2)
        fake_replica_3['id'] = fake.SHARE_ID3
        fake_snapshot = copy.deepcopy(fake.SNAPSHOT)
        fake_snapshot['share_id'] = self.fake_replica['id']
        snapshot_name = self.library._get_backend_snapshot_name(
            fake_snapshot['id'])
        fake_snapshot['provider_location'] = snapshot_name
        fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT)
        fake_snapshot_2['id'] = uuidutils.generate_uuid()
        fake_snapshot_2['share_id'] = self.fake_replica_2['id']
        fake_snapshot_2['provider_location'] = snapshot_name
        fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT)
        fake_snapshot_3['id'] = uuidutils.generate_uuid()
        fake_snapshot_3['share_id'] = fake_replica_3['id']
        fake_snapshot_3['provider_location'] = snapshot_name
        replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3]
        snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3]
        return replica_list, snapshot_list

    def test_revert_to_replicated_snapshot(self):

        replica_list, snapshot_list = self._get_fake_replicas_and_snapshots()
        fake_replica, fake_replica_2, fake_replica_3 = replica_list
        fake_snapshot, fake_snapshot_2, fake_snapshot_3 = snapshot_list
        share_name = self.library._get_backend_share_name(
            fake_snapshot['share_id'])
        snapshot_name = self.library._get_backend_snapshot_name(
            fake_snapshot['id'])

        vserver_client = mock.Mock()
        vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT
        vserver_client.list_snapmirror_snapshots.return_value = ['sm_snap']
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))

        self.library.revert_to_replicated_snapshot(
            self.context, self.fake_replica, replica_list, fake_snapshot,
            snapshot_list, share_server=fake.SHARE_SERVER)

        vserver_client.get_snapshot.assert_called_once_with(
            share_name, snapshot_name)
        vserver_client.list_snapmirror_snapshots.assert_called_once_with(
            share_name)
        vserver_client.delete_snapshot.assert_called_once_with(
            share_name, 'sm_snap', ignore_owners=True)
        vserver_client.restore_snapshot.assert_called_once_with(
            share_name, snapshot_name)

        self.mock_dm_session.break_snapmirror.assert_has_calls(
            [mock.call(self.fake_replica, self.fake_replica_2, mount=False),
             mock.call(self.fake_replica, fake_replica_3, mount=False)],
            any_order=True)
        self.mock_dm_session.resync_snapmirror.assert_has_calls(
            [mock.call(self.fake_replica, self.fake_replica_2),
             mock.call(self.fake_replica, fake_replica_3)],
            any_order=True)

    def test_revert_to_replicated_snapshot_not_found(self):

        replica_list, snapshot_list = self._get_fake_replicas_and_snapshots()
        fake_snapshot, fake_snapshot_2, fake_snapshot_3 = snapshot_list
        share_name = self.library._get_backend_share_name(
            fake_snapshot['share_id'])
        snapshot_name = self.library._get_backend_snapshot_name(
            fake_snapshot['id'])

        vserver_client = mock.Mock()
        vserver_client.get_snapshot.side_effect = netapp_api.NaApiError
        vserver_client.list_snapmirror_snapshots.return_value = ['sm_snap']
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))

        self.assertRaises(
            netapp_api.NaApiError, self.library.revert_to_replicated_snapshot,
            self.context, self.fake_replica, replica_list, fake_snapshot,
            snapshot_list, share_server=fake.SHARE_SERVER)

        vserver_client.get_snapshot.assert_called_once_with(
            share_name, snapshot_name)
        self.assertFalse(vserver_client.list_snapmirror_snapshots.called)
        self.assertFalse(vserver_client.delete_snapshot.called)
        self.assertFalse(vserver_client.restore_snapshot.called)
        self.assertFalse(self.mock_dm_session.break_snapmirror.called)
        self.assertFalse(self.mock_dm_session.resync_snapmirror.called)

    def test_revert_to_replicated_snapshot_snapmirror_break_error(self):

        replica_list, snapshot_list = self._get_fake_replicas_and_snapshots()
        fake_snapshot, fake_snapshot_2, fake_snapshot_3 = snapshot_list

        vserver_client = mock.Mock()
        vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT
        vserver_client.list_snapmirror_snapshots.return_value = ['sm_snap']
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.break_snapmirror.side_effect = (
            netapp_api.NaApiError)

        self.assertRaises(
            netapp_api.NaApiError, self.library.revert_to_replicated_snapshot,
            self.context, self.fake_replica, replica_list, fake_snapshot,
            snapshot_list, share_server=fake.SHARE_SERVER)

    def test_revert_to_replicated_snapshot_snapmirror_break_not_found(self):

        replica_list, snapshot_list = self._get_fake_replicas_and_snapshots()
        fake_replica, fake_replica_2, fake_replica_3 = replica_list
        fake_snapshot, fake_snapshot_2, fake_snapshot_3 = snapshot_list
        share_name = self.library._get_backend_share_name(
            fake_snapshot['share_id'])
        snapshot_name = self.library._get_backend_snapshot_name(
            fake_snapshot['id'])

        vserver_client = mock.Mock()
        vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT
        vserver_client.list_snapmirror_snapshots.return_value = ['sm_snap']
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.break_snapmirror.side_effect = (
            netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND))

        self.library.revert_to_replicated_snapshot(
            self.context, self.fake_replica, replica_list, fake_snapshot,
            snapshot_list, share_server=fake.SHARE_SERVER)

        vserver_client.get_snapshot.assert_called_once_with(
            share_name, snapshot_name)
        vserver_client.list_snapmirror_snapshots.assert_called_once_with(
            share_name)
        vserver_client.delete_snapshot.assert_called_once_with(
            share_name, 'sm_snap', ignore_owners=True)
        vserver_client.restore_snapshot.assert_called_once_with(
            share_name, snapshot_name)

        self.mock_dm_session.break_snapmirror.assert_has_calls(
            [mock.call(self.fake_replica, self.fake_replica_2, mount=False),
             mock.call(self.fake_replica, fake_replica_3, mount=False)],
            any_order=True)
        self.mock_dm_session.resync_snapmirror.assert_has_calls(
            [mock.call(self.fake_replica, self.fake_replica_2),
             mock.call(self.fake_replica, fake_replica_3)],
            any_order=True)

    def test_revert_to_replicated_snapshot_snapmirror_resync_error(self):

        replica_list, snapshot_list = self._get_fake_replicas_and_snapshots()
        fake_snapshot, fake_snapshot_2, fake_snapshot_3 = snapshot_list

        vserver_client = mock.Mock()
        vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT
        vserver_client.list_snapmirror_snapshots.return_value = ['sm_snap']
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.resync_snapmirror.side_effect = (
            netapp_api.NaApiError)

        self.assertRaises(
            netapp_api.NaApiError, self.library.revert_to_replicated_snapshot,
            self.context, self.fake_replica, replica_list, fake_snapshot,
            snapshot_list, share_server=fake.SHARE_SERVER)

    def test_revert_to_replicated_snapshot_snapmirror_resync_not_found(self):

        replica_list, snapshot_list = self._get_fake_replicas_and_snapshots()
        fake_replica, fake_replica_2, fake_replica_3 = replica_list
        fake_snapshot, fake_snapshot_2, fake_snapshot_3 = snapshot_list
        share_name = self.library._get_backend_share_name(
            fake_snapshot['share_id'])
        snapshot_name = self.library._get_backend_snapshot_name(
            fake_snapshot['id'])

        vserver_client = mock.Mock()
        vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT
        vserver_client.list_snapmirror_snapshots.return_value = ['sm_snap']
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.resync_snapmirror.side_effect = (
            netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND))

        self.library.revert_to_replicated_snapshot(
            self.context, self.fake_replica, replica_list, fake_snapshot,
            snapshot_list, share_server=fake.SHARE_SERVER)

        vserver_client.get_snapshot.assert_called_once_with(
            share_name, snapshot_name)
        vserver_client.list_snapmirror_snapshots.assert_called_once_with(
            share_name)
        vserver_client.delete_snapshot.assert_called_once_with(
            share_name, 'sm_snap', ignore_owners=True)
        vserver_client.restore_snapshot.assert_called_once_with(
            share_name, snapshot_name)

        self.mock_dm_session.break_snapmirror.assert_has_calls(
            [mock.call(self.fake_replica, self.fake_replica_2, mount=False),
             mock.call(self.fake_replica, fake_replica_3, mount=False)],
            any_order=True)
        self.mock_dm_session.resync_snapmirror.assert_has_calls(
            [mock.call(self.fake_replica, self.fake_replica_2),
             mock.call(self.fake_replica, fake_replica_3)],
            any_order=True)

    def test_migration_check_compatibility_no_cluster_credentials(self):
        self.library._have_cluster_creds = False
        self.mock_object(data_motion, 'get_backend_configuration')
@@ -36,6 +36,7 @@ FLEXVOL_NAME = 'fake_volume'
JUNCTION_PATH = '/%s' % FLEXVOL_NAME
EXPORT_LOCATION = '%s:%s' % (HOST_NAME, JUNCTION_PATH)
SNAPSHOT_NAME = 'fake_snapshot'
SNAPSHOT_ACCESS_TIME = '1466455782'
CONSISTENCY_GROUP_NAME = 'fake_consistency_group'
SHARE_SIZE = 10
TENANT_ID = '24cb2448-13d8-4f41-afd9-eff5c4fd2a57'

@@ -95,6 +96,7 @@ SHARE = {
        'network_allocations': [{'ip_address': 'ip'}]
    },
    'replica_state': constants.REPLICA_STATE_ACTIVE,
    'status': constants.STATUS_AVAILABLE,
}

FLEXVOL_TO_MANAGE = {

@@ -292,6 +294,7 @@ CDOT_SNAPSHOT = {
    'volume': SHARE_NAME,
    'busy': False,
    'owners': set(),
    'access-time': SNAPSHOT_ACCESS_TIME,
}

CDOT_SNAPSHOT_BUSY_VOLUME_CLONE = {

@@ -299,6 +302,7 @@ CDOT_SNAPSHOT_BUSY_VOLUME_CLONE = {
    'volume': SHARE_NAME,
    'busy': True,
    'owners': {'volume clone'},
    'access-time': SNAPSHOT_ACCESS_TIME,
}

CDOT_SNAPSHOT_BUSY_SNAPMIRROR = {

@@ -306,6 +310,7 @@ CDOT_SNAPSHOT_BUSY_SNAPMIRROR = {
    'volume': SHARE_NAME,
    'busy': True,
    'owners': {'snapmirror'},
    'access-time': SNAPSHOT_ACCESS_TIME,
}

CDOT_CLONE_CHILD_1 = 'fake_child_1'

@@ -545,7 +550,8 @@ SSC_INFO_VSERVER_CREDS = {
}

POOLS = [
-    {'pool_name': AGGREGATES[0],
    {
        'pool_name': AGGREGATES[0],
        'netapp_aggregate': AGGREGATES[0],
        'total_capacity_gb': 3.3,
        'free_capacity_gb': 1.1,

@@ -561,8 +567,12 @@ POOLS = [
        'utilization': 30.0,
        'filter_function': 'filter',
        'goodness_function': 'goodness',
        'snapshot_support': True,
        'create_share_from_snapshot_support': True,
        'revert_to_snapshot_support': True,
    },
-    {'pool_name': AGGREGATES[1],
    {
        'pool_name': AGGREGATES[1],
        'netapp_aggregate': AGGREGATES[1],
        'total_capacity_gb': 6.0,
        'free_capacity_gb': 2.0,

@@ -578,11 +588,15 @@ POOLS = [
        'utilization': 42.0,
        'filter_function': 'filter',
        'goodness_function': 'goodness',
        'snapshot_support': True,
        'create_share_from_snapshot_support': True,
        'revert_to_snapshot_support': True,
    },
]

POOLS_VSERVER_CREDS = [
-    {'pool_name': AGGREGATES[0],
    {
        'pool_name': AGGREGATES[0],
        'netapp_aggregate': AGGREGATES[0],
        'total_capacity_gb': 'unknown',
        'free_capacity_gb': 1.1,

@@ -595,8 +609,12 @@ POOLS_VSERVER_CREDS = [
        'utilization': 50.0,
        'filter_function': None,
        'goodness_function': None,
        'snapshot_support': True,
        'create_share_from_snapshot_support': True,
        'revert_to_snapshot_support': True,
    },
-    {'pool_name': AGGREGATES[1],
    {
        'pool_name': AGGREGATES[1],
        'netapp_aggregate': AGGREGATES[1],
        'total_capacity_gb': 'unknown',
        'free_capacity_gb': 2.0,

@@ -609,6 +627,9 @@ POOLS_VSERVER_CREDS = [
        'utilization': 50.0,
        'filter_function': None,
        'goodness_function': None,
        'snapshot_support': True,
        'create_share_from_snapshot_support': True,
        'revert_to_snapshot_support': True,
    },
]
@@ -20,6 +20,7 @@ from tempest.lib.common.utils import data_utils
from testtools import testcase as tc

from manila_tempest_tests.common import constants
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base

CONF = config.CONF

@@ -72,6 +73,26 @@ class RevertToSnapshotTest(base.BaseSharesMixedTest):

        cls.share = cls.create_share(share_type_id=cls.st_id)

        if CONF.share.run_replication_tests:
            # Create replicated share type
            cls.replicated_share_type_name = data_utils.rand_name("share-type")
            cls.replication_type = CONF.share.backend_replication_type
            if cls.replication_type not in constants.REPLICATION_TYPE_CHOICES:
                raise share_exceptions.ShareReplicationTypeException(
                    replication_type=cls.replication_type
                )
            cls.zones = cls.get_availability_zones(client=cls.admin_client)
            cls.share_zone = cls.zones[0]
            cls.replica_zone = cls.zones[-1]

            extra_specs = cls.add_extra_specs_to_dict(
                {"replication_type": cls.replication_type})
            share_type = cls.create_share_type(
                cls.replicated_share_type_name,
                extra_specs=extra_specs,
                client=cls.admin_client)
            cls.replicated_share_type = share_type["share_type"]

    @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
    @ddt.data(
        *{constants.REVERT_TO_SNAPSHOT_MICROVERSION,

@@ -107,3 +128,35 @@ class RevertToSnapshotTest(base.BaseSharesMixedTest):
            version=version)
        self.shares_v2_client.wait_for_share_status(self.share['id'],
                                                    constants.STATUS_AVAILABLE)

    @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
    @tc.skipUnless(CONF.share.run_replication_tests,
                   'Replication tests are disabled.')
    @ddt.data(
        *{constants.REVERT_TO_SNAPSHOT_MICROVERSION,
          CONF.share.max_api_microversion}
    )
    def test_revert_to_replicated_snapshot(self, version):
        """Test reverting to a replicated snapshot."""
        share = self.create_share(
            share_type_id=self.replicated_share_type['id'],
            availability_zone=self.share_zone
        )

        share_replica = self.create_share_replica(share["id"],
                                                  self.replica_zone)
        self.shares_v2_client.wait_for_share_replica_status(
            share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
            status_attr='replica_state')

        snapshot = self.create_snapshot_wait_for_active(share["id"])

        self.shares_v2_client.revert_to_snapshot(
            share['id'],
            snapshot['id'],
            version=version)
        self.shares_v2_client.wait_for_share_status(share['id'],
                                                    constants.STATUS_AVAILABLE)
        self.shares_v2_client.wait_for_share_replica_status(
            share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
            status_attr='replica_state')
@@ -0,0 +1,8 @@
---
features:
  - Added support for share revert-to-snapshot to NetApp Data ONTAP drivers.
upgrades:
  - If using existing share types with Data ONTAP, set the
    'revert_to_snapshot_support' extra spec to allow creating shares that
    support in-place revert-to-snapshot functionality. This modification will
    not affect existing shares of such types.
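
As a concrete illustration (not part of this change), an operator could set the
extra spec on an existing share type with the manila CLI; the type name
my_ontap_type below is a hypothetical example:

    manila type-key my_ontap_type set revert_to_snapshot_support=True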