NetApp cDOT driver should not split clones

An earlier bug (https://bugs.launchpad.net/manila/+bug/1259988
"NetApp cDOT driver should split clone from snapshot after
creation") led us to modify the NetApp cDOT driver to split cloned
shares off from their parents immediately upon creation. As
described in that bug report, the fix makes the source snapshot
deletable after the clone split is done. However, the more
significant negative consequence is that the storage efficiency
gains from having cloned the blocks are lost. We have had
complaints from users who expect to retain the storage efficiency
of cDOT snapshots and cloning.

The fix is to not start a clone split during the
create-from-snapshot workflow. Instead, if/when a request to
delete the locked snapshot is received, the driver should start
the clone split at that time and soft-delete the snapshot. The
driver already has logic for reaping soft-deleted objects, so it
is straightforward to also reap deleted snapshots as they become
un-busy.

Change-Id: I0f7ba8f76dce6f55c64e156b372317387d299fa6
Closes-Bug: #1554592
This commit is contained in:
Clinton Knight 2016-03-08 11:04:24 -05:00
parent cd72947258
commit 4a2290193c
11 changed files with 467 additions and 141 deletions

View File

@ -49,6 +49,7 @@ ETRANSFER_IN_PROGRESS = '17137'
EANOTHER_OP_ACTIVE = '17131'
ERELATION_NOT_QUIESCED = '17127'
ESOURCE_IS_DIFFERENT = '17105'
EVOL_CLONE_BEING_SPLIT = '17151'
class NaServer(object):

View File

@ -1626,8 +1626,55 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
@na_utils.trace
def split_volume_clone(self, volume_name):
    """Begins splitting a clone from its parent.

    Starts the volume-clone-split-start job on the backend. The call is
    idempotent from the caller's perspective: if ONTAP reports that a
    split is already running (EVOL_CLONE_BEING_SPLIT), that is treated
    as success. Any other API error propagates to the caller.

    :param volume_name: name of the cloned FlexVol to split.
    """
    try:
        api_args = {'volume': volume_name}
        self.send_request('volume-clone-split-start', api_args)
    except netapp_api.NaApiError as e:
        # A split already in progress is exactly the state we want.
        if e.code == netapp_api.EVOL_CLONE_BEING_SPLIT:
            return
        raise
@na_utils.trace
def get_clone_children_for_snapshot(self, volume_name, snapshot_name):
    """Returns volumes that are keeping a snapshot locked."""
    api_args = {
        'query': {
            'volume-attributes': {
                'volume-clone-attributes': {
                    'volume-clone-parent-attributes': {
                        'name': volume_name,
                        'snapshot-name': snapshot_name,
                    },
                },
            },
        },
        'desired-attributes': {
            'volume-attributes': {
                'volume-id-attributes': {
                    'name': None,
                },
            },
        },
    }
    result = self.send_iter_request('volume-get-iter', api_args)
    if not self._has_records(result):
        return []

    attributes_list = (result.get_child_by_name('attributes-list') or
                       netapp_api.NaElement('none'))

    # Collect just the names of the clone children found by the query.
    clone_children = []
    for volume_attributes in attributes_list.get_children():
        id_attributes = (
            volume_attributes.get_child_by_name('volume-id-attributes') or
            netapp_api.NaElement('none'))
        clone_children.append(
            {'name': id_attributes.get_child_content('name')})
    return clone_children
@na_utils.trace
def get_volume_junction_path(self, volume_name, is_style_cifs=False):
@ -1794,12 +1841,94 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
return snapshot
@na_utils.trace
def rename_snapshot(self, volume_name, snapshot_name, new_snapshot_name):
    """Renames a snapshot on the given volume."""
    self.send_request('snapshot-rename', {
        'volume': volume_name,
        'current-name': snapshot_name,
        'new-name': new_snapshot_name
    })
@na_utils.trace
def delete_snapshot(self, volume_name, snapshot_name):
    """Deletes a volume snapshot."""
    self.send_request('snapshot-delete',
                      {'volume': volume_name, 'snapshot': snapshot_name})
@na_utils.trace
def soft_delete_snapshot(self, volume_name, snapshot_name):
    """Deletes a volume snapshot, or renames it if delete fails."""
    try:
        self.delete_snapshot(volume_name, snapshot_name)
    except netapp_api.NaApiError:
        # Deletion failed (e.g. the snapshot is still busy), so tag it
        # with the deleted prefix; the housekeeping reaper deletes it
        # later once it is no longer busy.
        renamed = DELETED_PREFIX + snapshot_name
        self.rename_snapshot(volume_name, snapshot_name, renamed)
        msg = _('Soft-deleted snapshot %(snapshot)s on volume %(volume)s.')
        LOG.info(msg, {'snapshot': snapshot_name, 'volume': volume_name})
@na_utils.trace
def prune_deleted_snapshots(self):
    """Deletes non-busy snapshots that were previously soft-deleted."""
    for vserver, snapshots in self._get_deleted_snapshots().items():
        # Use a vserver-scoped copy of this client for the deletions.
        client = copy.deepcopy(self)
        client.set_vserver(vserver)
        for snapshot in snapshots:
            try:
                client.delete_snapshot(snapshot['volume'],
                                       snapshot['name'])
            except netapp_api.NaApiError:
                # Best-effort housekeeping; log and move on.
                msg = _('Could not delete snapshot %(snap)s on '
                        'volume %(volume)s.')
                msg_args = {
                    'snap': snapshot['name'],
                    'volume': snapshot['volume'],
                }
                LOG.exception(msg, msg_args)
@na_utils.trace
def _get_deleted_snapshots(self):
    """Returns non-busy, soft-deleted snapshots suitable for reaping."""
    api_args = {
        'query': {
            'snapshot-info': {
                'name': DELETED_PREFIX + '*',
                'busy': 'false',
            },
        },
        'desired-attributes': {
            'snapshot-info': {
                'name': None,
                'vserver': None,
                'volume': None,
            },
        },
    }
    result = self.send_iter_request('snapshot-get-iter', api_args)
    attributes_list = (result.get_child_by_name('attributes-list') or
                       netapp_api.NaElement('none'))

    # Group the soft-deleted snapshots by owning vserver.
    snapshot_map = {}
    for snapshot_info in attributes_list.get_children():
        vserver = snapshot_info.get_child_content('vserver')
        snapshot_map.setdefault(vserver, []).append({
            'name': snapshot_info.get_child_content('name'),
            'volume': snapshot_info.get_child_content('volume'),
            'vserver': vserver,
        })
    return snapshot_map
@na_utils.trace
def create_cg_snapshot(self, volume_names, snapshot_name):
"""Creates a consistency group snapshot of one or more flexvols."""

View File

@ -22,7 +22,6 @@ single-SVM or multi-SVM functionality needed by the cDOT Manila drivers.
import copy
import math
import socket
import time
from oslo_config import cfg
from oslo_log import log
@ -568,7 +567,6 @@ class NetAppCmodeFileStorageLibrary(object):
LOG.debug('Creating share from snapshot %s', snapshot['id'])
vserver_client.create_volume_clone(share_name, parent_share_name,
parent_snapshot_name)
vserver_client.split_volume_clone(share_name)
@na_utils.trace
def _share_exists(self, share_name, vserver_client):
@ -735,48 +733,36 @@ class NetAppCmodeFileStorageLibrary(object):
snapshot_name = self._get_backend_snapshot_name(snapshot['id'])
try:
self._handle_busy_snapshot(vserver_client, share_name,
snapshot_name)
self._delete_snapshot(vserver_client, share_name, snapshot_name)
except exception.SnapshotNotFound:
LOG.info(_LI("Snapshot %s does not exist."), snapshot_name)
return
msg = _LI("Snapshot %(snap)s does not exist on share %(share)s.")
msg_args = {'snap': snapshot_name, 'share': share_name}
LOG.info(msg, msg_args)
def _delete_snapshot(self, vserver_client, share_name, snapshot_name):
    """Deletes a backend snapshot, handling busy snapshots as needed.

    If the snapshot is not busy, delete it outright. If it is busy
    solely because volume clones depend on it, start splitting those
    clones and soft-delete the snapshot so the housekeeping task can
    reap it once the splits complete. If it is busy for any other
    reason (e.g. snapmirror), raise ShareSnapshotIsBusy.
    """
    backend_snapshot = vserver_client.get_snapshot(share_name,
                                                   snapshot_name)

    LOG.debug('Deleting snapshot %(snap)s for share %(share)s.',
              {'snap': snapshot_name, 'share': share_name})

    if not backend_snapshot['busy']:
        vserver_client.delete_snapshot(share_name, snapshot_name)
    elif backend_snapshot['owners'] == {'volume clone'}:
        # Snapshots are locked by clone(s), so split clone and soft delete
        snapshot_children = vserver_client.get_clone_children_for_snapshot(
            share_name, snapshot_name)
        for snapshot_child in snapshot_children:
            vserver_client.split_volume_clone(snapshot_child['name'])
        vserver_client.soft_delete_snapshot(share_name, snapshot_name)
    else:
        raise exception.ShareSnapshotIsBusy(snapshot_name=snapshot_name)
# Wait for clone dependency to clear.
retry_interval = 3 # seconds
for retry in range(int(wait_seconds / retry_interval)):
LOG.debug('Snapshot %(snap)s for share %(share)s is busy, waiting '
'for volume clone dependency to clear.',
{'snap': snapshot_name, 'share': share_name})
time.sleep(retry_interval)
snapshot = vserver_client.get_snapshot(share_name, snapshot_name)
if not snapshot['busy']:
return
raise exception.ShareSnapshotIsBusy(snapshot_name=snapshot_name)
@na_utils.trace
def manage_existing(self, share, driver_options):
vserver, vserver_client = self._get_vserver(share_server=None)
@ -1004,18 +990,15 @@ class NetAppCmodeFileStorageLibrary(object):
for share_name in share_names:
try:
self._handle_busy_snapshot(vserver_client, share_name,
snapshot_name)
self._delete_snapshot(
vserver_client, share_name, snapshot_name)
except exception.SnapshotNotFound:
LOG.info(_LI("Snapshot %(snap)s does not exist for share "
"%(share)s."),
{'snap': snapshot_name, 'share': share_name})
msg = _LI("Snapshot %(snap)s does not exist on share "
"%(share)s.")
msg_args = {'snap': snapshot_name, 'share': share_name}
LOG.info(msg, msg_args)
continue
LOG.debug("Deleting snapshot %(snap)s for share %(share)s.",
{'snap': snapshot_name, 'share': share_name})
vserver_client.delete_snapshot(share_name, snapshot_name)
return None, None
@na_utils.trace

View File

@ -91,6 +91,7 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
def _handle_housekeeping_tasks(self):
    """Handle various cleanup activities."""
    # Reap soft-deleted export policies and snapshots, then defer to
    # the base library for any remaining housekeeping.
    client = self._client
    client.prune_deleted_nfs_export_policies()
    client.prune_deleted_snapshots()
    super(NetAppCmodeMultiSVMFileStorageLibrary,
          self)._handle_housekeeping_tasks()

View File

@ -105,6 +105,7 @@ class NetAppCmodeSingleSVMFileStorageLibrary(
"""Handle various cleanup activities."""
vserver_client = self._get_api_client(vserver=self._vserver)
vserver_client.prune_deleted_nfs_export_policies()
vserver_client.prune_deleted_snapshots()
super(NetAppCmodeSingleSVMFileStorageLibrary, self).\
_handle_housekeeping_tasks()

View File

@ -1303,6 +1303,23 @@ SNAPSHOT_MULTIDELETE_ERROR_RESPONSE = etree.XML("""
</results>
""" % {'volume': SHARE_NAME})
SNAPSHOT_GET_ITER_DELETED_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<snapshot-info>
<name>deleted_manila_%(snap)s</name>
<volume>%(volume)s</volume>
<vserver>%(vserver)s</vserver>
</snapshot-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'snap': SNAPSHOT_NAME,
'volume': SHARE_NAME,
'vserver': VSERVER_NAME,
})
CIFS_SHARE_ACCESS_CONTROL_GET_ITER = etree.XML("""
<results status="passed">
<attributes-list>
@ -1702,6 +1719,32 @@ VOLUME_GET_ITER_VOLUME_TO_MANAGE_RESPONSE = etree.XML("""
'size': SHARE_SIZE,
})
CLONE_CHILD_1 = 'fake_child_1'
CLONE_CHILD_2 = 'fake_child_2'
VOLUME_GET_ITER_CLONE_CHILDREN_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<name>%(clone1)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
<volume-attributes>
<volume-id-attributes>
<name>%(clone2)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {
'vserver': VSERVER_NAME,
'clone1': CLONE_CHILD_1,
'clone2': CLONE_CHILD_2,
})
SIS_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>

View File

@ -2827,9 +2827,14 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.client.send_request.assert_has_calls([
mock.call('volume-clone-create', volume_clone_create_args)])
def test_split_volume_clone(self):
@ddt.data(None,
mock.Mock(side_effect=netapp_api.NaApiError(
code=netapp_api.EVOL_CLONE_BEING_SPLIT)))
def test_split_volume_clone(self, side_effect):
self.mock_object(self.client, 'send_request')
self.mock_object(
self.client, 'send_request',
mock.Mock(side_effect=side_effect))
self.client.split_volume_clone(fake.SHARE_NAME)
@ -2838,6 +2843,67 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.client.send_request.assert_has_calls([
mock.call('volume-clone-split-start', volume_clone_split_args)])
def test_split_volume_clone_api_error(self):
    # A generic API error (not EVOL_CLONE_BEING_SPLIT) must propagate
    # out of split_volume_clone unchanged.
    self.mock_object(self.client,
                     'send_request',
                     mock.Mock(side_effect=self._mock_api_error()))
    self.assertRaises(netapp_api.NaApiError,
                      self.client.split_volume_clone,
                      fake.SHARE_NAME)
def test_get_clone_children_for_snapshot(self):
    api_response = netapp_api.NaElement(
        fake.VOLUME_GET_ITER_CLONE_CHILDREN_RESPONSE)
    self.mock_object(self.client,
                     'send_iter_request',
                     mock.Mock(return_value=api_response))

    actual = self.client.get_clone_children_for_snapshot(
        fake.SHARE_NAME, fake.SNAPSHOT_NAME)

    # The query must filter on the parent volume/snapshot pair and
    # request only the clone names.
    expected_api_args = {
        'query': {
            'volume-attributes': {
                'volume-clone-attributes': {
                    'volume-clone-parent-attributes': {
                        'name': fake.SHARE_NAME,
                        'snapshot-name': fake.SNAPSHOT_NAME,
                    },
                },
            },
        },
        'desired-attributes': {
            'volume-attributes': {
                'volume-id-attributes': {
                    'name': None,
                },
            },
        },
    }
    self.client.send_iter_request.assert_has_calls([
        mock.call('volume-get-iter', expected_api_args)])
    self.assertEqual([{'name': fake.CLONE_CHILD_1},
                      {'name': fake.CLONE_CHILD_2}],
                     actual)
def test_get_clone_children_for_snapshot_not_found(self):
    # No matching clone children yields an empty list, not an error.
    no_records = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
    self.mock_object(self.client,
                     'send_iter_request',
                     mock.Mock(return_value=no_records))

    actual = self.client.get_clone_children_for_snapshot(
        fake.SHARE_NAME, fake.SNAPSHOT_NAME)

    self.assertEqual([], actual)
def test_get_volume_junction_path(self):
api_response = netapp_api.NaElement(
@ -3149,6 +3215,22 @@ class NetAppClientCmodeTestCase(test.TestCase):
fake.SHARE_NAME,
fake.SNAPSHOT_NAME)
def test_rename_snapshot(self):
    self.mock_object(self.client, 'send_request')

    self.client.rename_snapshot(fake.SHARE_NAME,
                                fake.SNAPSHOT_NAME,
                                'new_snapshot_name')

    expected_args = {
        'volume': fake.SHARE_NAME,
        'current-name': fake.SNAPSHOT_NAME,
        'new-name': 'new_snapshot_name'
    }
    self.client.send_request.assert_has_calls([
        mock.call('snapshot-rename', expected_args)])
def test_delete_snapshot(self):
self.mock_object(self.client, 'send_request')
@ -3163,6 +3245,99 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.client.send_request.assert_has_calls([
mock.call('snapshot-delete', snapshot_delete_args)])
def test_soft_delete_snapshot(self):
    # Happy path: delete succeeds, so no rename is attempted.
    mock_delete = self.mock_object(self.client, 'delete_snapshot')
    mock_rename = self.mock_object(self.client, 'rename_snapshot')

    self.client.soft_delete_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME)

    mock_delete.assert_called_once_with(fake.SHARE_NAME,
                                        fake.SNAPSHOT_NAME)
    self.assertFalse(mock_rename.called)
def test_soft_delete_snapshot_api_error(self):
    # Delete fails, so the snapshot must be renamed with the deleted
    # prefix instead.
    mock_delete = self.mock_object(
        self.client, 'delete_snapshot', self._mock_api_error())
    mock_rename = self.mock_object(self.client, 'rename_snapshot')

    self.client.soft_delete_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME)

    mock_delete.assert_called_once_with(fake.SHARE_NAME,
                                        fake.SNAPSHOT_NAME)
    mock_rename.assert_called_once_with(
        fake.SHARE_NAME, fake.SNAPSHOT_NAME,
        'deleted_manila_' + fake.SNAPSHOT_NAME)
def test_prune_deleted_snapshots(self):
    deleted_snapshots_map = {
        'vserver1': [{
            'name': 'deleted_snap_1',
            'volume': 'fake_volume_1',
            'vserver': 'vserver1',
        }],
        'vserver2': [{
            'name': 'deleted_snap_2',
            'volume': 'fake_volume_2',
            'vserver': 'vserver2',
        }],
    }
    mock_get_deleted = self.mock_object(
        self.client, '_get_deleted_snapshots',
        mock.Mock(return_value=deleted_snapshots_map))
    # One delete succeeds; the other raises and must be swallowed.
    mock_delete = self.mock_object(
        self.client, 'delete_snapshot',
        mock.Mock(side_effect=[None, netapp_api.NaApiError]))
    self.mock_object(copy, 'deepcopy',
                     mock.Mock(return_value=self.client))

    self.client.prune_deleted_snapshots()

    mock_get_deleted.assert_called_once_with()
    mock_delete.assert_has_calls([
        mock.call('fake_volume_1', 'deleted_snap_1'),
        mock.call('fake_volume_2', 'deleted_snap_2'),
    ], any_order=True)
def test_get_deleted_snapshots(self):
    api_response = netapp_api.NaElement(
        fake.SNAPSHOT_GET_ITER_DELETED_RESPONSE)
    self.mock_object(self.client,
                     'send_iter_request',
                     mock.Mock(return_value=api_response))

    actual = self.client._get_deleted_snapshots()

    # Query must select only non-busy, prefix-tagged snapshots.
    expected_api_args = {
        'query': {
            'snapshot-info': {
                'name': 'deleted_manila_*',
                'busy': 'false',
            },
        },
        'desired-attributes': {
            'snapshot-info': {
                'name': None,
                'vserver': None,
                'volume': None,
            },
        },
    }
    self.client.send_iter_request.assert_has_calls([
        mock.call('snapshot-get-iter', expected_api_args)])
    expected_map = {
        fake.VSERVER_NAME: [{
            'name': 'deleted_manila_' + fake.SNAPSHOT_NAME,
            'volume': fake.SHARE_NAME,
            'vserver': fake.VSERVER_NAME,
        }],
    }
    self.assertDictEqual(expected_map, actual)
def test_create_cg_snapshot(self):
mock_start_cg_snapshot = self.mock_object(

View File

@ -865,7 +865,6 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
share_name,
parent_share_name,
parent_snapshot_name)
vserver_client.split_volume_clone.assert_called_once_with(share_name)
def test_share_exists(self):
@ -1131,8 +1130,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
'_get_vserver',
mock.Mock(return_value=(fake.VSERVER1,
vserver_client)))
mock_handle_busy_snapshot = self.mock_object(self.library,
'_handle_busy_snapshot')
mock_delete_snapshot = self.mock_object(self.library,
'_delete_snapshot')
self.library.delete_snapshot(self.context,
fake.SNAPSHOT,
@ -1142,9 +1141,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
fake.SNAPSHOT['share_id'])
snapshot_name = self.library._get_backend_snapshot_name(
fake.SNAPSHOT['id'])
self.assertTrue(mock_handle_busy_snapshot.called)
vserver_client.delete_snapshot.assert_called_once_with(share_name,
snapshot_name)
mock_delete_snapshot.assert_called_once_with(
vserver_client, share_name, snapshot_name)
@ddt.data(exception.InvalidInput(reason='fake_reason'),
exception.VserverNotSpecified(),
@ -1154,15 +1152,14 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.mock_object(self.library,
'_get_vserver',
mock.Mock(side_effect=get_vserver_exception))
mock_handle_busy_snapshot = self.mock_object(self.library,
'_handle_busy_snapshot')
mock_delete_snapshot = self.mock_object(self.library,
'_delete_snapshot')
self.library.delete_snapshot(self.context,
fake.SNAPSHOT,
share_server=fake.SHARE_SERVER)
self.assertFalse(mock_handle_busy_snapshot.called)
self.assertEqual(1, lib_base.LOG.warning.call_count)
self.assertFalse(mock_delete_snapshot.called)
def test_delete_snapshot_not_found(self):
@ -1171,8 +1168,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
'_get_vserver',
mock.Mock(return_value=(fake.VSERVER1,
vserver_client)))
mock_handle_busy_snapshot = self.mock_object(
self.library, '_handle_busy_snapshot',
mock_delete_snapshot = self.mock_object(
self.library, '_delete_snapshot',
mock.Mock(side_effect=exception.SnapshotNotFound(
name=fake.SNAPSHOT_NAME)))
@ -1180,104 +1177,90 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
fake.SNAPSHOT,
share_server=fake.SHARE_SERVER)
self.assertTrue(mock_handle_busy_snapshot.called)
self.assertFalse(vserver_client.delete_snapshot.called)
share_name = self.library._get_backend_share_name(
fake.SNAPSHOT['share_id'])
snapshot_name = self.library._get_backend_snapshot_name(
fake.SNAPSHOT['id'])
mock_delete_snapshot.assert_called_once_with(
vserver_client, share_name, snapshot_name)
def test_delete_snapshot_busy(self):
def test_delete_snapshot_not_unique(self):
vserver_client = mock.Mock()
self.mock_object(self.library,
'_get_vserver',
mock.Mock(return_value=(fake.VSERVER1,
vserver_client)))
mock_handle_busy_snapshot = self.mock_object(
self.library, '_handle_busy_snapshot',
mock.Mock(side_effect=exception.ShareSnapshotIsBusy(
snapshot_name=fake.SNAPSHOT_NAME)))
mock_delete_snapshot = self.mock_object(
self.library, '_delete_snapshot',
mock.Mock(side_effect=exception.NetAppException()))
self.assertRaises(exception.ShareSnapshotIsBusy,
self.assertRaises(exception.NetAppException,
self.library.delete_snapshot,
self.context,
fake.SNAPSHOT,
share_server=fake.SHARE_SERVER)
self.assertTrue(mock_handle_busy_snapshot.called)
self.assertFalse(vserver_client.delete_snapshot.called)
share_name = self.library._get_backend_share_name(
fake.SNAPSHOT['share_id'])
snapshot_name = self.library._get_backend_snapshot_name(
fake.SNAPSHOT['id'])
mock_delete_snapshot.assert_called_once_with(
vserver_client, share_name, snapshot_name)
def test_handle_busy_snapshot_not_busy(self):
def test__delete_snapshot(self):
vserver_client = mock.Mock()
vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT
result = self.library._handle_busy_snapshot(vserver_client,
fake.SHARE_NAME,
fake.SNAPSHOT_NAME)
self.library._delete_snapshot(vserver_client,
fake.SHARE_NAME,
fake.SNAPSHOT_NAME)
self.assertIsNone(result)
self.assertEqual(1, vserver_client.get_snapshot.call_count)
self.assertEqual(0, lib_base.LOG.debug.call_count)
vserver_client.delete_snapshot.assert_called_once_with(
fake.SHARE_NAME, fake.SNAPSHOT_NAME)
self.assertFalse(vserver_client.get_clone_children_for_snapshot.called)
self.assertFalse(vserver_client.split_volume_clone.called)
self.assertFalse(vserver_client.soft_delete_snapshot.called)
def test_handle_busy_snapshot_not_found(self):
def test__delete_snapshot_busy_volume_clone(self):
vserver_client = mock.Mock()
vserver_client.get_snapshot.side_effect = exception.SnapshotNotFound(
name=fake.SNAPSHOT_NAME)
vserver_client.get_snapshot.return_value = (
fake.CDOT_SNAPSHOT_BUSY_VOLUME_CLONE)
vserver_client.get_clone_children_for_snapshot.return_value = (
fake.CDOT_CLONE_CHILDREN)
self.assertRaises(exception.SnapshotNotFound,
self.library._handle_busy_snapshot,
vserver_client,
fake.SHARE_NAME,
fake.SNAPSHOT_NAME)
self.library._delete_snapshot(vserver_client,
fake.SHARE_NAME,
fake.SNAPSHOT_NAME)
def test_handle_busy_snapshot_not_clone_dependency(self):
self.assertFalse(vserver_client.delete_snapshot.called)
vserver_client.get_clone_children_for_snapshot.assert_called_once_with(
fake.SHARE_NAME, fake.SNAPSHOT_NAME)
vserver_client.split_volume_clone.assert_has_calls([
mock.call(fake.CDOT_CLONE_CHILD_1),
mock.call(fake.CDOT_CLONE_CHILD_2),
])
vserver_client.soft_delete_snapshot.assert_called_once_with(
fake.SHARE_NAME, fake.SNAPSHOT_NAME)
snapshot = copy.deepcopy(fake.CDOT_SNAPSHOT_BUSY_VOLUME_CLONE)
snapshot['owners'] = {'fake reason'}
def test__delete_snapshot_busy_snapmirror(self):
vserver_client = mock.Mock()
vserver_client.get_snapshot.return_value = snapshot
vserver_client.get_snapshot.return_value = (
fake.CDOT_SNAPSHOT_BUSY_SNAPMIRROR)
self.assertRaises(exception.ShareSnapshotIsBusy,
self.library._handle_busy_snapshot,
self.library._delete_snapshot,
vserver_client,
fake.SHARE_NAME,
fake.SNAPSHOT_NAME)
self.assertEqual(1, vserver_client.get_snapshot.call_count)
self.assertEqual(0, lib_base.LOG.debug.call_count)
def test_handle_busy_snapshot_clone_finishes(self):
get_snapshot_side_effect = [fake.CDOT_SNAPSHOT_BUSY_VOLUME_CLONE] * 10
get_snapshot_side_effect.append(fake.CDOT_SNAPSHOT)
vserver_client = mock.Mock()
vserver_client.get_snapshot.side_effect = get_snapshot_side_effect
mock_sleep = self.mock_object(time, 'sleep')
result = self.library._handle_busy_snapshot(vserver_client,
fake.SHARE_NAME,
fake.SNAPSHOT_NAME)
self.assertIsNone(result)
self.assertEqual(11, vserver_client.get_snapshot.call_count)
mock_sleep.assert_has_calls([mock.call(3)] * 10)
self.assertEqual(10, lib_base.LOG.debug.call_count)
def test_handle_busy_snapshot_clone_continues(self):
vserver_client = mock.Mock()
vserver_client.get_snapshot.side_effect = [
fake.CDOT_SNAPSHOT_BUSY_VOLUME_CLONE] * 30
mock_sleep = self.mock_object(time, 'sleep')
self.assertRaises(exception.ShareSnapshotIsBusy,
self.library._handle_busy_snapshot,
vserver_client,
fake.SHARE_NAME,
fake.SNAPSHOT_NAME)
self.assertEqual(21, vserver_client.get_snapshot.call_count)
mock_sleep.assert_has_calls([mock.call(3)] * 20)
self.assertEqual(20, lib_base.LOG.debug.call_count)
self.assertFalse(vserver_client.delete_snapshot.called)
self.assertFalse(vserver_client.get_clone_children_for_snapshot.called)
self.assertFalse(vserver_client.split_volume_clone.called)
self.assertFalse(vserver_client.soft_delete_snapshot.called)
def test_manage_existing(self):
@ -1719,8 +1702,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock_get_vserver = self.mock_object(
self.library, '_get_vserver',
mock.Mock(return_value=(fake.VSERVER1, vserver_client)))
mock_handle_busy_snapshot = self.mock_object(self.library,
'_handle_busy_snapshot')
mock_delete_snapshot = self.mock_object(self.library,
'_delete_snapshot')
result = self.library.delete_cgsnapshot(
self.context,
@ -1736,14 +1719,10 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
snapshot_name = self.library._get_backend_cg_snapshot_name(
fake.CG_SNAPSHOT['id'])
mock_handle_busy_snapshot.assert_has_calls([
mock_delete_snapshot.assert_has_calls([
mock.call(vserver_client, share_names[0], snapshot_name),
mock.call(vserver_client, share_names[1], snapshot_name)
])
vserver_client.delete_snapshot.assert_has_calls([
mock.call(share_names[0], snapshot_name),
mock.call(share_names[1], snapshot_name)
])
self.assertEqual((None, None), result)
mock_get_vserver.assert_called_once_with(
share_server=fake.SHARE_SERVER)
@ -1754,8 +1733,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock_get_vserver = self.mock_object(
self.library, '_get_vserver',
mock.Mock(return_value=(fake.VSERVER1, vserver_client)))
mock_handle_busy_snapshot = self.mock_object(self.library,
'_handle_busy_snapshot')
mock_delete_snapshot = self.mock_object(self.library,
'_delete_snapshot')
fake_cg_snapshot = copy.deepcopy(fake.CG_SNAPSHOT)
fake_cg_snapshot['cgsnapshot_members'] = []
@ -1765,8 +1744,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
fake_cg_snapshot,
share_server=fake.SHARE_SERVER)
self.assertFalse(mock_handle_busy_snapshot.called)
self.assertFalse(vserver_client.delete_snapshot.called)
self.assertFalse(mock_delete_snapshot.called)
self.assertEqual((None, None), result)
mock_get_vserver.assert_called_once_with(
share_server=fake.SHARE_SERVER)
@ -1777,9 +1755,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock_get_vserver = self.mock_object(
self.library, '_get_vserver',
mock.Mock(return_value=(fake.VSERVER1, vserver_client)))
mock_handle_busy_snapshot = self.mock_object(
self.library,
'_handle_busy_snapshot',
mock_delete_snapshot = self.mock_object(
self.library, '_delete_snapshot',
mock.Mock(side_effect=exception.SnapshotNotFound(name='fake')))
result = self.library.delete_cgsnapshot(
@ -1796,11 +1773,10 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
snapshot_name = self.library._get_backend_cg_snapshot_name(
fake.CG_SNAPSHOT['id'])
mock_handle_busy_snapshot.assert_has_calls([
mock_delete_snapshot.assert_has_calls([
mock.call(vserver_client, share_names[0], snapshot_name),
mock.call(vserver_client, share_names[1], snapshot_name)
])
self.assertFalse(vserver_client.delete_snapshot.called)
self.assertEqual((None, None), result)
mock_get_vserver.assert_called_once_with(
share_server=fake.SHARE_SERVER)

View File

@ -173,12 +173,14 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
def test_handle_housekeeping_tasks(self):
    self.mock_object(self.client, 'prune_deleted_nfs_export_policies')
    self.mock_object(self.client, 'prune_deleted_snapshots')
    mock_super = self.mock_object(lib_base.NetAppCmodeFileStorageLibrary,
                                  '_handle_housekeeping_tasks')

    self.library._handle_housekeeping_tasks()

    # Both pruning hooks and the base-class housekeeping must run.
    self.assertTrue(self.client.prune_deleted_nfs_export_policies.called)
    self.assertTrue(self.client.prune_deleted_snapshots.called)
    self.assertTrue(mock_super.called)
def test_find_matching_aggregates(self):

View File

@ -161,6 +161,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.assertTrue(
mock_vserver_client.prune_deleted_nfs_export_policies.called)
self.assertTrue(mock_vserver_client.prune_deleted_snapshots.called)
self.assertTrue(mock_super.called)
def test_find_matching_aggregates(self):

View File

@ -279,6 +279,20 @@ CDOT_SNAPSHOT_BUSY_VOLUME_CLONE = {
'owners': {'volume clone'},
}
# Snapshot busy for a non-clone reason (snapmirror); per the driver
# tests, deleting this must raise ShareSnapshotIsBusy rather than
# soft-deleting.
CDOT_SNAPSHOT_BUSY_SNAPMIRROR = {
    'name': SNAPSHOT_NAME,
    'volume': SHARE_NAME,
    'busy': True,
    'owners': {'snapmirror'},
}
# Names of FlexClone children that hold the busy snapshot locked, as
# returned by get_clone_children_for_snapshot.
CDOT_CLONE_CHILD_1 = 'fake_child_1'
CDOT_CLONE_CHILD_2 = 'fake_child_2'
CDOT_CLONE_CHILDREN = [
    {'name': CDOT_CLONE_CHILD_1},
    {'name': CDOT_CLONE_CHILD_2},
]
SHARE_FOR_CG1 = {
'id': SHARE_ID,
'host': '%(host)s@%(backend)s#%(pool)s' % {