cDOT driver should split clone from snapshot after creation

When the cDOT driver receives a create_share_from_snapshot command,
it clones the share/flexvol from the specified snapshot in the
parent share/flexvol.  But if the resulting clone is not split
from its parent, the snapshot remains busy and cannot be deleted.
This patch resolves that by starting a clone split operation
whenever a clone is created.

During the split operation, which usually completes within
seconds, the snapshot remains busy.  This patch detects that
condition in the snapshot delete path and waits up to a minute
for the busy condition to clear.
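
In short, the new behavior amounts to the minimal sketch below.
It is an illustration only, not the patch code itself: the client
calls (create_volume_clone, split_volume_clone, get_snapshot,
delete_snapshot) and the 60-second timeout / 3-second poll interval
come from the diffs that follow, while the two wrapper functions
are hypothetical stand-ins for the driver library methods.

import time

from manila import exception


def create_share_from_snapshot(client, share_name, parent_share,
                               parent_snapshot):
    # Clone the flexvol from the parent snapshot, then immediately start
    # splitting the clone so the snapshot does not stay busy indefinitely.
    client.create_volume_clone(share_name, parent_share, parent_snapshot)
    client.split_volume_clone(share_name)


def delete_snapshot(client, share_name, snapshot_name, wait_seconds=60):
    # The clone split usually finishes within seconds; poll the snapshot
    # and give up only if it is still busy after wait_seconds.
    retry_interval = 3
    for _ in range(wait_seconds // retry_interval):
        snapshot = client.get_snapshot(share_name, snapshot_name)
        if not snapshot['busy']:
            client.delete_snapshot(share_name, snapshot_name)
            return
        if snapshot['owners'] != {'volume clone'}:
            # Busy for some reason other than a clone split; waiting
            # will not help, so fail fast.
            break
        time.sleep(retry_interval)
    raise exception.ShareSnapshotIsBusy(snapshot_name=snapshot_name)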

Change-Id: I44bac106f7f7a1acc968c1d9a526dcef85f657b7
Closes-Bug: #1259988
Clinton Knight 2015-03-15 19:56:58 -04:00
parent 4db796339a
commit d37f44f04a
7 changed files with 198 additions and 47 deletions


@@ -36,6 +36,7 @@ EAPINOTFOUND = '13005'
EVOLUMEOFFLINE = '13042'
EINTERNALERROR = '13114'
EDUPLICATEENTRY = '13130'
EVOLNOTCLONE = '13170'
ESIS_CLONE_NOT_LICENSED = '14956'
EOBJECTNOTFOUND = '15661'


@@ -19,6 +19,7 @@ import copy
import hashlib
from oslo_log import log
from oslo_utils import strutils
import six
from manila import exception
@@ -92,7 +93,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
@na_utils.trace
def vserver_exists(self, vserver_name):
"""Checks if Vserver exists."""
LOG.debug('Checking if Vserver %s exists' % vserver_name)
LOG.debug('Checking if Vserver %s exists', vserver_name)
api_args = {
'query': {
@@ -400,7 +401,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
protocols = na_utils.convert_to_list(protocols)
protocols = [protocol.lower() for protocol in protocols]
args = {
api_args = {
'query': {
'net-interface-info': {
'data-protocols': {
@@ -410,7 +411,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
}
} if protocols else None
result = self.send_request('net-interface-get-iter', args)
result = self.send_request('net-interface-get-iter', api_args)
lif_info_list = result.get_child_by_name(
'attributes-list') or netapp_api.NaElement('none')
@@ -833,6 +834,12 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
}
self.send_request('volume-clone-create', api_args)
@na_utils.trace
def split_volume_clone(self, volume_name):
"""Begins splitting a clone from its parent."""
api_args = {'volume': volume_name}
self.send_request('volume-clone-split-start', api_args)
@na_utils.trace
def get_volume_junction_path(self, volume_name, is_style_cifs=False):
"""Gets a volume junction path."""
@@ -869,8 +876,8 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
self.send_request('snapshot-create', api_args)
@na_utils.trace
def is_snapshot_busy(self, volume_name, snapshot_name):
"""Checks if volume snapshot is busy."""
def get_snapshot(self, volume_name, snapshot_name):
"""Gets a single snapshot."""
api_args = {
'query': {
'snapshot-info': {
@@ -880,7 +887,12 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
},
'desired-attributes': {
'snapshot-info': {
'name': None,
'volume': None,
'busy': None,
'snapshot-owners-list': {
'snapshot-owner': None,
}
},
},
}
@@ -897,8 +909,21 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
raise exception.NetAppException(msg % msg_args)
snapshot_info = snapshot_info_list[0]
busy = snapshot_info.get_child_content('busy').lower()
return busy == 'true'
snapshot = {
'name': snapshot_info.get_child_content('name'),
'volume': snapshot_info.get_child_content('volume'),
'busy': strutils.bool_from_string(
snapshot_info.get_child_content('busy')),
}
snapshot_owners_list = snapshot_info.get_child_by_name(
'snapshot-owners-list') or netapp_api.NaElement('none')
snapshot_owners = set([
snapshot_owner.get_child_content('owner')
for snapshot_owner in snapshot_owners_list.get_children()])
snapshot['owners'] = snapshot_owners
return snapshot
@na_utils.trace
def delete_snapshot(self, volume_name, snapshot_name):


@@ -21,6 +21,7 @@ single-SVM or multi-SVM functionality needed by the cDOT Manila drivers.
import copy
import socket
import time
from oslo_log import log
from oslo_utils import units
@@ -449,6 +450,7 @@ class NetAppCmodeFileStorageLibrary(object):
LOG.debug('Creating share from snapshot %s', snapshot['id'])
vserver_client.create_volume_clone(share_name, parent_share_name,
parent_snapshot_name)
vserver_client.split_volume_clone(share_name)
@na_utils.trace
def _share_exists(self, share_name, vserver_client):
@@ -518,13 +520,44 @@ class NetAppCmodeFileStorageLibrary(object):
share_name = self._get_valid_share_name(snapshot['share_id'])
snapshot_name = self._get_valid_snapshot_name(snapshot['id'])
if vserver_client.is_snapshot_busy(share_name, snapshot_name):
raise exception.ShareSnapshotIsBusy(snapshot_name=snapshot_name)
self._handle_busy_snapshot(vserver_client, share_name, snapshot_name)
LOG.debug('Deleting snapshot %(snap)s for share %(share)s.',
{'snap': snapshot_name, 'share': share_name})
vserver_client.delete_snapshot(share_name, snapshot_name)
@na_utils.trace
def _handle_busy_snapshot(self, vserver_client, share_name, snapshot_name,
wait_seconds=60):
"""Checks for and handles a busy snapshot.
If a snapshot is not busy, take no action. If a snapshot is busy for
reasons other than a clone dependency, raise immediately. Otherwise,
since we always start a clone split operation after cloning a share,
wait up to a minute for a clone dependency to clear before giving up.
"""
snapshot = vserver_client.get_snapshot(share_name, snapshot_name)
if not snapshot['busy']:
return
# Fail fast if snapshot is not busy due to a clone dependency
if snapshot['owners'] != {'volume clone'}:
raise exception.ShareSnapshotIsBusy(snapshot_name=snapshot_name)
# Wait for clone dependency to clear.
retry_interval = 3 # seconds
for retry in range(wait_seconds / retry_interval):
LOG.debug('Snapshot %(snap)s for share %(share)s is busy, waiting '
'for volume clone dependency to clear.',
{'snap': snapshot_name, 'share': share_name})
time.sleep(retry_interval)
snapshot = vserver_client.get_snapshot(share_name, snapshot_name)
if not snapshot['busy']:
return
raise exception.ShareSnapshotIsBusy(snapshot_name=snapshot_name)
@na_utils.trace
def allow_access(self, context, share, access, share_server=None):
"""Allows access to a given NAS storage."""


@@ -981,6 +981,11 @@ SNAPSHOT_GET_ITER_BUSY_RESPONSE = etree.XML("""
<name>%(snap)s</name>
<volume>%(volume)s</volume>
<vserver>%(vserver)s</vserver>
<snapshot-owners-list>
<snapshot-owner>
<owner>volume clone</owner>
</snapshot-owner>
</snapshot-owners-list>
</snapshot-info>
</attributes-list>
<num-records>1</num-records>


@@ -17,6 +17,7 @@
import copy
import hashlib
import ddt
import mock
from oslo_log import log
@@ -28,6 +29,7 @@ from manila import test
from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake
@ddt.ddt
class NetAppClientCmodeTestCase(test.TestCase):
def setUp(self):
@@ -1448,6 +1450,17 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.client.send_request.assert_has_calls([
mock.call('volume-clone-create', volume_clone_create_args)])
def test_split_volume_clone(self):
self.mock_object(self.client, 'send_request')
self.client.split_volume_clone(fake.SHARE_NAME)
volume_clone_split_args = {'volume': fake.SHARE_NAME}
self.client.send_request.assert_has_calls([
mock.call('volume-clone-split-start', volume_clone_split_args)])
def test_get_volume_junction_path(self):
api_response = netapp_api.NaElement(
@@ -1551,49 +1564,56 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.client.send_request.assert_has_calls([
mock.call('snapshot-create', snapshot_create_args)])
def test_is_snapshot_busy(self):
@ddt.data({
'mock_return': fake.SNAPSHOT_GET_ITER_NOT_BUSY_RESPONSE,
'expected': {
'name': fake.SNAPSHOT_NAME,
'volume': fake.SHARE_NAME,
'busy': False,
'owners': set(),
}
}, {
'mock_return': fake.SNAPSHOT_GET_ITER_BUSY_RESPONSE,
'expected': {
'name': fake.SNAPSHOT_NAME,
'volume': fake.SHARE_NAME,
'busy': True,
'owners': {'volume clone'},
}
})
@ddt.unpack
def test_get_snapshot(self, mock_return, expected):
api_response = netapp_api.NaElement(
fake.SNAPSHOT_GET_ITER_BUSY_RESPONSE)
api_response = netapp_api.NaElement(mock_return)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.is_snapshot_busy(fake.SHARE_NAME,
fake.SNAPSHOT_NAME)
result = self.client.get_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME)
snapshot_get_iter_args = {
'query': {
'snapshot-info': {
'name': fake.SNAPSHOT_NAME,
'volume': fake.SHARE_NAME
}
'volume': fake.SHARE_NAME,
},
},
'desired-attributes': {
'snapshot-info': {
'busy': None
}
}
'name': None,
'volume': None,
'busy': None,
'snapshot-owners-list': {
'snapshot-owner': None,
}
},
},
}
self.client.send_request.assert_has_calls([
mock.call('snapshot-get-iter', snapshot_get_iter_args)])
self.assertTrue(result)
self.assertDictEqual(expected, result)
def test_is_snapshot_busy_not_busy(self):
api_response = netapp_api.NaElement(
fake.SNAPSHOT_GET_ITER_NOT_BUSY_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.is_snapshot_busy(fake.SHARE_NAME,
fake.SNAPSHOT_NAME)
self.assertFalse(result)
def test_is_snapshot_busy_not_found(self):
def test_get_snapshot_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
@@ -1601,7 +1621,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
mock.Mock(return_value=api_response))
self.assertRaises(exception.NetAppException,
self.client.is_snapshot_busy,
self.client.get_snapshot,
fake.SHARE_NAME,
fake.SNAPSHOT_NAME)


@@ -18,6 +18,7 @@ Unit tests for the NetApp Data ONTAP cDOT base storage driver library.
import copy
import socket
import time
import mock
from oslo_log import log
@@ -708,6 +709,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
share_name,
parent_share_name,
parent_snapshot_name)
vserver_client.split_volume_clone.assert_called_once_with(share_name)
def test_share_exists(self):
@@ -871,11 +873,12 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
def test_delete_snapshot(self):
vserver_client = mock.Mock()
vserver_client.is_snapshot_busy.return_value = False
self.mock_object(self.library,
'_get_vserver',
mock.Mock(return_value=(fake.VSERVER1,
vserver_client)))
mock_handle_busy_snapshot = self.mock_object(self.library,
'_handle_busy_snapshot')
self.library.delete_snapshot(self.context,
fake.SNAPSHOT,
@@ -885,23 +888,72 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
fake.SNAPSHOT['share_id'])
snapshot_name = self.library._get_valid_snapshot_name(
fake.SNAPSHOT['id'])
self.assertTrue(mock_handle_busy_snapshot.called)
vserver_client.delete_snapshot.assert_called_once_with(share_name,
snapshot_name)
def test_delete_snapshot_busy(self):
def test_handle_busy_snapshot_not_busy(self):
vserver_client = mock.Mock()
vserver_client.is_snapshot_busy.return_value = True
self.mock_object(self.library,
'_get_vserver',
mock.Mock(return_value=(fake.VSERVER1,
vserver_client)))
vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT
result = self.library._handle_busy_snapshot(vserver_client,
fake.SHARE_NAME,
fake.SNAPSHOT_NAME)
self.assertIsNone(result)
self.assertEqual(1, vserver_client.get_snapshot.call_count)
self.assertEqual(0, lib_base.LOG.debug.call_count)
def test_handle_busy_snapshot_not_clone_dependency(self):
snapshot = copy.deepcopy(fake.CDOT_SNAPSHOT_BUSY_VOLUME_CLONE)
snapshot['owners'] = {'fake reason'}
vserver_client = mock.Mock()
vserver_client.get_snapshot.return_value = snapshot
self.assertRaises(exception.ShareSnapshotIsBusy,
self.library.delete_snapshot,
self.context,
fake.SNAPSHOT,
share_server=fake.SHARE_SERVER)
self.library._handle_busy_snapshot,
vserver_client,
fake.SHARE_NAME,
fake.SNAPSHOT_NAME)
self.assertEqual(1, vserver_client.get_snapshot.call_count)
self.assertEqual(0, lib_base.LOG.debug.call_count)
def test_handle_busy_snapshot_clone_finishes(self):
get_snapshot_side_effect = [fake.CDOT_SNAPSHOT_BUSY_VOLUME_CLONE] * 10
get_snapshot_side_effect.append(fake.CDOT_SNAPSHOT)
vserver_client = mock.Mock()
vserver_client.get_snapshot.side_effect = get_snapshot_side_effect
mock_sleep = self.mock_object(time, 'sleep')
result = self.library._handle_busy_snapshot(vserver_client,
fake.SHARE_NAME,
fake.SNAPSHOT_NAME)
self.assertIsNone(result)
self.assertEqual(11, vserver_client.get_snapshot.call_count)
mock_sleep.assert_has_calls([mock.call(3)] * 10)
self.assertEqual(10, lib_base.LOG.debug.call_count)
def test_handle_busy_snapshot_clone_continues(self):
vserver_client = mock.Mock()
vserver_client.get_snapshot.side_effect = [
fake.CDOT_SNAPSHOT_BUSY_VOLUME_CLONE] * 30
mock_sleep = self.mock_object(time, 'sleep')
self.assertRaises(exception.ShareSnapshotIsBusy,
self.library._handle_busy_snapshot,
vserver_client,
fake.SHARE_NAME,
fake.SNAPSHOT_NAME)
self.assertEqual(21, vserver_client.get_snapshot.call_count)
mock_sleep.assert_has_calls([mock.call(3)] * 20)
self.assertEqual(20, lib_base.LOG.debug.call_count)
def test_allow_access(self):


@@ -31,6 +31,7 @@ VOLUME_NAME_TEMPLATE = 'share_%(share_id)s'
VSERVER_NAME_TEMPLATE = 'os_%s'
AGGREGATE_NAME_SEARCH_PATTERN = '(.*)'
SHARE_NAME = 'fake_share'
SNAPSHOT_NAME = 'fake_snapshot'
SHARE_SIZE = 10
TENANT_ID = '24cb2448-13d8-4f41-afd9-eff5c4fd2a57'
SHARE_ID = '7cf7c200-d3af-4e05-b87e-9167c95dfcad'
@@ -187,6 +188,20 @@ SNAPSHOT = {
'share_id': PARENT_SHARE_ID
}
CDOT_SNAPSHOT = {
'name': SNAPSHOT_NAME,
'volume': SHARE_NAME,
'busy': False,
'owners': set(),
}
CDOT_SNAPSHOT_BUSY_VOLUME_CLONE = {
'name': SNAPSHOT_NAME,
'volume': SHARE_NAME,
'busy': True,
'owners': {'volume clone'},
}
LIF_NAMES = []
LIF_ADDRESSES = ('10.10.10.10', '10.10.10.20')
LIFS = (