NetApp: Add Consistency Group Support for NFS

Adds consistency group support for the NetApp NFS driver.

Implements: blueprint netapp-nfs-consistencygroup-support

DocImpact

Change-Id: I79369e3817f8345b29b90179279de1d7c7f1ca0a
Chuck Fouts 2016-05-19 20:41:07 -04:00
parent 3562200766
commit 389188c5ea
16 changed files with 577 additions and 123 deletions
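
As a quick orientation before the per-file diffs: the sketch below walks through the consistency group snapshot flow that the new NFS driver code follows (take an ONTAP CG snapshot of the backing flexvols, clone each member file out of it, then wait for and delete the temporary flexvol snapshots). FakeZapiClient and create_cgsnapshot_flow are illustrative stand-ins, not classes or functions from this change.

```python
# Illustrative sketch only -- stand-in objects, not the real Cinder/NetApp classes.


class FakeZapiClient(object):
    """Pretend backend client exposing the calls the new CG code relies on."""

    def create_cg_snapshot(self, flexvols, snapshot_name):
        print('cg-start/cg-commit on %s as %s' % (sorted(flexvols), snapshot_name))

    def clone_file(self, src, dest, source_snapshot=None):
        print('clone %s -> %s from snapshot %s' % (src, dest, source_snapshot))

    def wait_for_busy_snapshot(self, flexvol, snapshot_name):
        print('waiting for %s on %s to stop being busy' % (snapshot_name, flexvol))

    def delete_snapshot(self, flexvol, snapshot_name):
        print('deleting snapshot %s on %s' % (snapshot_name, flexvol))


def create_cgsnapshot_flow(zapi, cgsnapshot_id, snapshots):
    """Mirror the high-level flow of NetAppNfsDriver.create_cgsnapshot."""
    # 1. One write-order consistent snapshot across every backing flexvol.
    flexvols = {snap['flexvol'] for snap in snapshots}
    zapi.create_cg_snapshot(flexvols, cgsnapshot_id)
    # 2. Clone each member file out of that snapshot; the clone becomes the
    #    file backing the individual Cinder snapshot.
    for snap in snapshots:
        zapi.clone_file(snap['volume_name'], snap['name'],
                        source_snapshot=cgsnapshot_id)
    # 3. The flexvol snapshots are only temporary; drop them once the clones
    #    no longer hold them busy.
    for flexvol in flexvols:
        zapi.wait_for_busy_snapshot(flexvol, cgsnapshot_id)
        zapi.delete_snapshot(flexvol, cgsnapshot_id)


if __name__ == '__main__':
    create_cgsnapshot_flow(
        FakeZapiClient(), 'cgsnapshot-1234',
        [{'flexvol': 'vol1', 'volume_name': 'volume-a', 'name': 'snapshot-a'}])
```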

View File

@ -507,7 +507,9 @@ class NetApp7modeClientTestCase(test.TestCase):
self.connection.invoke_successfully.side_effect = [
fake_clone_id_response, fake_clone_list_response]
self.client.clone_file(expected_src_path, expected_dest_path)
self.client.clone_file(expected_src_path,
expected_dest_path,
source_snapshot=fake.CG_SNAPSHOT_ID)
__, _args, _kwargs = self.connection.invoke_successfully.mock_calls[0]
actual_request = _args[0]
@ -519,6 +521,9 @@ class NetApp7modeClientTestCase(test.TestCase):
self.assertEqual(expected_src_path, actual_src_path)
self.assertEqual(expected_dest_path, actual_dest_path)
self.assertEqual(
fake.CG_SNAPSHOT_ID,
actual_request.get_child_by_name('snapshot-name').get_content())
self.assertEqual(actual_request.get_child_by_name(
'destination-exists'), None)
self.assertTrue(enable_tunneling)

View File

@ -19,6 +19,7 @@ import uuid
from lxml import etree
import mock
import six
import time
from cinder import exception
from cinder import test
@ -537,6 +538,15 @@ class NetAppBaseClientTestCase(test.TestCase):
self.client._commit_cg_snapshot.assert_called_once_with(
fake.CONSISTENCY_GROUP_ID)
def test_create_cg_snapshot_no_id(self):
self.mock_object(self.client, '_start_cg_snapshot', mock.Mock(
return_value=None))
self.assertRaises(exception.VolumeBackendAPIException,
self.client.create_cg_snapshot,
[fake.CG_VOLUME_NAME],
fake.CG_SNAPSHOT_NAME)
def test_start_cg_snapshot(self):
snapshot_init = {
'snapshot': fake.CG_SNAPSHOT_NAME,
@ -559,3 +569,25 @@ class NetAppBaseClientTestCase(test.TestCase):
self.client.send_request.assert_called_once_with(
'cg-commit', {'cg-id': snapshot_commit['cg-id']})
def test_wait_for_busy_snapshot_raise_exception(self):
BUSY_SNAPSHOT = dict(fake.SNAPSHOT)
BUSY_SNAPSHOT['busy'] = True
# Need to mock sleep as it is called by @utils.retry
self.mock_object(time, 'sleep')
mock_get_snapshot = self.mock_object(
self.client, 'get_snapshot',
mock.Mock(return_value=BUSY_SNAPSHOT)
)
self.assertRaises(exception.SnapshotIsBusy,
self.client.wait_for_busy_snapshot,
fake.FLEXVOL, fake.SNAPSHOT_NAME)
calls = [
mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME),
mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME),
mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME),
]
mock_get_snapshot.assert_has_calls(calls)

View File

@ -999,7 +999,8 @@ class NetAppCmodeClientTestCase(test.TestCase):
self.connection.get_api_version.return_value = (1, 20)
self.client.clone_file(expected_flex_vol, expected_src_path,
expected_dest_path, self.vserver)
expected_dest_path, self.vserver,
source_snapshot=fake.CG_SNAPSHOT_ID)
__, _args, __ = self.connection.invoke_successfully.mock_calls[0]
actual_request = _args[0]
@ -1013,6 +1014,9 @@ class NetAppCmodeClientTestCase(test.TestCase):
self.assertEqual(expected_flex_vol, actual_flex_vol)
self.assertEqual(expected_src_path, actual_src_path)
self.assertEqual(expected_dest_path, actual_dest_path)
req_snapshot_child = actual_request.get_child_by_name('snapshot-name')
self.assertEqual(fake.CG_SNAPSHOT_ID, req_snapshot_child.get_content())
self.assertEqual(actual_request.get_child_by_name(
'destination-exists'), None)
@ -3147,3 +3151,14 @@ class NetAppCmodeClientTestCase(test.TestCase):
fake_client.VOLUME_NAME)
self.assertEqual(expected_prov_opts, actual_prov_opts)
def test_wait_for_busy_snapshot(self):
mock_get_snapshot = self.mock_object(
self.client, 'get_snapshot',
mock.Mock(return_value=fake.SNAPSHOT)
)
self.client.wait_for_busy_snapshot(fake.FLEXVOL, fake.SNAPSHOT_NAME)
mock_get_snapshot.assert_called_once_with(fake.FLEXVOL,
fake.SNAPSHOT_NAME)

View File

@ -387,6 +387,7 @@ FAKE_7MODE_POOLS = [
CG_VOLUME_NAME = 'fake_cg_volume'
CG_GROUP_NAME = 'fake_consistency_group'
CG_POOL_NAME = 'cdot'
SOURCE_CG_VOLUME_NAME = 'fake_source_cg_volume'
CG_VOLUME_ID = 'fake_cg_volume_id'
CG_VOLUME_SIZE = 100
@ -419,7 +420,7 @@ CG_VOLUME = {
'name': CG_VOLUME_NAME,
'size': 100,
'id': CG_VOLUME_ID,
'host': 'hostname@backend#cdot',
'host': 'hostname@backend#' + CG_POOL_NAME,
'consistencygroup_id': CONSISTENCY_GROUP_ID,
'status': 'fake_status',
}
@ -435,6 +436,8 @@ CONSISTENCY_GROUP = {
'name': CG_GROUP_NAME,
}
CG_CONTEXT = {}
CG_SNAPSHOT = {
'id': CG_SNAPSHOT_ID,
'name': CG_SNAPSHOT_NAME,

View File

@ -1370,7 +1370,8 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
mock.Mock(return_value=fake.POOL_NAME))
mock_clone_lun = self.mock_object(self.library, '_clone_lun')
mock_busy = self.mock_object(self.library, '_handle_busy_snapshot')
mock_busy = self.mock_object(
self.zapi_client, 'wait_for_busy_snapshot')
self.library.create_cgsnapshot(fake.CG_SNAPSHOT, [snapshot])
@ -1499,16 +1500,3 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
}
mock_clone_source_to_destination.assert_called_once_with(
clone_source_to_destination_args, fake.VOLUME)
def test_handle_busy_snapshot(self):
self.mock_object(block_base, 'LOG')
mock_get_snapshot = self.mock_object(
self.zapi_client, 'get_snapshot',
mock.Mock(return_value=fake.SNAPSHOT)
)
self.library._handle_busy_snapshot(fake.FLEXVOL, fake.SNAPSHOT_NAME)
self.assertEqual(1, block_base.LOG.info.call_count)
mock_get_snapshot.assert_called_once_with(fake.FLEXVOL,
fake.SNAPSHOT_NAME)

View File

@ -81,7 +81,8 @@ class NetApp7modeNfsDriverTestCase(test.TestCase):
mock_get_actual_path_for_export.assert_called_once_with(
fake.EXPORT_PATH)
self.driver.zapi_client.clone_file.assert_called_once_with(
'fake_path/' + fake.FLEXVOL, 'fake_path/fake_clone')
'fake_path/' + fake.FLEXVOL, 'fake_path/fake_clone',
None)
@ddt.data({'nfs_sparsed_volumes': True},
{'nfs_sparsed_volumes': False})
@ -115,6 +116,7 @@ class NetApp7modeNfsDriverTestCase(test.TestCase):
expected = [{'pool_name': '192.168.99.24:/fake/export/path',
'QoS_support': False,
'consistencygroup_support': True,
'thick_provisioning_support': thick,
'thin_provisioning_support': not thick,
'free_capacity_gb': 12.0,
@ -171,3 +173,31 @@ class NetApp7modeNfsDriverTestCase(test.TestCase):
fake.NFS_SHARE)
self.assertEqual(expected, result)
def test_delete_cgsnapshot(self):
mock_delete_file = self.mock_object(self.driver, '_delete_file')
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(
fake.CG_CONTEXT, fake.CG_SNAPSHOT, [fake.SNAPSHOT]))
mock_delete_file.assert_called_once_with(
fake.SNAPSHOT['volume_id'], fake.SNAPSHOT['name'])
self.assertIsNone(model_update)
self.assertIsNone(snapshots_model_update)
def test_get_snapshot_backing_flexvol_names(self):
snapshots = [
{'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}},
{'volume': {'host': 'hostA@192.168.1.01#/fake/volume2'}},
{'volume': {'host': 'hostA@192.168.99.25#/fake/volume3'}},
{'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}},
]
hosts = [snap['volume']['host'] for snap in snapshots]
flexvols = self.driver._get_backing_flexvol_names(hosts)
self.assertEqual(3, len(flexvols))
self.assertIn('volume1', flexvols)
self.assertIn('volume2', flexvols)
self.assertIn('volume3', flexvols)

View File

@ -69,6 +69,9 @@ class NetAppNfsDriverTestCase(test.TestCase):
self.driver = nfs_base.NetAppNfsDriver(**kwargs)
self.driver.db = mock.Mock()
self.driver.zapi_client = mock.Mock()
self.zapi_client = self.driver.zapi_client
@mock.patch.object(nfs.NfsDriver, 'do_setup')
@mock.patch.object(na_utils, 'check_flags')
def test_do_setup(self, mock_check_flags, mock_super_do_setup):
@ -125,7 +128,6 @@ class NetAppNfsDriverTestCase(test.TestCase):
def test_get_capacity_info_ipv4_share(self):
expected = fake.CAPACITY_VALUES
self.driver.zapi_client = mock.Mock()
get_capacity = self.driver.zapi_client.get_flexvol_capacity
get_capacity.return_value = fake.CAPACITIES
@ -137,7 +139,6 @@ class NetAppNfsDriverTestCase(test.TestCase):
def test_get_capacity_info_ipv6_share(self):
expected = fake.CAPACITY_VALUES
self.driver.zapi_client = mock.Mock()
get_capacity = self.driver.zapi_client.get_flexvol_capacity
get_capacity.return_value = fake.CAPACITIES
@ -322,8 +323,7 @@ class NetAppNfsDriverTestCase(test.TestCase):
fake.SNAPSHOT['volume_name'], fake.SNAPSHOT['name'],
fake.SNAPSHOT['volume_id'], is_snapshot=True)
@ddt.data(True, False)
def test_delete_snapshot(self, volume_present):
def test_delete_snapshot(self):
updates = {
'name': fake.SNAPSHOT_NAME,
'volume_size': fake.SIZE,
@ -332,24 +332,12 @@ class NetAppNfsDriverTestCase(test.TestCase):
'busy': False,
}
snapshot = fake_snapshot.fake_snapshot_obj(self.ctxt, **updates)
self.mock_object(self.driver, '_get_provider_location',
mock.Mock(return_value=fake.SNAPSHOT_MOUNT))
self.mock_object(self.driver, '_volume_not_present',
mock.Mock(return_value=volume_present))
self.mock_object(self.driver, '_execute')
self.mock_object(self.driver, '_get_volume_path',
mock.Mock(return_value='fake'))
self.driver._execute_as_root = True
self.mock_object(self.driver, '_delete_file')
retval = self.driver.delete_snapshot(snapshot)
self.driver.delete_snapshot(snapshot)
if volume_present:
self.assertTrue(retval)
self.driver._execute.assert_not_called()
else:
self.assertIsNone(retval)
self.driver._execute.assert_called_once_with(
'rm', 'fake', run_as_root=True)
self.driver._delete_file.assert_called_once_with(snapshot.volume_id,
snapshot.name)
def test__get_volume_location(self):
volume_id = fake.VOLUME_ID
@ -931,3 +919,156 @@ class NetAppNfsDriverTestCase(test.TestCase):
self.driver.manage_existing_get_size,
volume,
vol_ref)
def test_create_consistency_group(self):
model_update = self.driver.create_consistencygroup(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP)
self.assertEqual('available', model_update['status'])
@ddt.data(True, False)
def test_delete_file(self, volume_not_present):
mock_get_provider_location = self.mock_object(
self.driver, '_get_provider_location')
mock_get_provider_location.return_value = fake.NFS_SHARE
mock_volume_not_present = self.mock_object(
self.driver, '_volume_not_present')
mock_volume_not_present.return_value = volume_not_present
mock_get_volume_path = self.mock_object(
self.driver, '_get_volume_path')
mock_get_volume_path.return_value = fake.PATH
mock_delete = self.mock_object(self.driver, '_delete')
self.driver._delete_file(fake.CG_VOLUME_ID, fake.CG_VOLUME_NAME)
mock_get_provider_location.assert_called_once_with(fake.CG_VOLUME_ID)
mock_volume_not_present.assert_called_once_with(
fake.NFS_SHARE, fake.CG_VOLUME_NAME)
if not volume_not_present:
mock_get_volume_path.assert_called_once_with(
fake.NFS_SHARE, fake.CG_VOLUME_NAME)
mock_delete.assert_called_once_with(fake.PATH)
def test_delete_file_volume_not_present(self):
mock_get_provider_location = self.mock_object(
self.driver, '_get_provider_location')
mock_get_provider_location.return_value = fake.NFS_SHARE
mock_volume_not_present = self.mock_object(
self.driver, '_volume_not_present')
mock_volume_not_present.return_value = True
mock_get_volume_path = self.mock_object(
self.driver, '_get_volume_path')
mock_delete = self.mock_object(self.driver, '_delete')
self.driver._delete_file(fake.CG_VOLUME_ID, fake.CG_VOLUME_NAME)
mock_get_provider_location.assert_called_once_with(fake.CG_VOLUME_ID)
mock_volume_not_present.assert_called_once_with(
fake.NFS_SHARE, fake.CG_VOLUME_NAME)
mock_get_volume_path.assert_not_called()
mock_delete.assert_not_called()
def test_update_consistencygroup(self):
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(fake.CG_CONTEXT, "foo"))
self.assertIsNone(add_volumes_update)
self.assertIsNone(remove_volumes_update)
def test_create_consistencygroup_from_src(self):
mock_create_volume_from_snapshot = self.mock_object(
self.driver, 'create_volume_from_snapshot')
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME],
cgsnapshot=fake.CG_SNAPSHOT, snapshots=[fake.SNAPSHOT]))
mock_create_volume_from_snapshot.assert_called_once_with(
fake.VOLUME, fake.SNAPSHOT)
self.assertIsNone(model_update)
self.assertIsNone(volumes_model_update)
def test_create_consistencygroup_from_src_source_vols(self):
mock_get_snapshot_flexvols = self.mock_object(
self.driver, '_get_backing_flexvol_names')
mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME]))
mock_clone_backing_file = self.mock_object(
self.driver, '_clone_backing_file_for_volume')
fake_snapshot_name = 'snapshot-temp-' + fake.CONSISTENCY_GROUP['id']
mock_busy = self.mock_object(
self.driver.zapi_client, 'wait_for_busy_snapshot')
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME],
source_cg=fake.CONSISTENCY_GROUP,
source_vols=[fake.CG_VOLUME]))
mock_get_snapshot_flexvols.assert_called_once_with(
[fake.CG_VOLUME['host']])
self.driver.zapi_client.create_cg_snapshot.assert_called_once_with(
set([fake.CG_POOL_NAME]), fake_snapshot_name)
mock_clone_backing_file.assert_called_once_with(
fake.CG_VOLUME['name'], fake.VOLUME['name'], fake.CG_VOLUME['id'],
source_snapshot=fake_snapshot_name)
mock_busy.assert_called_once_with(
fake.CG_POOL_NAME, fake_snapshot_name)
self.driver.zapi_client.delete_snapshot.assert_called_once_with(
fake.CG_POOL_NAME, fake_snapshot_name)
self.assertIsNone(model_update)
self.assertIsNone(volumes_model_update)
def test_create_consistencygroup_from_src_invalid_parms(self):
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME]))
self.assertIn('error', model_update['status'])
def test_create_cgsnapshot(self):
snapshot = fake.CG_SNAPSHOT
snapshot['volume'] = fake.CG_VOLUME
mock_get_snapshot_flexvols = self.mock_object(
self.driver, '_get_backing_flexvol_names')
mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME]))
mock_clone_backing_file = self.mock_object(
self.driver, '_clone_backing_file_for_volume')
mock_busy = self.mock_object(
self.driver.zapi_client, 'wait_for_busy_snapshot')
self.driver.create_cgsnapshot(
fake.CG_CONTEXT, fake.CG_SNAPSHOT, [snapshot])
mock_get_snapshot_flexvols.assert_called_once_with(
[snapshot['volume']['host']])
self.driver.zapi_client.create_cg_snapshot.assert_called_once_with(
set([fake.CG_POOL_NAME]), fake.CG_SNAPSHOT_ID)
mock_clone_backing_file.assert_called_once_with(
snapshot['volume']['name'], snapshot['name'],
snapshot['volume']['id'], source_snapshot=fake.CG_SNAPSHOT_ID)
mock_busy.assert_called_once_with(
fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID)
self.driver.zapi_client.delete_snapshot.assert_called_once_with(
fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID)
def test_delete_consistencygroup_volume_delete_failure(self):
self.mock_object(self.driver, '_delete_file',
mock.Mock(side_effect=Exception))
model_update, volumes = self.driver.delete_consistencygroup(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.CG_VOLUME])
self.assertEqual('deleted', model_update['status'])
self.assertEqual('error_deleting', volumes[0]['status'])
def test_delete_consistencygroup(self):
mock_delete_file = self.mock_object(
self.driver, '_delete_file')
model_update, volumes = self.driver.delete_consistencygroup(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.CG_VOLUME])
self.assertEqual('deleted', model_update['status'])
self.assertEqual('deleted', volumes[0]['status'])
mock_delete_file.assert_called_once_with(
fake.CG_VOLUME_ID, fake.CG_VOLUME_NAME)

View File

@ -158,6 +158,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
'netapp_aggregate': 'aggr1',
'netapp_raid_type': 'raid_dp',
'netapp_disk_type': 'SSD',
'consistencygroup_support': True,
},
}
mock_get_ssc = self.mock_object(self.driver.ssc_library,
@ -221,6 +222,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
'netapp_aggregate': 'aggr1',
'netapp_raid_type': 'raid_dp',
'netapp_disk_type': 'SSD',
'consistencygroup_support': True,
}]
self.assertEqual(expected, result)
@ -471,22 +473,21 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
assert_called_once_with('fake_qos_policy_group_info'))
def test_delete_backing_file_for_volume(self):
mock_filer_delete = self.mock_object(self.driver,
'_delete_volume_on_filer')
mock_filer_delete = self.mock_object(self.driver, '_delete_file')
mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver,
'delete_volume')
self.driver._delete_backing_file_for_volume(fake.NFS_VOLUME)
mock_filer_delete.assert_called_once_with(fake.NFS_VOLUME)
mock_filer_delete.assert_called_once_with(
fake.NFS_VOLUME['id'], fake.NFS_VOLUME['name'])
self.assertEqual(0, mock_super_delete.call_count)
@ddt.data(True, False)
def test_delete_backing_file_for_volume_exception_path(self, super_exc):
mock_exception_log = self.mock_object(nfs_cmode.LOG, 'exception')
exception_call_count = 2 if super_exc else 1
mock_filer_delete = self.mock_object(self.driver,
'_delete_volume_on_filer')
mock_filer_delete = self.mock_object(self.driver, '_delete_file')
mock_filer_delete.side_effect = [Exception]
mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver,
'delete_volume')
@ -495,20 +496,11 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.driver._delete_backing_file_for_volume(fake.NFS_VOLUME)
mock_filer_delete.assert_called_once_with(fake.NFS_VOLUME)
mock_filer_delete.assert_called_once_with(
fake.NFS_VOLUME['id'], fake.NFS_VOLUME['name'])
mock_super_delete.assert_called_once_with(fake.NFS_VOLUME)
self.assertEqual(exception_call_count, mock_exception_log.call_count)
def test_delete_volume_on_filer(self):
mock_get_vs_ip = self.mock_object(self.driver, '_get_export_ip_path')
mock_get_vs_ip.return_value = (fake.VSERVER_NAME, '/%s' % fake.FLEXVOL)
mock_zapi_delete = self.driver.zapi_client.delete_file
self.driver._delete_volume_on_filer(fake.NFS_VOLUME)
mock_zapi_delete.assert_called_once_with(
'/vol/%s/%s' % (fake.FLEXVOL, fake.NFS_VOLUME['name']))
def test_delete_snapshot(self):
mock_get_location = self.mock_object(self.driver,
'_get_provider_location')
@ -521,22 +513,21 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mock_delete_backing.assert_called_once_with(fake.test_snapshot)
def test_delete_backing_file_for_snapshot(self):
mock_filer_delete = self.mock_object(
self.driver, '_delete_snapshot_on_filer')
mock_filer_delete = self.mock_object(self.driver, '_delete_file')
mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver,
'delete_snapshot')
self.driver._delete_backing_file_for_snapshot(fake.test_snapshot)
mock_filer_delete.assert_called_once_with(fake.test_snapshot)
mock_filer_delete.assert_called_once_with(
fake.test_snapshot['volume_id'], fake.test_snapshot['name'])
self.assertEqual(0, mock_super_delete.call_count)
@ddt.data(True, False)
def test_delete_backing_file_for_snapshot_exception_path(self, super_exc):
mock_exception_log = self.mock_object(nfs_cmode.LOG, 'exception')
exception_call_count = 2 if super_exc else 1
mock_filer_delete = self.mock_object(
self.driver, '_delete_snapshot_on_filer')
mock_filer_delete = self.mock_object(self.driver, '_delete_file')
mock_filer_delete.side_effect = [Exception]
mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver,
'delete_snapshot')
@ -545,16 +536,18 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.driver._delete_backing_file_for_snapshot(fake.test_snapshot)
mock_filer_delete.assert_called_once_with(fake.test_snapshot)
mock_filer_delete.assert_called_once_with(
fake.test_snapshot['volume_id'], fake.test_snapshot['name'])
mock_super_delete.assert_called_once_with(fake.test_snapshot)
self.assertEqual(exception_call_count, mock_exception_log.call_count)
def test_delete_snapshot_on_filer(self):
def test_delete_file(self):
mock_get_vs_ip = self.mock_object(self.driver, '_get_export_ip_path')
mock_get_vs_ip.return_value = (fake.VSERVER_NAME, '/%s' % fake.FLEXVOL)
mock_zapi_delete = self.driver.zapi_client.delete_file
self.driver._delete_snapshot_on_filer(fake.test_snapshot)
self.driver._delete_file(
fake.test_snapshot['volume_id'], fake.test_snapshot['name'])
mock_zapi_delete.assert_called_once_with(
'/vol/%s/%s' % (fake.FLEXVOL, fake.test_snapshot['name']))
@ -1341,3 +1334,42 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.assertEqual('dev1', self.driver.failed_over_backend_name)
self.assertEqual('dev1', actual_active)
self.assertEqual([], vol_updates)
def test_delete_cgsnapshot(self):
mock_delete_backing_file = self.mock_object(
self.driver, '_delete_backing_file_for_snapshot')
snapshots = [fake.CG_SNAPSHOT]
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(
fake.CG_CONTEXT, fake.CG_SNAPSHOT, snapshots))
mock_delete_backing_file.assert_called_once_with(fake.CG_SNAPSHOT)
self.assertIsNone(model_update)
self.assertIsNone(snapshots_model_update)
def test_get_snapshot_backing_flexvol_names(self):
snapshots = [
{'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}},
{'volume': {'host': 'hostA@192.168.1.01#/fake/volume2'}},
{'volume': {'host': 'hostA@192.168.99.25#/fake/volume3'}},
{'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}},
]
ssc = {
'volume1': {'pool_name': '/fake/volume1', },
'volume2': {'pool_name': '/fake/volume2', },
'volume3': {'pool_name': '/fake/volume3', },
}
mock_get_ssc = self.mock_object(self.driver.ssc_library, 'get_ssc')
mock_get_ssc.return_value = ssc
hosts = [snap['volume']['host'] for snap in snapshots]
flexvols = self.driver._get_backing_flexvol_names(hosts)
mock_get_ssc.assert_called_once_with()
self.assertEqual(3, len(flexvols))
self.assertIn('volume1', flexvols)
self.assertIn('volume2', flexvols)
self.assertIn('volume3', flexvols)

View File

@ -1081,32 +1081,11 @@ class NetAppBlockStorageLibrary(object):
source_snapshot=cgsnapshot['id'])
for flexvol in flexvols:
self._handle_busy_snapshot(flexvol, cgsnapshot['id'])
self.zapi_client.wait_for_busy_snapshot(flexvol, cgsnapshot['id'])
self.zapi_client.delete_snapshot(flexvol, cgsnapshot['id'])
return None, None
@utils.retry(exception.SnapshotIsBusy)
def _handle_busy_snapshot(self, flexvol, snapshot_name):
"""Checks for and handles a busy snapshot.
If a snapshot is not busy, take no action. If a snapshot is busy for
reasons other than a clone dependency, raise immediately. Otherwise,
since we always start a clone split operation after cloning a share,
wait up to a minute for a clone dependency to clear before giving up.
"""
snapshot = self.zapi_client.get_snapshot(flexvol, snapshot_name)
if not snapshot['busy']:
LOG.info(_LI("Backing consistency group snapshot %s "
"available for deletion"), snapshot_name)
return
else:
LOG.debug('Snapshot %(snap)s for vol %(vol)s is busy, waiting '
'for volume clone dependency to clear.',
{'snap': snapshot_name, 'vol': flexvol})
raise exception.SnapshotIsBusy(snapshot_name=snapshot_name)
def delete_cgsnapshot(self, cgsnapshot, snapshots):
"""Delete LUNs backing each snapshot in the cgsnapshot.

View File

@ -326,14 +326,19 @@ class Client(client_base.Client):
raise exception.NotFound(_('No storage path found for export path %s')
% (export_path))
def clone_file(self, src_path, dest_path):
def clone_file(self, src_path, dest_path, source_snapshot=None):
LOG.debug("Cloning with src %(src_path)s, dest %(dest_path)s",
{'src_path': src_path, 'dest_path': dest_path})
zapi_args = {
'source-path': src_path,
'destination-path': dest_path,
'no-snap': 'true',
}
if source_snapshot:
zapi_args['snapshot-name'] = source_snapshot
clone_start = netapp_api.NaElement.create_node_with_children(
'clone-start',
**{'source-path': src_path,
'destination-path': dest_path,
'no-snap': 'true'})
'clone-start', **zapi_args)
result = self.connection.invoke_successfully(clone_start,
enable_tunneling=True)
clone_id_el = result.get_child_by_name('clone-id')

View File

@ -433,3 +433,27 @@ class Client(object):
def _commit_cg_snapshot(self, cg_id):
snapshot_commit = {'cg-id': cg_id}
self.send_request('cg-commit', snapshot_commit)
def get_snapshot(self, volume_name, snapshot_name):
"""Gets a single snapshot."""
raise NotImplementedError()
@utils.retry(exception.SnapshotIsBusy)
def wait_for_busy_snapshot(self, flexvol, snapshot_name):
"""Checks for and handles a busy snapshot.
If a snapshot is busy for reasons other than cloning, an exception is
raised immediately. Otherwise, wait for a period of time for the clone
dependency to finish before giving up. If the snapshot is not busy,
no action is taken and the method returns.
"""
snapshot = self.get_snapshot(flexvol, snapshot_name)
if not snapshot['busy']:
LOG.debug("Backing consistency group snapshot %s available for "
"deletion.", snapshot_name)
return
else:
LOG.debug("Snapshot %(snap)s for vol %(vol)s is busy, waiting "
"for volume clone dependency to clear.",
{"snap": snapshot_name, "vol": flexvol})
raise exception.SnapshotIsBusy(snapshot_name=snapshot_name)
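
The wait_for_busy_snapshot helper above relies on cinder.utils.retry to re-poll a snapshot that is still busy from a clone split. A rough, self-contained equivalent of that poll-and-retry behaviour, with a hand-rolled loop standing in for the Cinder decorator and a stubbed exception and get_snapshot callable, might look like this:

```python
import time


class SnapshotIsBusy(Exception):
    """Stand-in for cinder.exception.SnapshotIsBusy."""


def wait_for_busy_snapshot(get_snapshot, flexvol, snapshot_name,
                           retries=3, interval=3):
    """Poll until the snapshot is no longer busy, then return it.

    get_snapshot is any callable returning a dict with a 'busy' key,
    e.g. a thin wrapper around the backend's snapshot query.
    """
    for _attempt in range(retries):
        snapshot = get_snapshot(flexvol, snapshot_name)
        if not snapshot['busy']:
            return snapshot
        # Busy, most likely because the post-clone split has not finished;
        # wait and re-check, mirroring @utils.retry(exception.SnapshotIsBusy).
        time.sleep(interval)
    raise SnapshotIsBusy('snapshot %s on %s is still busy'
                         % (snapshot_name, flexvol))


if __name__ == '__main__':
    responses = iter([{'busy': True}, {'busy': False}])
    print(wait_for_busy_snapshot(lambda *_: next(responses),
                                 'vol1', 'snap1', interval=0))
```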

View File

@ -633,13 +633,15 @@ class Client(client_base.Client):
"%(junction)s ") % msg_fmt)
def clone_file(self, flex_vol, src_path, dest_path, vserver,
dest_exists=False, is_snapshot=False):
dest_exists=False, source_snapshot=None,
is_snapshot=False):
"""Clones file on vserver."""
LOG.debug("Cloning with params volume %(volume)s, src %(src_path)s, "
"dest %(dest_path)s, vserver %(vserver)s",
"dest %(dest_path)s, vserver %(vserver)s,"
"source_snapshot %(source_snapshot)s",
{'volume': flex_vol, 'src_path': src_path,
'dest_path': dest_path, 'vserver': vserver})
'dest_path': dest_path, 'vserver': vserver,
'source_snapshot': source_snapshot})
zapi_args = {
'volume': flex_vol,
'source-path': src_path,
@ -647,9 +649,10 @@ class Client(client_base.Client):
}
if is_snapshot and self.features.BACKUP_CLONE_PARAM:
zapi_args['is-backup'] = 'true'
if source_snapshot:
zapi_args['snapshot-name'] = source_snapshot
clone_create = netapp_api.NaElement.create_node_with_children(
'clone-create', **zapi_args)
major, minor = self.connection.get_api_version()
if major == 1 and minor >= 20 and dest_exists:
clone_create.add_new_child('destination-exists', 'true')
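
For reference, with the snapshot-name handling added above, cloning a file out of a CG snapshot sends a clone-create request built from arguments roughly like the dict below. The element names are taken from the surrounding code; the exact set still varies with dest_exists and the is-backup feature flag, and the values here are made-up examples.

```python
# Approximate clone-create arguments for a file cloned out of a CG snapshot.
zapi_args = {
    'volume': 'flexvol1',                 # backing flexvol
    'source-path': 'volume-aaaa',         # file captured in the CG snapshot
    'destination-path': 'snapshot-bbbb',  # clone backing the Cinder snapshot
    'snapshot-name': 'cgsnapshot-1234',   # only set when source_snapshot is passed
}
print(zapi_args)
```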

View File

@ -35,6 +35,7 @@ from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
@ -80,17 +81,17 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
def _clone_backing_file_for_volume(self, volume_name, clone_name,
volume_id, share=None,
is_snapshot=False):
is_snapshot=False,
source_snapshot=None):
"""Clone backing file for Cinder volume.
:param is_snapshot: Not used; present for method signature consistency.
"""
(_host_ip, export_path) = self._get_export_ip_path(volume_id, share)
storage_path = self.zapi_client.get_actual_path_for_export(export_path)
target_path = '%s/%s' % (storage_path, clone_name)
self.zapi_client.clone_file('%s/%s' % (storage_path, volume_name),
target_path)
target_path, source_snapshot)
def _update_volume_stats(self):
"""Retrieve stats info from vserver."""
@ -138,6 +139,7 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
pool['utilization'] = na_utils.round_down(utilization, '0.01')
pool['filter_function'] = filter_function
pool['goodness_function'] = goodness_function
pool['consistencygroup_support'] = True
pools.append(pool)
@ -216,3 +218,25 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
"""Set QoS policy on backend from volume type information."""
# 7-mode DOT does not support QoS.
return
def _get_backing_flexvol_names(self, hosts):
"""Returns a set of flexvol names."""
flexvols = set()
for host in hosts:
pool_name = volume_utils.extract_host(host, level='pool')
flexvol_name = pool_name.rsplit('/', 1)[1]
flexvols.add(flexvol_name)
return flexvols
@utils.trace_method
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Delete files backing each snapshot in the cgsnapshot.
:return: An implicit update of snapshot models that the manager will
interpret and subsequently set the model state to deleted.
"""
for snapshot in snapshots:
self._delete_file(snapshot['volume_id'], snapshot['name'])
LOG.debug("Snapshot %s deletion successful", snapshot['name'])
return None, None
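
The new _get_backing_flexvol_names above derives each flexvol name from the Cinder host string, whose pool component is the NFS export path. A small illustration of that parsing, using plain string splitting as a stand-in for volume_utils.extract_host:

```python
def backing_flexvol_name(host):
    """Pull the flexvol name out of a 'host@backend#/export/path' string."""
    # extract_host(host, level='pool') returns the portion after '#'.
    pool_name = host.split('#', 1)[1]
    # The last component of the export path is the flexvol name.
    return pool_name.rsplit('/', 1)[1]


print(backing_flexvol_name('hostA@192.168.99.25#/fake/volume1'))  # -> volume1
```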

View File

@ -235,13 +235,19 @@ class NetAppNfsDriver(driver.ManageableVD,
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
nfs_mount = self._get_provider_location(snapshot.volume_id)
self._delete_file(snapshot.volume_id, snapshot.name)
if self._volume_not_present(nfs_mount, snapshot.name):
return True
def _delete_file(self, file_id, file_name):
nfs_share = self._get_provider_location(file_id)
self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name),
run_as_root=self._execute_as_root)
if self._volume_not_present(nfs_share, file_name):
LOG.debug('File %(file_name)s not found when attempting to delete '
'from share %(share)s',
{'file_name': file_name, 'share': nfs_share})
return
path = self._get_volume_path(nfs_share, file_name)
self._delete(path)
def _get_volume_location(self, volume_id):
"""Returns NFS mount address as <nfs_ip_address>:<nfs_mount_dir>."""
@ -251,10 +257,15 @@ class NetAppNfsDriver(driver.ManageableVD,
def _clone_backing_file_for_volume(self, volume_name, clone_name,
volume_id, share=None,
is_snapshot=False):
is_snapshot=False,
source_snapshot=None):
"""Clone backing file for Cinder volume."""
raise NotImplementedError()
def _get_backing_flexvol_names(self, hosts):
"""Returns a set of flexvol names."""
raise NotImplementedError()
def _get_provider_location(self, volume_id):
"""Returns provider location for given volume."""
volume = self.db.volume_get(self._context, volume_id)
@ -1001,3 +1012,139 @@ class NetAppNfsDriver(driver.ManageableVD,
vol_path = os.path.join(volume['provider_location'], vol_str)
LOG.info(_LI("Cinder NFS volume with current path \"%(cr)s\" is "
"no longer being managed."), {'cr': vol_path})
@utils.trace_method
def create_consistencygroup(self, context, group):
"""Driver entry point for creating a consistency group.
ONTAP does not maintain an actual CG construct. As a result, no
communication to the backend is necessary for consistency group
creation.
:return: Hard-coded model update for consistency group model.
"""
model_update = {'status': 'available'}
return model_update
@utils.trace_method
def delete_consistencygroup(self, context, group, volumes):
"""Driver entry point for deleting a consistency group.
:return: Updated consistency group model and list of volume models
for the volumes that were deleted.
"""
model_update = {'status': 'deleted'}
volumes_model_update = []
for volume in volumes:
try:
self._delete_file(volume['id'], volume['name'])
volumes_model_update.append(
{'id': volume['id'], 'status': 'deleted'})
except Exception:
volumes_model_update.append(
{'id': volume['id'], 'status': 'error_deleting'})
LOG.exception(_LE("Volume %(vol)s in the consistency group "
"could not be deleted."), {'vol': volume})
return model_update, volumes_model_update
@utils.trace_method
def update_consistencygroup(self, context, group, add_volumes=None,
remove_volumes=None):
"""Driver entry point for updating a consistency group.
Since no actual CG construct is ever created in ONTAP, it is not
necessary to update any metadata on the backend. Because this is a no-op,
there is guaranteed to be no change in any of the volumes' statuses.
"""
return None, None, None
@utils.trace_method
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a Cinder cgsnapshot object.
The Cinder cgsnapshot object is created by making use of an ONTAP CG
snapshot in order to provide write-order consistency for a set of
backing flexvols. First, a list of the flexvols backing the given
Cinder volumes in the CG is determined. An ONTAP CG snapshot of the
flexvols creates a write-order consistent snapshot of each backing
flexvol. For each Cinder volume in the CG, its backing file is then
cloned from the ONTAP CG snapshot. The naming convention used for the
clones indicates each clone's role as a Cinder snapshot and its
inclusion in a Cinder CG snapshot. The ONTAP CG snapshot on each
backing flexvol is deleted after the cloning operation completes.
:return: An implicit update for the cgsnapshot and snapshot models that
is then used by the manager to set the models to available.
"""
hosts = [snapshot['volume']['host'] for snapshot in snapshots]
flexvols = self._get_backing_flexvol_names(hosts)
# Create snapshot for backing flexvol
self.zapi_client.create_cg_snapshot(flexvols, cgsnapshot['id'])
# Start clone process for snapshot files
for snapshot in snapshots:
self._clone_backing_file_for_volume(
snapshot['volume']['name'], snapshot['name'],
snapshot['volume']['id'], source_snapshot=cgsnapshot['id'])
# Delete backing flexvol snapshots
for flexvol_name in flexvols:
self.zapi_client.wait_for_busy_snapshot(
flexvol_name, cgsnapshot['id'])
self.zapi_client.delete_snapshot(flexvol_name, cgsnapshot['id'])
return None, None
@utils.trace_method
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Delete files backing each snapshot in the cgsnapshot."""
raise NotImplementedError()
@utils.trace_method
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
"""Creates a CG from a either a cgsnapshot or group of cinder vols.
:return: An implicit update for the volumes model that is
interpreted by the manager as a successful operation.
"""
LOG.debug("VOLUMES %s ", [dict(vol) for vol in volumes])
model_update = None
if cgsnapshot:
vols = zip(volumes, snapshots)
for volume, snapshot in vols:
self.create_volume_from_snapshot(volume, snapshot)
elif source_cg and source_vols:
hosts = [source_vol['host'] for source_vol in source_vols]
flexvols = self._get_backing_flexvol_names(hosts)
# Create snapshot for backing flexvol
snapshot_name = 'snapshot-temp-' + source_cg['id']
self.zapi_client.create_cg_snapshot(flexvols, snapshot_name)
# Start clone process for new volumes
vols = zip(volumes, source_vols)
for volume, source_vol in vols:
self._clone_backing_file_for_volume(
source_vol['name'], volume['name'],
source_vol['id'], source_snapshot=snapshot_name)
# Delete backing flexvol snapshots
for flexvol_name in flexvols:
self.zapi_client.wait_for_busy_snapshot(
flexvol_name, snapshot_name)
self.zapi_client.delete_snapshot(flexvol_name, snapshot_name)
else:
LOG.error(_LE("Unexpected set of parameters received when "
"creating consistency group from source."))
model_update = {}
model_update['status'] = 'error'
return model_update, None

View File

@ -170,7 +170,8 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
def _clone_backing_file_for_volume(self, volume_name, clone_name,
volume_id, share=None,
is_snapshot=False):
is_snapshot=False,
source_snapshot=None):
"""Clone backing file for Cinder volume."""
(vserver, exp_volume) = self._get_vserver_and_exp_vol(volume_id, share)
self.zapi_client.clone_file(exp_volume, volume_name, clone_name,
@ -237,6 +238,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
# Add driver capabilities and config info
pool['QoS_support'] = True
pool['consistencygroup_support'] = True
# Add up-to-date capacity info
nfs_share = ssc_vol_info['pool_name']
@ -366,6 +368,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
return ssc_vol_name
return None
@utils.trace_method
def delete_volume(self, volume):
"""Deletes a logical volume."""
self._delete_backing_file_for_volume(volume)
@ -383,9 +386,9 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
"""Deletes file on nfs share that backs a cinder volume."""
try:
LOG.debug('Deleting backing file for volume %s.', volume['id'])
self._delete_volume_on_filer(volume)
self._delete_file(volume['id'], volume['name'])
except Exception:
LOG.exception(_LE('Could not do delete of volume %s on filer, '
LOG.exception(_LE('Could not delete volume %s on backend, '
'falling back to exec of "rm" command.'),
volume['id'])
try:
@ -394,43 +397,35 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
LOG.exception(_LE('Exec of "rm" command on backing file for '
'%s was unsuccessful.'), volume['id'])
def _delete_volume_on_filer(self, volume):
(_vserver, flexvol) = self._get_export_ip_path(volume_id=volume['id'])
path_on_filer = '/vol' + flexvol + '/' + volume['name']
LOG.debug('Attempting to delete backing file %s for volume %s on '
'filer.', path_on_filer, volume['id'])
self.zapi_client.delete_file(path_on_filer)
def _delete_file(self, file_id, file_name):
(_vserver, flexvol) = self._get_export_ip_path(volume_id=file_id)
path_on_backend = '/vol' + flexvol + '/' + file_name
LOG.debug('Attempting to delete file %(path)s for ID %(file_id)s on '
'backend.', {'path': path_on_backend, 'file_id': file_id})
self.zapi_client.delete_file(path_on_backend)
@utils.trace_method
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
self._delete_backing_file_for_snapshot(snapshot)
@utils.trace_method
def _delete_backing_file_for_snapshot(self, snapshot):
"""Deletes file on nfs share that backs a cinder volume."""
try:
LOG.debug('Deleting backing file for snapshot %s.', snapshot['id'])
self._delete_snapshot_on_filer(snapshot)
self._delete_file(snapshot['volume_id'], snapshot['name'])
except Exception:
LOG.exception(_LE('Could not do delete of snapshot %s on filer, '
LOG.exception(_LE('Could not delete snapshot %s on backend, '
'falling back to exec of "rm" command.'),
snapshot['id'])
try:
# delete_file_from_share
super(NetAppCmodeNfsDriver, self).delete_snapshot(snapshot)
except Exception:
LOG.exception(_LE('Exec of "rm" command on backing file for'
' %s was unsuccessful.'), snapshot['id'])
@utils.trace_method
def _delete_snapshot_on_filer(self, snapshot):
(_vserver, flexvol) = self._get_export_ip_path(
volume_id=snapshot['volume_id'])
path_on_filer = '/vol' + flexvol + '/' + snapshot['name']
LOG.debug('Attempting to delete backing file %s for snapshot %s '
'on filer.', path_on_filer, snapshot['id'])
self.zapi_client.delete_file(path_on_filer)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
copy_success = False
@ -653,6 +648,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
if os.path.exists(dst_img_local):
self._delete_file_at_path(dst_img_local)
@utils.trace_method
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
@ -678,3 +674,30 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
"""Failover a backend to a secondary replication target."""
return self._failover_host(volumes, secondary_id=secondary_id)
def _get_backing_flexvol_names(self, hosts):
"""Returns a set of flexvol names."""
flexvols = set()
ssc = self.ssc_library.get_ssc()
for host in hosts:
pool_name = volume_utils.extract_host(host, level='pool')
for flexvol_name, ssc_volume_data in ssc.items():
if ssc_volume_data['pool_name'] == pool_name:
flexvols.add(flexvol_name)
return flexvols
@utils.trace_method
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Delete files backing each snapshot in the cgsnapshot.
:return: An implicit update of snapshot models that the manager will
interpret and subsequently set the model state to deleted.
"""
for snapshot in snapshots:
self._delete_backing_file_for_snapshot(snapshot)
LOG.debug("Snapshot %s deletion successful", snapshot['name'])
return None, None
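
The clustered ONTAP driver resolves the same host strings through its SSC cache instead, matching the pool name against each flexvol's pool_name entry. A compact stand-in for that lookup, with fake SSC data modeled on the unit test above:

```python
def backing_flexvol_names(hosts, ssc):
    """Map Cinder host strings to flexvol names via (fake) SSC data."""
    flexvols = set()
    for host in hosts:
        pool_name = host.split('#', 1)[1]  # stand-in for extract_host(level='pool')
        for flexvol_name, ssc_volume_data in ssc.items():
            if ssc_volume_data['pool_name'] == pool_name:
                flexvols.add(flexvol_name)
    return flexvols


ssc = {'volume1': {'pool_name': '/fake/volume1'},
       'volume2': {'pool_name': '/fake/volume2'}}
hosts = ['hostA@backend#/fake/volume1', 'hostA@backend#/fake/volume2',
         'hostA@backend#/fake/volume1']
print(sorted(backing_flexvol_names(hosts, ssc)))  # ['volume1', 'volume2']
```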

View File

@ -0,0 +1,3 @@
---
features:
- Added Cinder consistency group support for the NetApp NFS driver.