NetApp cDOT: Fix model update for cheesecake volumes

Host-level replication capability was added to the NetApp
cDOT Block and File drivers in change
I87b92e76d0d5022e9be610b9e237b89417309c05.
However, volumes created on these backends did not
have the 'replication_status' field set.

Fix this by returning model updates to the volume manager.

Change-Id: If1d24e87413e2c829bc5a701293fb3be67412155
Closes-Bug: #1622057
Authored by Goutham Pacha Ravi on 2016-09-09 21:21:04 -04:00; committed by Goutham Pacha Ravi
parent 27d78f39a0
commit df284e68f9
15 changed files with 261 additions and 64 deletions
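Before the per-file hunks, it may help to see the shape of the change in one place. The sketch below is condensed from the diff that follows; it is not the complete driver code, and method bodies are elided. self.replication_enabled is the flag the cDOT libraries already maintain for host-level replication.

from cinder.objects import fields


class NetAppBlockStorageLibrary(object):

    def _get_volume_model_update(self, volume):
        """Provide any updates necessary for a volume being created/managed."""
        raise NotImplementedError

    def create_volume(self, volume):
        # ...provision the LUN exactly as before, then surface the
        # model update to the volume manager instead of dropping it.
        return self._get_volume_model_update(volume)


class NetAppBlockStorageCmodeLibrary(NetAppBlockStorageLibrary):

    def _get_volume_model_update(self, volume):
        # cDOT: flag volumes that land on a replicated backend.
        if self.replication_enabled:
            return {'replication_status': fields.ReplicationStatus.ENABLED}


class NetAppBlockStorage7modeLibrary(NetAppBlockStorageLibrary):

    def _get_volume_model_update(self, volume):
        """7-mode has no host-level replication, so no update is needed."""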

View File

@@ -141,6 +141,11 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
self.assertRaises(exception.VolumeBackendAPIException,
self.library.check_for_setup_error)
def test__get_volume_model_update(self):
"""Driver is not expected to return a model update."""
self.assertIsNone(
self.library._get_volume_model_update(fake.VOLUME_REF))
@ddt.data(None, fake.VFILER)
def test__get_owner(self, vfiler):
self.library.configuration.netapp_server_hostname = 'openstack'

View File

@@ -31,9 +31,12 @@ from oslo_log import versionutils
from oslo_utils import units
import six
from cinder import context
from cinder import exception
from cinder.i18n import _LW
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap import block_base
@@ -58,6 +61,7 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
self.library.zapi_client = mock.Mock()
self.zapi_client = self.library.zapi_client
self.mock_request = mock.Mock()
self.ctxt = context.RequestContext('fake', 'fake', auth_token=True)
def tearDown(self):
super(NetAppBlockStorageLibraryTestCase, self).tearDown()
@@ -130,14 +134,17 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
self.mock_object(self.library, '_create_lun_handle')
self.mock_object(self.library, '_add_lun_to_table')
self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
self.mock_object(self.library, '_get_volume_model_update')
self.library.create_volume(fake.VOLUME)
self.library._create_lun.assert_called_once_with(
fake.POOL_NAME, fake.LUN_NAME, volume_size_in_bytes,
fake.LUN_METADATA, None)
self.assertEqual(0, self.library.
_mark_qos_policy_group_for_deletion.call_count)
self.library._get_volume_model_update.assert_called_once_with(
fake.VOLUME)
self.assertEqual(
0, self.library._mark_qos_policy_group_for_deletion.call_count)
self.assertEqual(0, block_base.LOG.error.call_count)
def test_create_volume_no_pool(self):
@@ -556,6 +563,72 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
self.library._get_existing_vol_with_manage_ref.assert_called_once_with(
{'ref': 'ref'})
@ddt.data(None,
{'replication_status': fields.ReplicationStatus.ENABLED})
def test_manage_existing_lun_name_matches(self, model_update):
volume = fake_volume.fake_volume_obj(self.ctxt)
existing_ref = {'source-name': 'fake_path'}
mock_lun = block_base.NetAppLun(
volume['name'], volume['name'], '3',
{'UUID': 'fake_uuid', 'Path': 'p'})
self.mock_object(self.library, '_get_existing_vol_with_manage_ref',
mock.Mock(return_value=mock_lun))
self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
return_value=fake.EXTRA_SPECS))
self.mock_object(self.library, '_check_volume_type_for_lun',
mock.Mock(return_value=True))
self.mock_object(self.library, '_setup_qos_for_volume')
self.mock_object(na_utils, 'get_qos_policy_group_name_from_info',
mock.Mock(return_value=None))
self.mock_object(self.library, '_add_lun_to_table')
self.mock_object(self.library, '_get_volume_model_update',
mock.Mock(return_value=model_update))
mock_info_log = self.mock_object(block_base.LOG, 'info')
actual_update = self.library.manage_existing(volume, existing_ref)
self.assertEqual(model_update, actual_update)
self.assertEqual(2, mock_info_log.call_count)
self.library._add_lun_to_table.assert_called_once_with(mock_lun)
@ddt.data(None, 'fake_qos_policy_group_name')
def test_manage_existing_rename_lun(self, qos_policy_group_name):
expected_update = (
{'replication_status': fields.ReplicationStatus.ENABLED})
volume = fake_volume.fake_volume_obj(self.ctxt)
existing_ref = {'source-name': 'fake_path'}
mock_lun = block_base.NetAppLun(
'lun0', 'lun0', '3', {'UUID': 'fake_uuid', 'Path': fake.LUN_PATH})
self.mock_object(self.library, '_get_existing_vol_with_manage_ref',
mock.Mock(return_value=mock_lun))
self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
return_value=fake.EXTRA_SPECS))
self.mock_object(self.library, '_check_volume_type_for_lun',
mock.Mock(return_value=True))
self.mock_object(self.library, '_setup_qos_for_volume')
self.mock_object(na_utils, 'get_qos_policy_group_name_from_info',
mock.Mock(return_value=qos_policy_group_name))
self.mock_object(self.library, '_add_lun_to_table')
self.mock_object(self.library, '_get_volume_model_update',
mock.Mock(return_value=expected_update))
self.mock_object(self.zapi_client, 'set_lun_qos_policy_group')
mock_info_log = self.mock_object(block_base.LOG, 'info')
actual_update = self.library.manage_existing(volume, existing_ref)
expected_new_path = '/vol/vol0/%s' % volume['name']
self.assertEqual(expected_update, actual_update)
self.assertEqual(1, mock_info_log.call_count)
self.library._add_lun_to_table.assert_called_once_with(mock_lun)
if qos_policy_group_name:
(self.zapi_client.set_lun_qos_policy_group.
assert_called_once_with(expected_new_path, qos_policy_group_name))
else:
self.assertFalse(
self.zapi_client.set_lun_qos_policy_group.called)
@mock.patch.object(block_base.LOG, 'info')
def test_unmanage(self, log):
mock_lun = block_base.NetAppLun('handle', 'name', '1',
@@ -845,11 +918,14 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
self.mock_object(self.library, '_extend_volume')
self.mock_object(self.library, 'delete_volume')
self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
self.mock_object(self.library, '_get_volume_model_update',
mock.Mock(return_value={'key': 'value'}))
self.library.lun_space_reservation = 'false'
self.library._clone_source_to_destination(fake.CLONE_SOURCE,
fake.CLONE_DESTINATION)
retval = self.library._clone_source_to_destination(
fake.CLONE_SOURCE, fake.CLONE_DESTINATION)
self.assertEqual({'key': 'value'}, retval)
na_utils.get_volume_extra_specs.assert_called_once_with(
fake.CLONE_DESTINATION)
self.library._setup_qos_for_volume.assert_called_once_with(
@@ -1433,6 +1509,8 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
self.library._create_lun.assert_called_once_with(
fake.POOL_NAME, fake.CG_VOLUME_NAME, volume_size_in_bytes,
fake.CG_LUN_METADATA, None)
self.library._get_volume_model_update.assert_called_once_with(
fake.CG_VOLUME)
self.assertEqual(0, self.library.
_mark_qos_policy_group_for_deletion.call_count)
self.assertEqual(0, block_base.LOG.error.call_count)
@@ -1449,6 +1527,7 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
self.mock_object(self.library, '_create_lun_handle')
self.mock_object(self.library, '_add_lun_to_table')
self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
self.mock_object(self.library, '_get_volume_model_update')
def test_create_consistency_group(self):
model_update = self.library.create_consistencygroup(
@@ -1482,12 +1561,15 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
self.assertEqual('deleted', model_update['status'])
self.assertEqual('deleted', volumes[0]['status'])
def test_create_consistencygroup_from_src_cg_snapshot(self):
@ddt.data(None,
{'replication_status': fields.ReplicationStatus.ENABLED})
def test_create_consistencygroup_from_src_cg_snapshot(self,
volume_model_update):
mock_clone_source_to_destination = self.mock_object(
self.library, '_clone_source_to_destination')
self.library, '_clone_source_to_destination', mock.Mock(
return_value=volume_model_update))
self.library.create_consistencygroup_from_src(
actual_return_value = self.library.create_consistencygroup_from_src(
fake.CONSISTENCY_GROUP, [fake.VOLUME], cgsnapshot=fake.CG_SNAPSHOT,
snapshots=[fake.CG_VOLUME_SNAPSHOT])
@@ -1497,19 +1579,25 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
}
mock_clone_source_to_destination.assert_called_once_with(
clone_source_to_destination_args, fake.VOLUME)
if volume_model_update:
volume_model_update['id'] = fake.VOLUME['id']
expected_return_value = ((None, [volume_model_update])
if volume_model_update else (None, []))
self.assertEqual(expected_return_value, actual_return_value)
def test_create_consistencygroup_from_src_cg(self):
class fake_lun_name(object):
pass
fake_lun_name_instance = fake_lun_name()
fake_lun_name_instance.name = fake.SOURCE_CG_VOLUME['name']
@ddt.data(None,
{'replication_status': fields.ReplicationStatus.ENABLED})
def test_create_consistencygroup_from_src_cg(self, volume_model_update):
lun_name = fake.SOURCE_CG_VOLUME['name']
mock_lun = block_base.NetAppLun(
lun_name, lun_name, '3', {'UUID': 'fake_uuid'})
self.mock_object(self.library, '_get_lun_from_table', mock.Mock(
return_value=fake_lun_name_instance)
)
return_value=mock_lun))
mock_clone_source_to_destination = self.mock_object(
self.library, '_clone_source_to_destination')
self.library, '_clone_source_to_destination',
mock.Mock(return_value=volume_model_update))
self.library.create_consistencygroup_from_src(
actual_return_value = self.library.create_consistencygroup_from_src(
fake.CONSISTENCY_GROUP, [fake.VOLUME],
source_cg=fake.SOURCE_CONSISTENCY_GROUP,
source_vols=[fake.SOURCE_CG_VOLUME])
@@ -1518,8 +1606,13 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
'name': fake.SOURCE_CG_VOLUME['name'],
'size': fake.SOURCE_CG_VOLUME['size'],
}
if volume_model_update:
volume_model_update['id'] = fake.VOLUME['id']
expected_return_value = ((None, [volume_model_update])
if volume_model_update else (None, []))
mock_clone_source_to_destination.assert_called_once_with(
clone_source_to_destination_args, fake.VOLUME)
self.assertEqual(expected_return_value, actual_return_value)
def test_add_looping_tasks(self):
mock_add_task = self.mock_object(self.library.loopingcalls, 'add_task')
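A note on the test changes in this file: they lean on ddt to run each test once per datum. Below is a minimal, self-contained illustration of that mechanism, using plain unittest in place of Cinder's test base class and a string literal standing in for fields.ReplicationStatus.ENABLED:

import unittest

import ddt


@ddt.ddt
class ModelUpdateTestCase(unittest.TestCase):

    @ddt.data(None, {'replication_status': 'enabled'})
    def test_model_update_is_layered(self, model_update):
        # Mirrors the pattern used above: start from the base update and,
        # when the driver reports one, layer it on top.
        expected = {'provider_location': 'fake-share'}
        if model_update:
            expected.update(model_update)
        self.assertEqual('fake-share', expected['provider_location'])


if __name__ == '__main__':
    unittest.main()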

View File

@@ -176,6 +176,11 @@ class NetApp7modeNfsDriverTestCase(test.TestCase):
self.assertEqual(expected, result)
def test__get_volume_model_update(self):
"""Driver is not expected to return a model update."""
self.assertIsNone(
self.driver._get_volume_model_update(fake.VOLUME_REF))
def test_delete_cgsnapshot(self):
mock_delete_file = self.mock_object(self.driver, '_delete_file')

View File

@@ -30,6 +30,7 @@ import shutil
from cinder import context
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
@@ -126,16 +127,22 @@ class NetAppNfsDriverTestCase(test.TestCase):
self.assertEqual('fake-share', pool)
def test_create_volume(self):
@ddt.data(None,
{'replication_status': fields.ReplicationStatus.ENABLED})
def test_create_volume(self, model_update):
self.mock_object(self.driver, '_ensure_shares_mounted')
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(self.driver, '_do_create_volume')
self.mock_object(self.driver, '_do_qos_for_volume')
self.mock_object(self.driver, '_get_volume_model_update',
mock.Mock(return_value=model_update))
expected = {'provider_location': fake.NFS_SHARE}
if model_update:
expected.update(model_update)
result = self.driver.create_volume(fake.NFS_VOLUME)
actual = self.driver.create_volume(fake.NFS_VOLUME)
self.assertEqual(expected, result)
self.assertEqual(expected, actual)
def test_create_volume_no_pool(self):
volume = copy.deepcopy(fake.NFS_VOLUME)
@@ -156,7 +163,8 @@
self.driver.create_volume,
fake.NFS_VOLUME)
def test_clone_source_to_destination_volume(self):
@ddt.data(None, {'key': 'value'})
def test_clone_source_to_destination_volume(self, model_update):
self.mock_object(self.driver, '_get_volume_location', mock.Mock(
return_value=fake.POOL_NAME))
self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
@@ -165,7 +173,11 @@
self.driver,
'_clone_with_extension_check')
self.mock_object(self.driver, '_do_qos_for_volume')
self.mock_object(self.driver, '_get_volume_model_update',
mock.Mock(return_value=model_update))
expected = {'provider_location': fake.POOL_NAME}
if model_update:
expected.update(model_update)
result = self.driver._clone_source_to_destination_volume(
fake.CLONE_SOURCE, fake.CLONE_DESTINATION)
@@ -783,7 +795,9 @@
self.driver._get_share_mount_and_vol_from_vol_ref,
vol_ref)
def test_manage_existing(self):
@ddt.data(None,
{'replication_status': fields.ReplicationStatus.ENABLED})
def test_manage_existing(self, model_update):
self.mock_object(utils, 'get_file_size',
mock.Mock(return_value=1074253824))
self.driver._mounted_shares = [self.fake_nfs_export_1]
@@ -803,10 +817,18 @@
mock_get_specs = self.mock_object(na_utils, 'get_volume_extra_specs')
mock_get_specs.return_value = {}
self.mock_object(self.driver, '_do_qos_for_volume')
self.mock_object(self.driver, '_get_volume_model_update', mock.Mock(
return_value=model_update))
location = self.driver.manage_existing(volume, vol_ref)
actual_model_update = self.driver.manage_existing(volume, vol_ref)
self.assertEqual(self.fake_nfs_export_1, location['provider_location'])
self.assertEqual(
self.fake_nfs_export_1, actual_model_update['provider_location'])
if model_update:
self.assertEqual(model_update['replication_status'],
actual_model_update['replication_status'])
else:
self.assertFalse('replication_status' in actual_model_update)
self.driver._check_volume_type.assert_called_once_with(
volume, self.fake_nfs_export_1, test_file, {})
@@ -957,28 +979,32 @@
self.assertIsNone(add_volumes_update)
self.assertIsNone(remove_volumes_update)
def test_create_consistencygroup_from_src(self):
@ddt.data(None,
{'replication_status': fields.ReplicationStatus.ENABLED})
def test_create_consistencygroup_from_src(self, volume_model_update):
volume_model_update = volume_model_update or {}
volume_model_update.update(
{'provider_location': fake.PROVIDER_LOCATION})
mock_create_volume_from_snapshot = self.mock_object(
self.driver, 'create_volume_from_snapshot',
mock.Mock(return_value={
'provider_location': fake.PROVIDER_LOCATION
}))
self.driver, 'create_volume_from_snapshot', mock.Mock(
return_value=volume_model_update))
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME],
cgsnapshot=fake.CG_SNAPSHOT, snapshots=[fake.SNAPSHOT]))
expected_volumes_model_updates = [{'id': fake.VOLUME['id']}]
expected_volumes_model_updates[0].update(volume_model_update)
mock_create_volume_from_snapshot.assert_called_once_with(
fake.VOLUME, fake.SNAPSHOT)
self.assertIsNone(model_update)
expected_update = [{
'id': fake.VOLUME['id'],
'provider_location': fake.PROVIDER_LOCATION,
}]
self.assertEqual(expected_update, volumes_model_update)
self.assertEqual(expected_volumes_model_updates, volumes_model_update)
def test_create_consistencygroup_from_src_source_vols(self):
@ddt.data(None,
{'replication_status': fields.ReplicationStatus.ENABLED})
def test_create_consistencygroup_from_src_source_vols(
self, volume_model_update):
mock_get_snapshot_flexvols = self.mock_object(
self.driver, '_get_flexvol_names_from_hosts')
mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME]))
@@ -987,6 +1013,8 @@
fake_snapshot_name = 'snapshot-temp-' + fake.CONSISTENCY_GROUP['id']
mock_busy = self.mock_object(
self.driver.zapi_client, 'wait_for_busy_snapshot')
self.mock_object(self.driver, '_get_volume_model_update',
mock.Mock(return_value=volume_model_update))
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
@@ -994,6 +1022,12 @@
source_cg=fake.CONSISTENCY_GROUP,
source_vols=[fake.NFS_VOLUME]))
expected_volumes_model_updates = [{
'id': fake.NFS_VOLUME['id'],
'provider_location': fake.PROVIDER_LOCATION,
}]
if volume_model_update:
expected_volumes_model_updates[0].update(volume_model_update)
mock_get_snapshot_flexvols.assert_called_once_with(
[fake.NFS_VOLUME['host']])
self.driver.zapi_client.create_cg_snapshot.assert_called_once_with(
@@ -1006,11 +1040,7 @@
self.driver.zapi_client.delete_snapshot.assert_called_once_with(
fake.CG_POOL_NAME, fake_snapshot_name)
self.assertIsNone(model_update)
expected_update = [{
'id': fake.NFS_VOLUME['id'],
'provider_location': fake.PROVIDER_LOCATION,
}]
self.assertEqual(expected_update, volumes_model_update)
self.assertEqual(expected_volumes_model_updates, volumes_model_update)
def test_create_consistencygroup_from_src_invalid_parms(self):

View File

@@ -126,6 +126,9 @@ class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary):
"""Add tasks that need to be executed at a fixed interval."""
super(NetAppBlockStorage7modeLibrary, self)._add_looping_tasks()
def _get_volume_model_update(self, volume):
"""Provide any updates necessary for a volume being created/managed."""
def _create_lun(self, volume_name, lun_name, size,
metadata, qos_policy_group_name=None):
"""Creates a LUN, handling Data ONTAP differences as needed."""

View File

@@ -249,9 +249,17 @@ class NetAppBlockStorageLibrary(object):
handle = self._create_lun_handle(metadata)
self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))
model_update = self._get_volume_model_update(volume)
return model_update
def _setup_qos_for_volume(self, volume, extra_specs):
return None
def _get_volume_model_update(self, volume):
"""Provide any updates necessary for a volume being created/managed."""
raise NotImplementedError
def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info):
return
@@ -349,6 +357,8 @@ class NetAppBlockStorageLibrary(object):
destination_volume['id'])
self.delete_volume(destination_volume)
return self._get_volume_model_update(destination_volume)
except Exception:
LOG.exception(_LE("Exception cloning volume %(name)s from source "
"volume %(source)s."),
@@ -694,6 +704,7 @@ class NetAppBlockStorageLibrary(object):
self.zapi_client.move_lun(path, new_path)
lun = self._get_existing_vol_with_manage_ref(
{'source-name': new_path})
if qos_policy_group_name is not None:
self.zapi_client.set_lun_qos_policy_group(new_path,
qos_policy_group_name)
@@ -703,6 +714,8 @@ class NetAppBlockStorageLibrary(object):
{'path': lun.get_metadata_property('Path'),
'uuid': lun.get_metadata_property('UUID')})
return self._get_volume_model_update(volume)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
@@ -1118,6 +1131,7 @@ class NetAppBlockStorageLibrary(object):
interpreted by the manager as a successful operation.
"""
LOG.debug("VOLUMES %s ", [dict(vol) for vol in volumes])
volume_model_updates = []
if cgsnapshot:
vols = zip(volumes, snapshots)
@@ -1127,7 +1141,11 @@
'name': snapshot['name'],
'size': snapshot['volume_size'],
}
self._clone_source_to_destination(source, volume)
volume_model_update = self._clone_source_to_destination(
source, volume)
if volume_model_update is not None:
volume_model_update['id'] = volume['id']
volume_model_updates.append(volume_model_update)
else:
vols = zip(volumes, source_vols)
@@ -1135,9 +1153,13 @@
for volume, old_src_vref in vols:
src_lun = self._get_lun_from_table(old_src_vref['name'])
source = {'name': src_lun.name, 'size': old_src_vref['size']}
self._clone_source_to_destination(source, volume)
volume_model_update = self._clone_source_to_destination(
source, volume)
if volume_model_update is not None:
volume_model_update['id'] = volume['id']
volume_model_updates.append(volume_model_update)
return None, None
return None, volume_model_updates
def _get_backing_flexvol_names(self):
"""Returns a list of backing flexvol names."""

View File

@@ -30,6 +30,7 @@ import six
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
@@ -401,6 +402,11 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
self.zapi_client.provision_qos_policy_group(qos_policy_group_info)
return qos_policy_group_info
def _get_volume_model_update(self, volume):
"""Provide any updates necessary for a volume being created/managed."""
if self.replication_enabled:
return {'replication_status': fields.ReplicationStatus.ENABLED}
def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info):
self.zapi_client.mark_qos_policy_group_for_deletion(
qos_policy_group_info)
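For context on where these return values go: the volume manager applies whatever dict the driver hands back, roughly as sketched below. This is a simplification of the create-volume flow in cinder/volume/flows/manager/create_volume.py, not a quote of it.

model_update = self.driver.create_volume(volume)
if model_update:
    volume.update(model_update)  # e.g. replication_status = 'enabled'
    volume.save()                # persisted to the database

Without the returned dict, replication_status kept its default and replicated volumes were indistinguishable from ordinary ones.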

View File

@@ -49,13 +49,13 @@ class NetApp7modeFibreChannelDriver(driver.BaseVD,
self.library.check_for_setup_error()
def create_volume(self, volume):
self.library.create_volume(volume)
return self.library.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
self.library.create_volume_from_snapshot(volume, snapshot)
return self.library.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
self.library.create_cloned_volume(volume, src_vref)
return self.library.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume):
self.library.delete_volume(volume)

View File

@@ -49,13 +49,13 @@ class NetAppCmodeFibreChannelDriver(driver.BaseVD,
self.library.check_for_setup_error()
def create_volume(self, volume):
self.library.create_volume(volume)
return self.library.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
self.library.create_volume_from_snapshot(volume, snapshot)
return self.library.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
self.library.create_cloned_volume(volume, src_vref)
return self.library.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume):
self.library.delete_volume(volume)

View File

@@ -48,13 +48,13 @@ class NetApp7modeISCSIDriver(driver.BaseVD,
self.library.check_for_setup_error()
def create_volume(self, volume):
self.library.create_volume(volume)
return self.library.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
self.library.create_volume_from_snapshot(volume, snapshot)
return self.library.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
self.library.create_cloned_volume(volume, src_vref)
return self.library.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume):
self.library.delete_volume(volume)

View File

@@ -48,13 +48,13 @@ class NetAppCmodeISCSIDriver(driver.BaseVD,
self.library.check_for_setup_error()
def create_volume(self, volume):
self.library.create_volume(volume)
return self.library.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
self.library.create_volume_from_snapshot(volume, snapshot)
return self.library.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
self.library.create_cloned_volume(volume, src_vref)
return self.library.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume):
self.library.delete_volume(volume)
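The four proxy hunks above (FC and iSCSI, each in 7-mode and cDOT flavors) are the same one-word fix. These drivers are thin wrappers over the block library; each previously called the library method and discarded its return value, so the model update computed by the library never left the driver. Adding 'return' to each wrapper is what actually propagates replication_status for block volumes.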

View File

@@ -228,6 +228,9 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
# 7-mode DOT does not support QoS.
return
def _get_volume_model_update(self, volume):
"""Provide any updates necessary for a volume being created/managed."""
def _get_backing_flexvol_names(self):
"""Returns a list of backing flexvol names."""
flexvol_names = []

View File

@@ -145,7 +145,9 @@ class NetAppNfsDriver(driver.ManageableVD,
LOG.debug('Using pool %s.', pool_name)
self._do_create_volume(volume)
self._do_qos_for_volume(volume, extra_specs)
return {'provider_location': volume['provider_location']}
model_update = self._get_volume_model_update(volume) or {}
model_update['provider_location'] = volume['provider_location']
return model_update
except Exception:
LOG.exception(_LE("Exception creating vol %(name)s on "
"pool %(pool)s."),
@@ -186,8 +188,13 @@
self._clone_with_extension_check(
source, destination_volume)
self._do_qos_for_volume(destination_volume, extra_specs)
return {'provider_location': destination_volume[
'provider_location']}
model_update = (
self._get_volume_model_update(destination_volume) or {})
model_update['provider_location'] = destination_volume[
'provider_location']
return model_update
except Exception:
LOG.exception(_LE("Exception creating volume %(name)s from source "
"%(source)s on share %(share)s."),
@@ -237,6 +244,10 @@
"""Set QoS policy on backend from volume type information."""
raise NotImplementedError()
def _get_volume_model_update(self, volume):
"""Provide any updates necessary for a volume being created/managed."""
raise NotImplementedError()
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self._clone_backing_file_for_volume(snapshot['volume_name'],
@@ -981,7 +992,11 @@
{'name': existing_vol_ref['source-name'],
'msg': six.text_type(err)})
raise exception.VolumeBackendAPIException(data=exception_msg)
return {'provider_location': nfs_share}
model_update = self._get_volume_model_update(volume) or {}
model_update['provider_location'] = nfs_share
return model_update
def manage_existing_get_size(self, volume, existing_vol_ref):
"""Returns the size of volume to be managed by manage_existing.
@@ -1140,7 +1155,8 @@
vols = zip(volumes, snapshots)
for volume, snapshot in vols:
update = self.create_volume_from_snapshot(volume, snapshot)
update = self.create_volume_from_snapshot(
volume, snapshot)
update['id'] = volume['id']
volumes_model_update.append(update)
@@ -1158,10 +1174,13 @@
self._clone_backing_file_for_volume(
source_vol['name'], volume['name'],
source_vol['id'], source_snapshot=snapshot_name)
update = {'id': volume['id'],
'provider_location': source_vol['provider_location'],
}
volumes_model_update.append(update)
volume_model_update = (
self._get_volume_model_update(volume) or {})
volume_model_update.update({
'id': volume['id'],
'provider_location': source_vol['provider_location'],
})
volumes_model_update.append(volume_model_update)
# Delete backing flexvol snapshots
for flexvol_name in flexvols:
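The recurring pattern in this file's hunks, pulled out for clarity: start from the replication-aware update, defaulting to an empty dict because the 7-mode driver's hook returns None, then layer the NFS-specific provider_location on top.

model_update = self._get_volume_model_update(volume) or {}
model_update['provider_location'] = volume['provider_location']
return model_update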

View File

@@ -32,6 +32,7 @@ from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import interface
from cinder.objects import fields
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
@@ -162,6 +163,11 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
LOG.debug("Cleaning volume %s", volume['id'])
self._cleanup_volume_on_failure(volume)
def _get_volume_model_update(self, volume):
"""Provide model updates for a volume being created."""
if self.replication_enabled:
return {'replication_status': fields.ReplicationStatus.ENABLED}
def _set_qos_policy_group_on_volume(self, volume, qos_policy_group_info):
if qos_policy_group_info is None:
return

View File

@@ -0,0 +1,5 @@
---
fixes:
- The NetApp cDOT driver now sets the ``replication_status`` attribute
appropriately on volumes created on replicated backends when using
host-level replication.