Allow drivers to set share size after revert to snapshot
For the NetApp ONTAP driver, revert-to-snapshot previously set the share size to the larger of the original share size and the snapshot size once the revert completed. This differs from users' and Manila's expectation that the share size be updated to the snapshot size.

Closes-bug: #2064502
Change-Id: I6a1b87eabeb7bef80725a537acb6771c35b76afc
This commit is contained in:
parent
4bf505404a
commit
28908d69da
@ -1831,6 +1831,10 @@ class NetAppCmodeFileStorageLibrary(object):
|
|||||||
self._get_backend_snapshot_name(snapshot['id']))
|
self._get_backend_snapshot_name(snapshot['id']))
|
||||||
LOG.debug('Restoring snapshot %s', snapshot_name)
|
LOG.debug('Restoring snapshot %s', snapshot_name)
|
||||||
vserver_client.restore_snapshot(share_name, snapshot_name)
|
vserver_client.restore_snapshot(share_name, snapshot_name)
|
||||||
|
volume = vserver_client.get_volume(share_name)
|
||||||
|
|
||||||
|
# When calculating the size, round up to the next GB.
|
||||||
|
return int(math.ceil(float(volume['size']) / units.Gi))
|
||||||
|
|
||||||
@na_utils.trace
|
@na_utils.trace
|
||||||
def delete_snapshot(self, context, snapshot, share_server=None,
|
def delete_snapshot(self, context, snapshot, share_server=None,
|
||||||
|
@ -3463,7 +3463,8 @@ class ShareManager(manager.SchedulerDependentManager):
|
|||||||
context, snapshot_instance, snapshot=snapshot)
|
context, snapshot_instance, snapshot=snapshot)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self.driver.revert_to_snapshot(context,
|
updated_share_size = self.driver.revert_to_snapshot(
|
||||||
|
context,
|
||||||
snapshot_instance_dict,
|
snapshot_instance_dict,
|
||||||
share_access_rules,
|
share_access_rules,
|
||||||
snapshot_access_rules,
|
snapshot_access_rules,
|
||||||
@ -3496,15 +3497,37 @@ class ShareManager(manager.SchedulerDependentManager):
|
|||||||
resource_id=share_id,
|
resource_id=share_id,
|
||||||
exception=excep)
|
exception=excep)
|
||||||
|
|
||||||
|
# fail-safe in case driver returned size is None or invalid
|
||||||
|
if not updated_share_size:
|
||||||
|
updated_share_size = snapshot['size']
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
int(updated_share_size)
|
||||||
|
except ValueError:
|
||||||
|
updated_share_size = snapshot['size']
|
||||||
|
|
||||||
if reservations:
|
if reservations:
|
||||||
|
if updated_share_size == snapshot['size']:
|
||||||
QUOTAS.commit(
|
QUOTAS.commit(
|
||||||
context, reservations, project_id=project_id, user_id=user_id,
|
context, reservations, project_id=project_id,
|
||||||
share_type_id=share_type_id,
|
user_id=user_id, share_type_id=share_type_id,
|
||||||
)
|
)
|
||||||
|
else:
|
||||||
|
# NOTE(kpdev): The driver tells us that the share size wasn't
|
||||||
|
# modified to the snapshot's size; so no need to commit quota
|
||||||
|
# changes
|
||||||
|
QUOTAS.rollback(
|
||||||
|
context, reservations, project_id=project_id,
|
||||||
|
user_id=user_id, share_type_id=share_type_id,
|
||||||
|
)
|
||||||
|
if updated_share_size != share['size']:
|
||||||
|
LOG.error("Driver returned an unexpected size %d on "
|
||||||
|
"revert to snapshot operation. You need to "
|
||||||
|
"adjust the quota", updated_share_size)
|
||||||
|
|
||||||
self.db.share_update(
|
self.db.share_update(
|
||||||
context, share_id,
|
context, share_id,
|
||||||
{'status': constants.STATUS_AVAILABLE, 'size': snapshot['size']})
|
{'status': constants.STATUS_AVAILABLE, 'size': updated_share_size})
|
||||||
self.db.share_snapshot_update(
|
self.db.share_snapshot_update(
|
||||||
context, snapshot_id, {'status': constants.STATUS_AVAILABLE})
|
context, snapshot_id, {'status': constants.STATUS_AVAILABLE})
|
||||||
|
|
||||||
|
@ -2449,6 +2449,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
|
|||||||
'_get_vserver',
|
'_get_vserver',
|
||||||
mock.Mock(return_value=(fake.VSERVER1,
|
mock.Mock(return_value=(fake.VSERVER1,
|
||||||
vserver_client)))
|
vserver_client)))
|
||||||
|
vserver_client.get_volume.return_value = fake.FLEXVOL_TO_MANAGE
|
||||||
fake_snapshot = copy.deepcopy(fake.SNAPSHOT)
|
fake_snapshot = copy.deepcopy(fake.SNAPSHOT)
|
||||||
if use_snap_provider_location:
|
if use_snap_provider_location:
|
||||||
fake_snapshot['provider_location'] = 'fake-provider-location'
|
fake_snapshot['provider_location'] = 'fake-provider-location'
|
||||||
@ -2458,7 +2459,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
|
|||||||
result = self.library.revert_to_snapshot(
|
result = self.library.revert_to_snapshot(
|
||||||
self.context, fake_snapshot, share_server=fake.SHARE_SERVER)
|
self.context, fake_snapshot, share_server=fake.SHARE_SERVER)
|
||||||
|
|
||||||
self.assertIsNone(result)
|
self.assertIsNotNone(result)
|
||||||
share_name = self.library._get_backend_share_name(
|
share_name = self.library._get_backend_share_name(
|
||||||
fake_snapshot['share_id'])
|
fake_snapshot['share_id'])
|
||||||
snapshot_name = (self.library._get_backend_snapshot_name(
|
snapshot_name = (self.library._get_backend_snapshot_name(
|
||||||
|
@ -7930,9 +7930,12 @@ class ShareManagerTestCase(test.TestCase):
|
|||||||
mock.ANY, share, snapshot, reservations, share_access_rules,
|
mock.ANY, share, snapshot, reservations, share_access_rules,
|
||||||
snapshot_access_rules, share_id=share_id)
|
snapshot_access_rules, share_id=share_id)
|
||||||
|
|
||||||
@ddt.data(None, 'fake_reservations')
|
@ddt.data((None, False),
|
||||||
def test__revert_to_snapshot(self, reservations):
|
(None, True),
|
||||||
|
('fake_reservations', False),
|
||||||
|
('fake_reservations', True))
|
||||||
|
@ddt.unpack
|
||||||
|
def test__revert_to_snapshot(self, reservations, revert_return_share_size):
|
||||||
mock_quotas_rollback = self.mock_object(quota.QUOTAS, 'rollback')
|
mock_quotas_rollback = self.mock_object(quota.QUOTAS, 'rollback')
|
||||||
mock_quotas_commit = self.mock_object(quota.QUOTAS, 'commit')
|
mock_quotas_commit = self.mock_object(quota.QUOTAS, 'commit')
|
||||||
self.mock_object(
|
self.mock_object(
|
||||||
@ -7944,14 +7947,15 @@ class ShareManagerTestCase(test.TestCase):
|
|||||||
share = fakes.fake_share(
|
share = fakes.fake_share(
|
||||||
id=share_id, instance={'id': 'fake_instance_id',
|
id=share_id, instance={'id': 'fake_instance_id',
|
||||||
'share_type_id': 'fake_share_type_id'},
|
'share_type_id': 'fake_share_type_id'},
|
||||||
project_id='fake_project', user_id='fake_user', size=2)
|
project_id='fake_project', user_id='fake_user',
|
||||||
|
size=5 if revert_return_share_size else 3)
|
||||||
snapshot_instance = fakes.fake_snapshot_instance(
|
snapshot_instance = fakes.fake_snapshot_instance(
|
||||||
share_id=share_id, share=share, name='fake_snapshot',
|
share_id=share_id, share=share, name='fake_snapshot',
|
||||||
share_instance=share['instance'])
|
share_instance=share['instance'])
|
||||||
snapshot = fakes.fake_snapshot(
|
snapshot = fakes.fake_snapshot(
|
||||||
id='fake_snapshot_id', share_id=share_id, share=share,
|
id='fake_snapshot_id', share_id=share_id, share=share,
|
||||||
instance=snapshot_instance, project_id='fake_project',
|
instance=snapshot_instance, project_id='fake_project',
|
||||||
user_id='fake_user', size=1)
|
user_id='fake_user', size=4)
|
||||||
share_access_rules = []
|
share_access_rules = []
|
||||||
snapshot_access_rules = []
|
snapshot_access_rules = []
|
||||||
|
|
||||||
@ -7965,6 +7969,8 @@ class ShareManagerTestCase(test.TestCase):
|
|||||||
self.share_manager.db, 'share_update')
|
self.share_manager.db, 'share_update')
|
||||||
mock_share_snapshot_update = self.mock_object(
|
mock_share_snapshot_update = self.mock_object(
|
||||||
self.share_manager.db, 'share_snapshot_update')
|
self.share_manager.db, 'share_snapshot_update')
|
||||||
|
mock_driver.revert_to_snapshot.return_value = (
|
||||||
|
5 if revert_return_share_size else None)
|
||||||
|
|
||||||
self.share_manager._revert_to_snapshot(self.context, share, snapshot,
|
self.share_manager._revert_to_snapshot(self.context, share, snapshot,
|
||||||
reservations,
|
reservations,
|
||||||
@ -7978,8 +7984,15 @@ class ShareManagerTestCase(test.TestCase):
|
|||||||
share_access_rules, snapshot_access_rules,
|
share_access_rules, snapshot_access_rules,
|
||||||
share_server=None)
|
share_server=None)
|
||||||
|
|
||||||
self.assertFalse(mock_quotas_rollback.called)
|
|
||||||
if reservations:
|
if reservations:
|
||||||
|
if revert_return_share_size:
|
||||||
|
mock_quotas_rollback.assert_called_once_with(
|
||||||
|
mock.ANY, reservations, project_id='fake_project',
|
||||||
|
user_id='fake_user',
|
||||||
|
share_type_id=(
|
||||||
|
snapshot_instance['share_instance']['share_type_id']))
|
||||||
|
else:
|
||||||
|
self.assertFalse(mock_quotas_rollback.called)
|
||||||
mock_quotas_commit.assert_called_once_with(
|
mock_quotas_commit.assert_called_once_with(
|
||||||
mock.ANY, reservations, project_id='fake_project',
|
mock.ANY, reservations, project_id='fake_project',
|
||||||
user_id='fake_user',
|
user_id='fake_user',
|
||||||
@ -7990,7 +8003,8 @@ class ShareManagerTestCase(test.TestCase):
|
|||||||
|
|
||||||
mock_share_update.assert_called_once_with(
|
mock_share_update.assert_called_once_with(
|
||||||
mock.ANY, share_id,
|
mock.ANY, share_id,
|
||||||
{'status': constants.STATUS_AVAILABLE, 'size': snapshot['size']})
|
{'status': constants.STATUS_AVAILABLE,
|
||||||
|
'size': 5 if revert_return_share_size else 4})
|
||||||
mock_share_snapshot_update.assert_called_once_with(
|
mock_share_snapshot_update.assert_called_once_with(
|
||||||
mock.ANY, 'fake_snapshot_id',
|
mock.ANY, 'fake_snapshot_id',
|
||||||
{'status': constants.STATUS_AVAILABLE})
|
{'status': constants.STATUS_AVAILABLE})
|
||||||
|
@ -0,0 +1,9 @@
|
|||||||
|
---
fixes:
  - |
    When reverting shares to snapshots that are larger or smaller than
    the share, some storage systems such as NetApp ONTAP always defer
    to the larger of the two sizes. Manila's share manager interface now
    accounts for this behavior, and adjusts project quotas appropriately.
    For more details, please check
    Launchpad `bug 2064502 <https://bugs.launchpad.net/manila/+bug/2064502>`_
|
Loading…
Reference in New Issue
Block a user