Pass access rules to driver on snapshot revert
In order to revert to a snapshot in the LVM driver (and very likely other drivers), the list of access rules is needed, so this change modifies the driver interface to provide this extra information. The change also requires blocking a revert-to-snapshot operation while the access rules on the affected share are out of sync.

Closes-Bug: #1658133
Change-Id: Ia6678bb0e484f9c8f8b05d90e514801ae9baa94b
parent 7c2c97d725
commit a7d8363d32
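For third-party driver maintainers, the practical impact of this change is the extra access_rules argument on revert_to_snapshot (and revert_to_replicated_snapshot). The following is a minimal sketch of how an out-of-tree driver might adopt the new signature, loosely following the pattern the LVM driver uses below (drop access, revert, re-apply the rules). ExampleShareDriver and its _deny_all_access, _do_revert_in_backend, and _apply_access helpers are illustrative placeholders and not part of this change; only the revert_to_snapshot signature reflects the new interface.

    # Illustrative sketch only: the class and helper methods are placeholders.
    from manila.share import driver


    class ExampleShareDriver(driver.ShareDriver):

        def revert_to_snapshot(self, context, snapshot, access_rules,
                               share_server=None):
            """Revert a share (in place) to the given snapshot."""
            share = snapshot['share']
            # Some backends cannot safely revert an exported share, so drop
            # access first, mirroring what the LVM driver does.
            self._deny_all_access(share, share_server)
            try:
                self._do_revert_in_backend(share, snapshot, share_server)
            finally:
                # Re-apply the rules the share manager now passes in, so the
                # share comes back with the access it had before the revert.
                self._apply_access(share, access_rules, share_server)

        def _deny_all_access(self, share, share_server):
            """Placeholder: remove all exports/access on the backend."""

        def _do_revert_in_backend(self, share, snapshot, share_server):
            """Placeholder: perform the actual backend revert operation."""

        def _apply_access(self, share, access_rules, share_server):
            """Placeholder: re-add the given access rules on the backend."""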
@@ -129,6 +129,20 @@ class ShareController(shares.ShareMixin,
                         "%(latest_snap_id)s.")
             raise exc.HTTPConflict(explanation=msg % msg_args)
 
+        # Ensure the access rules are not in the process of updating
+        for instance in share['instances']:
+            access_rules_status = instance['access_rules_status']
+            if access_rules_status != constants.ACCESS_STATE_ACTIVE:
+                msg_args = {
+                    'share_id': share_id,
+                    'snap_id': snapshot_id,
+                    'state': constants.ACCESS_STATE_ACTIVE
+                }
+                msg = _("Snapshot %(snap_id)s belongs to a share "
+                        "%(share_id)s which has access rules that are "
+                        "not %(state)s.")
+                raise exc.HTTPConflict(explanation=msg % msg_args)
+
         msg_args = {'share_id': share_id, 'snap_id': snapshot_id}
         msg = _LI('Reverting share %(share_id)s to snapshot %(snap_id)s.')
         LOG.info(msg, msg_args)
@@ -55,7 +55,7 @@ class ShareInstanceAccessDatabaseMixin(object):
 
     @locked_access_rules_operation
     def get_and_update_share_instance_access_rules_status(
-            self, context, status=None, conditionally_change={},
+            self, context, status=None, conditionally_change=None,
             share_instance_id=None):
         """Get and update the access_rules_status of a share instance.
 
@@ -73,7 +73,7 @@ class ShareInstanceAccessDatabaseMixin(object):
         """
         if status is not None:
             updates = {'access_rules_status': status}
-        else:
+        elif conditionally_change:
             share_instance = self.db.share_instance_get(
                 context, share_instance_id)
             access_rules_status = share_instance['access_rules_status']
@@ -84,6 +84,8 @@ class ShareInstanceAccessDatabaseMixin(object):
                 }
             except KeyError:
                 updates = {}
+        else:
+            updates = {}
         if updates:
             share_instance = self.db.share_instance_update(
                 context, share_instance_id, updates, with_share_data=True)
@@ -91,8 +93,8 @@ class ShareInstanceAccessDatabaseMixin(object):
 
     @locked_access_rules_operation
     def get_and_update_share_instance_access_rules(self, context,
-                                                   filters={}, updates={},
-                                                   conditionally_change={},
+                                                   filters=None, updates=None,
+                                                   conditionally_change=None,
                                                    share_instance_id=None):
         """Get and conditionally update all access rules of a share instance.
 
@@ -130,6 +132,10 @@ class ShareInstanceAccessDatabaseMixin(object):
             context, share_instance_id, filters=filters)
 
         if instance_rules and (updates or conditionally_change):
+            if not updates:
+                updates = {}
+            if not conditionally_change:
+                conditionally_change = {}
             for rule in instance_rules:
                 mapping_state = rule['state']
                 rule_updates = copy.deepcopy(updates)
@@ -151,11 +157,16 @@ class ShareInstanceAccessDatabaseMixin(object):
 
         return instance_rules
 
+    def get_share_instance_access_rules(self, context, filters=None,
+                                        share_instance_id=None):
+        return self.get_and_update_share_instance_access_rules(
+            context, filters, None, None, share_instance_id)
+
     @locked_access_rules_operation
     def get_and_update_share_instance_access_rule(self, context, rule_id,
-                                                  updates={},
+                                                  updates=None,
                                                   share_instance_id=None,
-                                                  conditionally_change={}):
+                                                  conditionally_change=None):
         """Get and conditionally update a given share instance access rule.
 
         :param updates: Set this parameter to a dictionary of key:value
@@ -180,6 +191,8 @@ class ShareInstanceAccessDatabaseMixin(object):
         instance_rule_mapping = self.db.share_instance_access_get(
             context, rule_id, share_instance_id)
 
+        if not updates:
+            updates = {}
         if conditionally_change:
             mapping_state = instance_rule_mapping['state']
             try:
@@ -980,7 +980,8 @@ class ShareDriver(object):
            the failure.
         """
 
-    def revert_to_snapshot(self, context, snapshot, share_server=None):
+    def revert_to_snapshot(self, context, snapshot, access_rules,
+                           share_server=None):
         """Reverts a share (in place) to the specified snapshot.
 
         Does not delete the share snapshot. The share and snapshot must both
@@ -994,6 +995,7 @@ class ShareDriver(object):
 
         :param context: Current context
         :param snapshot: The snapshot to be restored
+        :param access_rules: List of all access rules for the affected share
         :param share_server: Optional -- Share server model or None
         """
         raise NotImplementedError()
@@ -2063,7 +2065,8 @@ class ShareDriver(object):
 
     def revert_to_replicated_snapshot(self, context, active_replica,
                                       replica_list, active_replica_snapshot,
-                                      replica_snapshots, share_server=None):
+                                      replica_snapshots, access_rules,
+                                      share_server=None):
         """Reverts a replicated share (in place) to the specified snapshot.
 
         .. note::
@@ -2089,6 +2092,7 @@ class ShareDriver(object):
             These snapshot instances track the snapshot across the replicas.
             The snapshot of the active replica to be restored with have its
             status attribute set to 'restoring'.
+        :param access_rules: List of access rules for the affected share.
         :param share_server: Optional -- Share server model
         """
         raise NotImplementedError()
@@ -741,11 +741,14 @@ class HitachiHNASDriver(driver.ShareDriver):
                       {'shr_id': share['id'],
                        'shr_size': six.text_type(new_size)})
 
-    def revert_to_snapshot(self, context, snapshot, share_server=None):
+    def revert_to_snapshot(self, context, snapshot, access_rules,
+                           share_server=None):
         """Reverts a share to a given snapshot.
 
         :param context: The `context.RequestContext` object for the request
         :param snapshot: The snapshot to which the share is to be reverted to.
+        :param access_rules: List of all access rules for the affected share.
+            Not used by this driver.
         :param share_server: Data structure with share server information.
             Not used by this driver.
         """
@@ -362,7 +362,13 @@ class LVMShareDriver(LVMMixin, driver.ShareDriver):
         self._extend_container(share, device_name, new_size)
         self._execute('resize2fs', device_name, run_as_root=True)
 
-    def revert_to_snapshot(self, context, snapshot, share_server=None):
+    def revert_to_snapshot(self, context, snapshot, access_rules,
+                           share_server=None):
+        share = snapshot['share']
+        # Temporarily remove all access rules
+        self._get_helper(share).update_access(self.share_server,
+                                              share['name'], [], [], [])
+        # Unmount the filesystem
         self._remove_export(context, snapshot)
         # First we merge the snapshot LV and the share LV
         # This won't actually do anything until the LV is reactivated
@@ -370,7 +376,6 @@ class LVMShareDriver(LVMMixin, driver.ShareDriver):
                                         snapshot['name'])
         self._execute('lvconvert', '--merge', snap_lv_name, run_as_root=True)
         # Unmount the share so we can deactivate it
-        share = snapshot['share']
         self._unmount_device(share)
         # Deactivate the share LV
         share_lv_name = "%s/%s" % (self.configuration.lvm_share_volume_group,
@@ -381,11 +386,15 @@ class LVMShareDriver(LVMMixin, driver.ShareDriver):
         self._execute('lvchange', '-ay', share_lv_name, run_as_root=True)
         # Now recreate the snapshot that was destroyed by the merge
         self._create_snapshot(context, snapshot)
-        # Finally we can mount the share again
+        # At this point we can mount the share again
         device_name = self._get_local_path(share)
         self._mount_device(share, device_name)
         device_name = self._get_local_path(snapshot)
         self._mount_device(snapshot, device_name)
+        # Lastly we add all the access rules back
+        self._get_helper(share).update_access(self.share_server,
+                                              share['name'], access_rules,
+                                              [], [])
 
     def create_snapshot(self, context, snapshot, share_server=None):
         self._create_snapshot(context, snapshot)
@@ -55,7 +55,7 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
     def create_snapshot(self, context, snapshot, **kwargs):
         return self.library.create_snapshot(context, snapshot, **kwargs)
 
-    def revert_to_snapshot(self, context, snapshot, **kwargs):
+    def revert_to_snapshot(self, context, snapshot, access_rules, **kwargs):
         return self.library.revert_to_snapshot(context, snapshot, **kwargs)
 
     def delete_share(self, context, share, **kwargs):
@@ -170,7 +170,8 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
 
     def revert_to_replicated_snapshot(self, context, active_replica,
                                       replica_list, active_replica_snapshot,
-                                      replica_snapshots, share_server=None):
+                                      replica_snapshots, access_rules,
+                                      share_server=None):
         raise NotImplementedError()
 
     def migration_check_compatibility(self, context, source_share,
@@ -55,7 +55,7 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
     def create_snapshot(self, context, snapshot, **kwargs):
         return self.library.create_snapshot(context, snapshot, **kwargs)
 
-    def revert_to_snapshot(self, context, snapshot, **kwargs):
+    def revert_to_snapshot(self, context, snapshot, access_rules, **kwargs):
         return self.library.revert_to_snapshot(context, snapshot, **kwargs)
 
     def delete_share(self, context, share, **kwargs):
@@ -185,7 +185,8 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
 
     def revert_to_replicated_snapshot(self, context, active_replica,
                                       replica_list, active_replica_snapshot,
-                                      replica_snapshots, **kwargs):
+                                      replica_snapshots, access_rules,
+                                      **kwargs):
         return self.library.revert_to_replicated_snapshot(
             context, active_replica, replica_list, active_replica_snapshot,
             replica_snapshots, **kwargs)
@@ -2463,21 +2463,28 @@ class ShareManager(manager.SchedulerDependentManager):
 
     @add_hooks
     @utils.require_driver_initialized
-    def revert_to_snapshot(self, context, snapshot_id, reservations,
-                           share_id=None):
-
+    def revert_to_snapshot(self, context, snapshot_id,
+                           reservations, share_id=None):
+        # TODO(bswartz) fix bug 1662572 and remove share_id
         context = context.elevated()
         snapshot = self.db.share_snapshot_get(context, snapshot_id)
         share = snapshot['share']
         share_id = share['id']
+        share_instance_id = snapshot.instance.share_instance_id
+        access_rules = self.access_helper.get_share_instance_access_rules(
+            context, filters={'state': constants.STATUS_ACTIVE},
+            share_instance_id=share_instance_id)
 
         if share.get('has_replicas'):
             self._revert_to_replicated_snapshot(
-                context, share, snapshot, reservations, share_id=share_id)
+                context, share, snapshot, reservations, access_rules,
+                share_id=share_id)
         else:
-            self._revert_to_snapshot(context, share, snapshot, reservations)
+            self._revert_to_snapshot(context, share, snapshot, reservations,
+                                     access_rules)
 
-    def _revert_to_snapshot(self, context, share, snapshot, reservations):
+    def _revert_to_snapshot(self, context, share, snapshot, reservations,
+                            access_rules):
 
         share_server = self._get_share_server(context, share)
         share_id = share['id']
@@ -2495,6 +2502,7 @@ class ShareManager(manager.SchedulerDependentManager):
         try:
             self.driver.revert_to_snapshot(context,
                                            snapshot_instance_dict,
+                                           access_rules,
                                            share_server=share_server)
         except Exception:
             with excutils.save_and_reraise_exception():
@@ -2788,7 +2796,8 @@ class ShareManager(manager.SchedulerDependentManager):
 
     @locked_share_replica_operation
     def _revert_to_replicated_snapshot(self, context, share, snapshot,
-                                       reservations, share_id=None):
+                                       reservations, access_rules,
+                                       share_id=None):
 
         share_server = self._get_share_server(context, share)
         snapshot_id = snapshot['id']
@@ -2825,7 +2834,7 @@ class ShareManager(manager.SchedulerDependentManager):
         try:
             self.driver.revert_to_replicated_snapshot(
                 context, active_replica, replica_list, active_replica_snapshot,
-                replica_snapshots, share_server=share_server)
+                replica_snapshots, access_rules, share_server=share_server)
         except Exception:
             with excutils.save_and_reraise_exception():
 
@@ -147,6 +147,12 @@ class ShareAPITest(test.TestCase):
         share = copy.deepcopy(self.share)
         share['status'] = constants.STATUS_AVAILABLE
         share['revert_to_snapshot_support'] = True
+        share["instances"] = [
+            {
+                "id": "fakeid",
+                "access_rules_status": constants.ACCESS_STATE_ACTIVE,
+            },
+        ]
         share = fake_share.fake_share(**share)
         snapshot = copy.deepcopy(self.snapshot)
         snapshot['status'] = constants.STATUS_AVAILABLE
@@ -297,6 +303,39 @@ class ShareAPITest(test.TestCase):
                           '1',
                           body=body)
 
+    def test__revert_snapshot_access_applying(self):
+
+        share = copy.deepcopy(self.share)
+        share['status'] = constants.STATUS_AVAILABLE
+        share['revert_to_snapshot_support'] = True
+        share["instances"] = [
+            {
+                "id": "fakeid",
+                "access_rules_status": constants.SHARE_INSTANCE_RULES_SYNCING,
+            },
+        ]
+        share = fake_share.fake_share(**share)
+        snapshot = copy.deepcopy(self.snapshot)
+        snapshot['status'] = constants.STATUS_AVAILABLE
+        body = {'revert': {'snapshot_id': '2'}}
+        req = fakes.HTTPRequest.blank(
+            '/shares/1/action', use_admin_context=False, version='2.27')
+        self.mock_object(
+            self.controller, '_validate_revert_parameters',
+            mock.Mock(return_value=body['revert']))
+        self.mock_object(share_api.API, 'get', mock.Mock(return_value=share))
+        self.mock_object(share_api.API, 'get_snapshot',
+                         mock.Mock(return_value=snapshot))
+        self.mock_object(share_api.API, 'get_latest_snapshot_for_share',
+                         mock.Mock(return_value=snapshot))
+        self.mock_object(share_api.API, 'revert_to_snapshot')
+
+        self.assertRaises(webob.exc.HTTPConflict,
+                          self.controller._revert,
+                          req,
+                          '1',
+                          body=body)
+
     def test__revert_snapshot_not_latest(self):
 
         share = copy.deepcopy(self.share)
@@ -315,7 +315,8 @@ class DummyDriver(driver.ShareDriver):
         """Removes the specified snapshot from Manila management."""
 
     @slow_me_down
-    def revert_to_snapshot(self, context, snapshot, share_server=None):
+    def revert_to_snapshot(self, context, snapshot, access_rules,
+                           share_server=None):
         """Reverts a share (in place) to the specified snapshot."""
 
     @slow_me_down
@@ -488,7 +489,8 @@ class DummyDriver(driver.ShareDriver):
     @slow_me_down
     def revert_to_replicated_snapshot(self, context, active_replica,
                                       replica_list, active_replica_snapshot,
-                                      replica_snapshots, share_server=None):
+                                      replica_snapshots, access_rules,
+                                      share_server=None):
         """Reverts a replicated share (in place) to the specified snapshot."""
 
     @slow_me_down
@@ -533,8 +533,10 @@ class LVMShareDriverTestCase(test.TestCase):
         self.assertEqual('test-pool', self._driver._stats['pools'])
 
     def test_revert_to_snapshot(self):
+        mock_update_access = self.mock_object(self._helper_nfs,
+                                              'update_access')
         self._driver.revert_to_snapshot(self._context, self.snapshot,
-                                        self.share_server)
+                                        [], self.share_server)
         snap_lv = "%s/fakesnapshotname" % (CONF.lvm_share_volume_group)
         share_lv = "%s/fakename" % (CONF.lvm_share_volume_group)
         share_mount_path = self._get_mount_path(self.snapshot['share'])
@@ -560,6 +562,7 @@ class LVMShareDriverTestCase(test.TestCase):
             ("chmod 777 %s" % snapshot_mount_path),
         ]
        self.assertEqual(expected_exec, fake_utils.fake_execute_get_log())
+        self.assertEqual(2, mock_update_access.call_count)
 
     def test_snapshot_update_access(self):
         access_rules = [{
@@ -5256,38 +5256,51 @@ class ShareManagerTestCase(test.TestCase):
         reservations = 'fake_reservations'
         share_id = 'fake_share_id'
         snapshot_id = 'fake_snapshot_id'
+        instance_id = 'fake_instance_id'
+        share_instance = fakes.fake_share_instance(
+            id=instance_id, share_id=share_id)
         share = fakes.fake_share(
-            id=share_id, instance={'id': 'fake_instance_id'},
+            id=share_id, instance=share_instance,
             project_id='fake_project', user_id='fake_user', size=2,
             has_replicas=has_replicas)
         snapshot_instance = fakes.fake_snapshot_instance(
-            share_id=share_id, share=share, name='fake_snapshot')
+            share_id=instance_id, share=share, name='fake_snapshot',
+            share_instance=share_instance, share_instance_id=instance_id)
         snapshot = fakes.fake_snapshot(
             id=snapshot_id, share_id=share_id, share=share,
             instance=snapshot_instance, project_id='fake_project',
             user_id='fake_user', size=1)
+        access_rules = ['fake_access_rule']
 
         mock_share_snapshot_get = self.mock_object(
             self.share_manager.db, 'share_snapshot_get',
             mock.Mock(return_value=snapshot))
+        mock_access_get = self.mock_object(
+            self.share_manager.access_helper,
+            'get_share_instance_access_rules',
+            mock.Mock(return_value=access_rules))
         mock_revert_to_snapshot = self.mock_object(
             self.share_manager, '_revert_to_snapshot')
         mock_revert_to_replicated_snapshot = self.mock_object(
             self.share_manager, '_revert_to_replicated_snapshot')
 
         self.share_manager.revert_to_snapshot(
-            self.context, snapshot_id, reservations, share_id=share_id)
+            self.context, snapshot_id, reservations)
 
         mock_share_snapshot_get.assert_called_once_with(mock.ANY, snapshot_id)
+        mock_access_get.assert_called_once_with(
+            mock.ANY, filters={'state': constants.STATUS_ACTIVE},
+            share_instance_id=instance_id)
 
         if not has_replicas:
             mock_revert_to_snapshot.assert_called_once_with(
-                mock.ANY, share, snapshot, reservations)
+                mock.ANY, share, snapshot, reservations, access_rules)
             self.assertFalse(mock_revert_to_replicated_snapshot.called)
         else:
             self.assertFalse(mock_revert_to_snapshot.called)
             mock_revert_to_replicated_snapshot.assert_called_once_with(
-                mock.ANY, share, snapshot, reservations, share_id=share_id)
+                mock.ANY, share, snapshot, reservations, access_rules,
+                share_id=share_id)
 
     @ddt.data(None, 'fake_reservations')
     def test__revert_to_snapshot(self, reservations):
@@ -5309,6 +5322,7 @@ class ShareManagerTestCase(test.TestCase):
             id='fake_snapshot_id', share_id=share_id, share=share,
             instance=snapshot_instance, project_id='fake_project',
             user_id='fake_user', size=1)
+        access_rules = []
 
         self.mock_object(
             self.share_manager.db, 'share_snapshot_get',
@@ -5322,12 +5336,13 @@ class ShareManagerTestCase(test.TestCase):
             self.share_manager.db, 'share_snapshot_update')
 
         self.share_manager._revert_to_snapshot(
-            self.context, share, snapshot, reservations)
+            self.context, share, snapshot, reservations, access_rules)
 
         mock_driver.revert_to_snapshot.assert_called_once_with(
             mock.ANY,
             self._get_snapshot_instance_dict(
                 snapshot_instance, share, snapshot=snapshot),
+            access_rules,
             share_server=None)
 
         self.assertFalse(mock_quotas_rollback.called)
@@ -5366,6 +5381,7 @@ class ShareManagerTestCase(test.TestCase):
             id='fake_snapshot_id', share_id=share_id, share=share,
             instance=snapshot_instance, project_id='fake_project',
             user_id='fake_user', size=1)
+        access_rules = []
 
         self.mock_object(
             self.share_manager.db, 'share_snapshot_get',
@@ -5383,12 +5399,14 @@ class ShareManagerTestCase(test.TestCase):
             self.context,
             share,
             snapshot,
-            reservations)
+            reservations,
+            access_rules)
 
         mock_driver.revert_to_snapshot.assert_called_once_with(
             mock.ANY,
             self._get_snapshot_instance_dict(
                 snapshot_instance, share, snapshot=snapshot),
+            access_rules,
             share_server=None)
 
         self.assertFalse(mock_quotas_commit.called)
@@ -5580,6 +5598,7 @@ class ShareManagerTestCase(test.TestCase):
             id='rid2', share_id=share_id, host='secondary',
             replica_state=constants.REPLICA_STATE_IN_SYNC, as_primitive=False)
         replicas = [active_replica, replica]
+        access_rules = []
         self.mock_object(
             db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
         self.mock_object(
@@ -5601,7 +5620,8 @@ class ShareManagerTestCase(test.TestCase):
             self.share_manager.db, 'share_snapshot_instance_update')
 
         self.share_manager._revert_to_replicated_snapshot(
-            self.context, share, snapshot, reservations, share_id=share_id)
+            self.context, share, snapshot, reservations, access_rules,
+            share_id=share_id)
 
         self.assertTrue(mock_driver.revert_to_replicated_snapshot.called)
         self.assertFalse(mock_quotas_rollback.called)
@@ -5642,6 +5662,7 @@ class ShareManagerTestCase(test.TestCase):
             id='rid2', share_id=share_id, host='secondary',
             replica_state=constants.REPLICA_STATE_IN_SYNC, as_primitive=False)
         replicas = [active_replica, replica]
+        access_rules = []
         self.mock_object(
             db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
         self.mock_object(
@@ -5670,6 +5691,7 @@ class ShareManagerTestCase(test.TestCase):
             share,
             snapshot,
             reservations,
+            access_rules,
             share_id=share_id)
 
         self.assertTrue(mock_driver.revert_to_replicated_snapshot.called)
@@ -0,0 +1,4 @@
+---
+fixes:
+  - Fixed failure when reverting a share to a snapshot using the LVM driver
+    while access rules exist for that share.