Update share/snapshot instance deferred deletion
- Deferred deletion is a regular deletion where quota is freed before the resource is deleted by the driver, and deletion states are hidden from the end user. If any error occurs during deletion, everything will be handled by Manila or by an admin. - Any error during deferred deletion will put the share/snapshot in the error_deferred_deleting state, and a periodic task will handle it. - count_share_group_snapshot_members_in_share() should not return share instances in deferred deletion states, as those shares are hidden from the end user once deferred-deleted. Only actual deletion (by the driver) will make sure the snapshot instances of a share instance are deleted before the share instance itself. partially-implements: bp/deferred-deletion Closes-bug: #2068043 Change-Id: Ie827619d3952d1db69559206b5dfd47efb4cf7b1 (cherry picked from commit e8f086d0f5
) (cherry picked from commit c78042bf99
)
This commit is contained in:
parent
3560e0df9b
commit
4aeb4be69a
@ -1534,9 +1534,12 @@ def count_share_groups_in_share_network(context, share_network_id):
|
||||
return IMPL.count_share_groups_in_share_network(context, share_network_id)
|
||||
|
||||
|
||||
def count_share_group_snapshot_members_in_share(context, share_id):
|
||||
def count_share_group_snapshot_members_in_share(
|
||||
context, share_id, include_deferred_deleting=True
|
||||
):
|
||||
"""Returns the number of group snapshot members linked to the share."""
|
||||
return IMPL.count_share_group_snapshot_members_in_share(context, share_id)
|
||||
return IMPL.count_share_group_snapshot_members_in_share(
|
||||
context, share_id, include_deferred_deleting=include_deferred_deleting)
|
||||
|
||||
|
||||
def share_group_snapshot_get(context, share_group_snapshot_id):
|
||||
|
@ -6564,16 +6564,32 @@ def count_share_groups_in_share_network(context, share_network_id):
|
||||
|
||||
@require_context
|
||||
@context_manager.reader
|
||||
def count_share_group_snapshot_members_in_share(context, share_id):
|
||||
return model_query(
|
||||
def count_share_group_snapshot_members_in_share(
|
||||
context, share_id, include_deferred_deleting=True
|
||||
):
|
||||
query = model_query(
|
||||
context, models.ShareSnapshotInstance,
|
||||
project_only=True, read_deleted="no",
|
||||
).join(
|
||||
models.ShareInstance,
|
||||
models.ShareInstance.id == (
|
||||
models.ShareSnapshotInstance.share_instance_id),
|
||||
).filter(
|
||||
)
|
||||
|
||||
if include_deferred_deleting:
|
||||
# consider deferred deleting states in query
|
||||
return query.filter(
|
||||
models.ShareInstance.share_id == share_id,
|
||||
).count()
|
||||
|
||||
deferred_delete_states = [
|
||||
constants.STATUS_DEFERRED_DELETING,
|
||||
constants.STATUS_ERROR_DEFERRED_DELETING,
|
||||
]
|
||||
return query.filter(
|
||||
models.ShareInstance.share_id == share_id,
|
||||
and_(models.ShareSnapshotInstance.status.not_in(
|
||||
deferred_delete_states))
|
||||
).count()
|
||||
|
||||
|
||||
|
@ -1392,7 +1392,7 @@ class API(base.Base):
|
||||
|
||||
share_group_snapshot_members_count = (
|
||||
self.db.count_share_group_snapshot_members_in_share(
|
||||
context, share_id))
|
||||
context, share_id, include_deferred_deleting=False))
|
||||
if share_group_snapshot_members_count:
|
||||
msg = (
|
||||
_("Share still has %d dependent share group snapshot "
|
||||
@ -2187,13 +2187,15 @@ class API(base.Base):
|
||||
if force and deferred_delete:
|
||||
deferred_delete = False
|
||||
|
||||
status = constants.STATUS_DELETING
|
||||
if deferred_delete:
|
||||
status = constants.STATUS_DEFERRED_DELETING
|
||||
|
||||
for snapshot_instance in snapshot_instances:
|
||||
self.db.share_snapshot_instance_update(
|
||||
context, snapshot_instance['id'], {'status': status})
|
||||
current_status = snapshot['aggregate_status']
|
||||
if current_status not in (constants.STATUS_DEFERRED_DELETING,
|
||||
constants.STATUS_ERROR_DEFERRED_DELETING):
|
||||
new_status = constants.STATUS_DELETING
|
||||
if deferred_delete:
|
||||
new_status = constants.STATUS_DEFERRED_DELETING
|
||||
for snapshot_instance in snapshot_instances:
|
||||
self.db.share_snapshot_instance_update(
|
||||
context, snapshot_instance['id'], {'status': new_status})
|
||||
|
||||
if share['has_replicas']:
|
||||
self.share_rpcapi.delete_replicated_snapshot(
|
||||
|
@ -3530,6 +3530,37 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
self._notify_about_share_usage(context, share,
|
||||
share_instance, "delete.start")
|
||||
|
||||
error_state = None
|
||||
if deferred_delete:
|
||||
try:
|
||||
self.db.update_share_instance_quota_usages(
|
||||
context, share_instance_id)
|
||||
LOG.info(
|
||||
"Share instance %s had its deletion deferred. Quota was "
|
||||
"reclaimed and the share driver will proceed with the "
|
||||
"deletion.", share_instance_id
|
||||
)
|
||||
except Exception:
|
||||
LOG.warning(
|
||||
"Error occurred during quota usage update. Administrator "
|
||||
"must rectify quotas.")
|
||||
|
||||
snap_instances = (
|
||||
self.db.share_snapshot_instance_get_all_with_filters(
|
||||
context, {'share_instance_ids': share_instance_id}))
|
||||
if snap_instances:
|
||||
# The share has some snapshot instances whose deletion
|
||||
# was deferred. We relegate deletion of the share to
|
||||
# a periodic task so it can be processed after
|
||||
# all its snapshots are deleted. So we're deliberately
|
||||
# setting the share instance's status to
|
||||
# "error_deferred_deleting"
|
||||
self.db.share_instance_update(
|
||||
context,
|
||||
share_instance_id,
|
||||
{'status': constants.STATUS_ERROR_DEFERRED_DELETING})
|
||||
return
|
||||
|
||||
try:
|
||||
self.access_helper.update_access_rules(
|
||||
context,
|
||||
@ -3549,10 +3580,14 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
LOG.error(msg, share_instance_id)
|
||||
exc_context.reraise = False
|
||||
else:
|
||||
error_state = constants.STATUS_ERROR_DELETING
|
||||
if deferred_delete:
|
||||
error_state = constants.STATUS_ERROR_DEFERRED_DELETING
|
||||
exc_context.reraise = False
|
||||
self.db.share_instance_update(
|
||||
context,
|
||||
share_instance_id,
|
||||
{'status': constants.STATUS_ERROR_DELETING})
|
||||
{'status': error_state})
|
||||
self.message_api.create(
|
||||
context,
|
||||
message_field.Action.DELETE_ACCESS_RULES,
|
||||
@ -3561,21 +3596,9 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
resource_id=share_instance_id,
|
||||
exception=excep)
|
||||
|
||||
if deferred_delete:
|
||||
try:
|
||||
LOG.info(
|
||||
"Share instance %s has been added to a deferred deletion "
|
||||
"queue and will be deleted during the next iteration of "
|
||||
"the periodic deletion task", share_instance_id
|
||||
)
|
||||
self.db.update_share_instance_quota_usages(
|
||||
context, share_instance_id)
|
||||
return
|
||||
except Exception:
|
||||
LOG.warning(
|
||||
"Error occured during quota usage update. Administrator "
|
||||
"must rectify quotas.")
|
||||
return
|
||||
if error_state == constants.STATUS_ERROR_DEFERRED_DELETING and (
|
||||
not force):
|
||||
return
|
||||
|
||||
try:
|
||||
self.driver.delete_share(context, share_instance,
|
||||
@ -3594,10 +3617,14 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
LOG.error(msg, share_instance_id)
|
||||
exc_context.reraise = False
|
||||
else:
|
||||
error_state = constants.STATUS_ERROR_DELETING
|
||||
if deferred_delete:
|
||||
error_state = constants.STATUS_ERROR_DEFERRED_DELETING
|
||||
exc_context.reraise = False
|
||||
self.db.share_instance_update(
|
||||
context,
|
||||
share_instance_id,
|
||||
{'status': constants.STATUS_ERROR_DELETING})
|
||||
{'status': error_state})
|
||||
self.message_api.create(
|
||||
context,
|
||||
message_field.Action.DELETE,
|
||||
@ -3606,6 +3633,10 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
resource_id=share_instance_id,
|
||||
exception=excep)
|
||||
|
||||
if error_state == constants.STATUS_ERROR_DEFERRED_DELETING and (
|
||||
not force):
|
||||
return
|
||||
|
||||
need_to_update_usages = True
|
||||
if share_instance['status'] in (
|
||||
constants.STATUS_DEFERRED_DELETING,
|
||||
@ -3642,27 +3673,6 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
else:
|
||||
self.delete_share_server(context, share_server)
|
||||
|
||||
def _get_share_instances_with_deferred_deletion(self, ctxt):
|
||||
share_instances = self.db.share_instance_get_all(
|
||||
ctxt,
|
||||
filters={
|
||||
'status': constants.STATUS_DEFERRED_DELETING,
|
||||
'host': self.host,
|
||||
})
|
||||
|
||||
share_instances_error_deferred_deleting = (
|
||||
self.db.share_instance_get_all(
|
||||
ctxt,
|
||||
filters={
|
||||
'status': constants.STATUS_ERROR_DEFERRED_DELETING,
|
||||
'host': self.host,
|
||||
}))
|
||||
updated_del = timeutils.utcnow() - datetime.timedelta(minutes=30)
|
||||
for share_instance in share_instances_error_deferred_deleting:
|
||||
if share_instance.get('updated_at') < updated_del:
|
||||
share_instances.append(share_instance)
|
||||
return share_instances
|
||||
|
||||
@periodic_task.periodic_task(
|
||||
spacing=CONF.periodic_deferred_delete_interval)
|
||||
@utils.require_driver_initialized
|
||||
@ -3670,8 +3680,13 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
LOG.debug("Checking for shares in 'deferred_deleting' status to "
|
||||
"process their deletion.")
|
||||
ctxt = ctxt.elevated()
|
||||
share_instances = (
|
||||
self._get_share_instances_with_deferred_deletion(ctxt))
|
||||
share_instances = self.db.share_instance_get_all(
|
||||
ctxt,
|
||||
filters={
|
||||
'status': constants.STATUS_ERROR_DEFERRED_DELETING,
|
||||
'host': self.host,
|
||||
},
|
||||
)
|
||||
|
||||
for share_instance in share_instances:
|
||||
share_instance_id = share_instance['id']
|
||||
@ -3681,6 +3696,28 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
share_instance_id
|
||||
)
|
||||
)
|
||||
|
||||
snap_instances = (
|
||||
self.db.share_snapshot_instance_get_all_with_filters(
|
||||
ctxt, {'share_instance_ids': share_instance_id}))
|
||||
if snap_instances:
|
||||
LOG.warning("Snapshot instances are present for the "
|
||||
"share instance: %s.", share_instance_id)
|
||||
continue
|
||||
|
||||
try:
|
||||
self.access_helper.update_access_rules(
|
||||
ctxt,
|
||||
share_instance_id,
|
||||
delete_all_rules=True,
|
||||
share_server=share_server
|
||||
)
|
||||
except Exception:
|
||||
msg = ("The driver was unable to delete access rules "
|
||||
"for the instance: %s.")
|
||||
LOG.error(msg, share_instance_id)
|
||||
continue
|
||||
|
||||
try:
|
||||
self.driver.delete_share(ctxt, share_instance,
|
||||
share_server=share_server)
|
||||
@ -3691,10 +3728,6 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
msg = ("The driver was unable to delete the share "
|
||||
"instance: %s on the backend. ")
|
||||
LOG.error(msg, share_instance_id)
|
||||
self.db.share_instance_update(
|
||||
ctxt,
|
||||
share_instance_id,
|
||||
{'status': constants.STATUS_ERROR_DEFERRED_DELETING})
|
||||
continue
|
||||
|
||||
self.db.share_instance_delete(ctxt, share_instance_id)
|
||||
@ -3867,8 +3900,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
self.db.share_snapshot_instance_update(
|
||||
context, snapshot_instance_id, model_update)
|
||||
|
||||
def _delete_snapshot_quota(self, context, snapshot,
|
||||
deferred_delete=False):
|
||||
def _delete_snapshot_quota(self, context, snapshot):
|
||||
share_type_id = snapshot['share']['instance']['share_type_id']
|
||||
reservations = None
|
||||
try:
|
||||
@ -3908,6 +3940,19 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
|
||||
share_ref = self.db.share_get(context, snapshot_ref['share_id'])
|
||||
|
||||
if deferred_delete:
|
||||
try:
|
||||
self._delete_snapshot_quota(context, snapshot_ref)
|
||||
LOG.info(
|
||||
"Snapshot instance %s had its deletion deferred. Quota "
|
||||
"was reclaimed and the share driver will proceed with "
|
||||
"the deletion.", snapshot_instance['id']
|
||||
)
|
||||
except Exception:
|
||||
LOG.warning(
|
||||
"Error occured during quota usage update. Administrator "
|
||||
"must rectify quotas.")
|
||||
|
||||
if share_ref['mount_snapshot_support']:
|
||||
try:
|
||||
self.snapshot_access_helper.update_access_rules(
|
||||
@ -3921,23 +3966,6 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
"for snapshot %s. Moving on.",
|
||||
snapshot_instance['snapshot_id'])
|
||||
|
||||
if deferred_delete:
|
||||
try:
|
||||
LOG.info(
|
||||
"Snapshot instance %s has been added to a deferred "
|
||||
"deletion queue and will be deleted during the next "
|
||||
"iteration of the periodic deletion task",
|
||||
snapshot_instance['id']
|
||||
)
|
||||
self._delete_snapshot_quota(
|
||||
context, snapshot_ref, deferred_delete=True)
|
||||
return
|
||||
except Exception:
|
||||
LOG.warning(
|
||||
"Error occured during quota usage update. Administrator "
|
||||
"must rectify quotas.")
|
||||
return
|
||||
|
||||
try:
|
||||
self.driver.delete_snapshot(context, snapshot_instance,
|
||||
share_server=share_server)
|
||||
@ -3952,10 +3980,14 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
LOG.exception(msg, snapshot_id)
|
||||
exc.reraise = False
|
||||
else:
|
||||
error_state = constants.STATUS_ERROR_DELETING
|
||||
if deferred_delete:
|
||||
error_state = constants.STATUS_ERROR_DEFERRED_DELETING
|
||||
exc.reraise = False
|
||||
self.db.share_snapshot_instance_update(
|
||||
context,
|
||||
snapshot_instance_id,
|
||||
{'status': constants.STATUS_ERROR_DELETING})
|
||||
{'status': error_state})
|
||||
self.message_api.create(
|
||||
context,
|
||||
message_field.Action.DELETE,
|
||||
@ -3964,21 +3996,18 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
resource_id=snapshot_instance_id,
|
||||
exception=excep)
|
||||
|
||||
snapshot_instance = self.db.share_snapshot_instance_get(
|
||||
context, snapshot_ref.instance['id'])
|
||||
if snapshot_instance['status'] == (
|
||||
constants.STATUS_ERROR_DEFERRED_DELETING) and not force:
|
||||
return
|
||||
|
||||
self.db.share_snapshot_instance_delete(context, snapshot_instance_id)
|
||||
self._delete_snapshot_quota(context, snapshot_ref)
|
||||
|
||||
def _get_snapshot_instances_with_deletion_deferred(self, ctxt):
|
||||
snap_instances = self.db.share_snapshot_instance_get_all_with_filters(
|
||||
ctxt, {'statuses': constants.STATUS_DEFERRED_DELETING})
|
||||
|
||||
snap_instances_error_deferred_deleting = \
|
||||
self.db.share_snapshot_instance_get_all_with_filters(
|
||||
ctxt, {'statuses': constants.STATUS_ERROR_DEFERRED_DELETING})
|
||||
updated_del = timeutils.utcnow() - datetime.timedelta(minutes=30)
|
||||
for snap_instance in snap_instances_error_deferred_deleting:
|
||||
if snap_instance.get('updated_at') < updated_del:
|
||||
snap_instances.append(snap_instance)
|
||||
return snap_instances
|
||||
if snapshot_instance['status'] not in (
|
||||
constants.STATUS_DEFERRED_DELETING,
|
||||
constants.STATUS_ERROR_DEFERRED_DELETING
|
||||
):
|
||||
self._delete_snapshot_quota(context, snapshot_ref)
|
||||
|
||||
@periodic_task.periodic_task(
|
||||
spacing=CONF.periodic_deferred_delete_interval)
|
||||
@ -3988,7 +4017,8 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
"process their deletion.")
|
||||
ctxt = ctxt.elevated()
|
||||
snapshot_instances = (
|
||||
self._get_snapshot_instances_with_deletion_deferred(ctxt))
|
||||
self.db.share_snapshot_instance_get_all_with_filters(
|
||||
ctxt, {'statuses': constants.STATUS_ERROR_DEFERRED_DELETING}))
|
||||
|
||||
for snapshot_instance in snapshot_instances:
|
||||
snapshot_instance_id = snapshot_instance['id']
|
||||
@ -4001,10 +4031,6 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
self.driver.delete_snapshot(ctxt, snapshot_instance,
|
||||
share_server=share_server)
|
||||
except Exception:
|
||||
self.db.share_snapshot_instance_update(
|
||||
ctxt,
|
||||
snapshot_instance_id,
|
||||
{'status': constants.STATUS_ERROR_DEFERRED_DELETING})
|
||||
continue
|
||||
self.db.share_snapshot_instance_delete(ctxt,
|
||||
snapshot_instance_id)
|
||||
|
@ -2214,11 +2214,13 @@ class ShareManagerTestCase(test.TestCase):
|
||||
share_id = 'FAKE_SHARE_ID'
|
||||
share = fakes.fake_share(id=share_id)
|
||||
snapshot_instance = fakes.fake_snapshot_instance(
|
||||
share_id=share_id, share=share, name='fake_snapshot')
|
||||
share_id=share_id, share=share, name='fake_snapshot',
|
||||
status=constants.STATUS_DEFERRED_DELETING)
|
||||
snapshot = fakes.fake_snapshot(
|
||||
share_id=share_id, share=share, instance=snapshot_instance,
|
||||
project_id=self.context.project_id, size=1)
|
||||
snapshot_id = snapshot['id']
|
||||
|
||||
self.mock_object(self.share_manager.db, 'share_snapshot_get',
|
||||
mock.Mock(return_value=snapshot))
|
||||
self.mock_object(self.share_manager.db, 'share_snapshot_instance_get',
|
||||
@ -2227,6 +2229,8 @@ class ShareManagerTestCase(test.TestCase):
|
||||
mock.Mock(return_value=share))
|
||||
self.mock_object(self.share_manager, '_get_share_server',
|
||||
mock.Mock(return_value=None))
|
||||
self.mock_object(
|
||||
self.share_manager.db, 'share_snapshot_instance_delete')
|
||||
|
||||
self.mock_object(self.share_manager.driver, 'delete_snapshot')
|
||||
self.mock_object(quota.QUOTAS, 'reserve',
|
||||
@ -2236,12 +2240,14 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.share_manager.delete_snapshot(self.context, snapshot_id,
|
||||
deferred_delete=True)
|
||||
|
||||
self.share_manager.driver.delete_snapshot.assert_not_called()
|
||||
self.share_manager.db.share_snapshot_instance_delete.assert_called()
|
||||
self.share_manager.driver.delete_snapshot.assert_called()
|
||||
quota.QUOTAS.reserve.assert_called_once_with(
|
||||
mock.ANY, project_id=self.context.project_id, snapshots=-1,
|
||||
snapshot_gigabytes=-snapshot['size'], user_id=snapshot['user_id'],
|
||||
share_type_id=share['instance']['share_type_id'])
|
||||
self.assertEqual(False, quota_commit_call.called)
|
||||
self.assertTrue(self.share_manager.driver.delete_snapshot.called)
|
||||
|
||||
@ddt.data(True, False)
|
||||
def test_delete_snapshot_with_quota_error(self, quota_error):
|
||||
@ -2345,8 +2351,7 @@ class ShareManagerTestCase(test.TestCase):
|
||||
resource_id=snapshot_instance['id'],
|
||||
exception=mock.ANY)
|
||||
|
||||
@ddt.data(True, False)
|
||||
def test_do_deferred_snapshot_deletion(self, consider_error_deleting):
|
||||
def test_do_deferred_snapshot_deletion(self):
|
||||
instance_1 = db_utils.create_share_instance(
|
||||
share_id='fake_id',
|
||||
share_type_id='fake_type_id')
|
||||
@ -2360,14 +2365,10 @@ class ShareManagerTestCase(test.TestCase):
|
||||
db_utils.create_snapshot_instance(
|
||||
snapshot_id=snapshot['id'],
|
||||
share_instance_id=instance_1['id'],
|
||||
status='deferred_deleting')
|
||||
mins = 20
|
||||
if consider_error_deleting:
|
||||
mins = 40
|
||||
status='error_deferred_deleting')
|
||||
db_utils.create_snapshot_instance(
|
||||
snapshot_id=snapshot['id'],
|
||||
share_instance_id=instance_2['id'],
|
||||
updated_at=timeutils.utcnow() - datetime.timedelta(minutes=mins),
|
||||
status='error_deferred_deleting')
|
||||
|
||||
self.mock_object(self.share_manager, '_get_share_server',
|
||||
@ -2377,10 +2378,7 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.mock_object(self.share_manager.db,
|
||||
'share_snapshot_instance_update')
|
||||
self.share_manager.do_deferred_snapshot_deletion(self.context)
|
||||
if consider_error_deleting:
|
||||
self.assertEqual(2, mock_delete_snapshot.call_count)
|
||||
else:
|
||||
self.assertEqual(1, mock_delete_snapshot.call_count)
|
||||
self.assertEqual(2, mock_delete_snapshot.call_count)
|
||||
|
||||
def test_do_deferred_snapshot_deletion_exception(self):
|
||||
instance_1 = db_utils.create_share_instance(
|
||||
@ -2390,10 +2388,10 @@ class ShareManagerTestCase(test.TestCase):
|
||||
id='fake_id',
|
||||
instances=[instance_1])
|
||||
snapshot = db_utils.create_snapshot(share_id=share['id'])
|
||||
si = db_utils.create_snapshot_instance(
|
||||
db_utils.create_snapshot_instance(
|
||||
snapshot_id=snapshot['id'],
|
||||
share_instance_id=instance_1['id'],
|
||||
status='deferred_deleting')
|
||||
status='error_deferred_deleting')
|
||||
|
||||
self.mock_object(self.share_manager, '_get_share_server',
|
||||
mock.Mock(return_value=None))
|
||||
@ -2407,9 +2405,6 @@ class ShareManagerTestCase(test.TestCase):
|
||||
'share_snapshot_instance_delete')
|
||||
|
||||
self.share_manager.do_deferred_snapshot_deletion(self.context)
|
||||
self.share_manager.db.share_snapshot_instance_update.assert_any_call(
|
||||
mock.ANY, si['id'],
|
||||
{'status': constants.STATUS_ERROR_DEFERRED_DELETING})
|
||||
mock_delete_snapshot_db.assert_not_called()
|
||||
|
||||
def test_create_share_instance_with_share_network_dhss_false(self):
|
||||
@ -3965,17 +3960,17 @@ class ShareManagerTestCase(test.TestCase):
|
||||
host=self.share_manager.host
|
||||
)
|
||||
share_type = db_utils.create_share_type()
|
||||
share = db_utils.create_share(share_network_id=share_net['id'],
|
||||
share_server_id=share_srv['id'],
|
||||
share_type_id=share_type['id'])
|
||||
share = db_utils.create_share(
|
||||
share_network_id=share_net['id'],
|
||||
share_server_id=share_srv['id'],
|
||||
share_type_id=share_type['id'],
|
||||
status=constants.STATUS_DEFERRED_DELETING)
|
||||
share_srv = db.share_server_get(self.context, share_srv['id'])
|
||||
|
||||
manager.CONF.delete_share_server_with_last_share = False
|
||||
self.share_manager.driver = mock.Mock()
|
||||
self.mock_object(db, 'share_server_get',
|
||||
mock.Mock(return_value=share_srv))
|
||||
mock_access_helper_call = self.mock_object(
|
||||
self.share_manager.access_helper, 'update_access_rules')
|
||||
|
||||
self.mock_object(
|
||||
quota.QUOTAS, 'reserve',
|
||||
@ -3984,15 +3979,12 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.mock_object(manager.LOG, 'exception')
|
||||
self.mock_object(self.share_manager.db, 'share_instance_update',
|
||||
mock.Mock(return_value=None))
|
||||
self.mock_object(self.share_manager.driver, 'delete_share')
|
||||
|
||||
self.share_manager.delete_share_instance(self.context,
|
||||
share.instance['id'],
|
||||
deferred_delete=True)
|
||||
|
||||
mock_access_helper_call.assert_called_once_with(
|
||||
utils.IsAMatcher(context.RequestContext), share.instance['id'],
|
||||
delete_all_rules=True, share_server=share_srv)
|
||||
|
||||
reservation_params = {
|
||||
'gigabytes': -share['size'],
|
||||
'shares': -1,
|
||||
@ -4004,6 +3996,7 @@ class ShareManagerTestCase(test.TestCase):
|
||||
mock.ANY, **reservation_params,
|
||||
)
|
||||
self.assertFalse(quota.QUOTAS.commit.called)
|
||||
self.assertTrue(self.share_manager.driver.delete_share.called)
|
||||
self.assertFalse(self.share_manager.driver.teardown_network.called)
|
||||
|
||||
def test_delete_share_instance_deferred_delete(self):
|
||||
@ -4012,17 +4005,17 @@ class ShareManagerTestCase(test.TestCase):
|
||||
host=self.share_manager.host
|
||||
)
|
||||
share_type = db_utils.create_share_type()
|
||||
share = db_utils.create_share(share_network_id=share_net['id'],
|
||||
share_server_id=share_srv['id'],
|
||||
share_type_id=share_type['id'])
|
||||
share = db_utils.create_share(
|
||||
share_network_id=share_net['id'],
|
||||
share_server_id=share_srv['id'],
|
||||
share_type_id=share_type['id'],
|
||||
status=constants.STATUS_DEFERRED_DELETING)
|
||||
share_srv = db.share_server_get(self.context, share_srv['id'])
|
||||
|
||||
manager.CONF.delete_share_server_with_last_share = False
|
||||
self.share_manager.driver = mock.Mock()
|
||||
self.mock_object(db, 'share_server_get',
|
||||
mock.Mock(return_value=share_srv))
|
||||
mock_access_helper_call = self.mock_object(
|
||||
self.share_manager.access_helper, 'update_access_rules')
|
||||
|
||||
self.mock_object(quota.QUOTAS, 'reserve',
|
||||
mock.Mock(return_value='fake_reservation'))
|
||||
@ -4030,15 +4023,12 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.mock_object(manager.LOG, 'exception')
|
||||
self.mock_object(self.share_manager.db, 'share_instance_update',
|
||||
mock.Mock(return_value=None))
|
||||
self.mock_object(self.share_manager.driver, 'delete_share')
|
||||
|
||||
self.share_manager.delete_share_instance(self.context,
|
||||
share.instance['id'],
|
||||
deferred_delete=True)
|
||||
|
||||
mock_access_helper_call.assert_called_once_with(
|
||||
utils.IsAMatcher(context.RequestContext), share.instance['id'],
|
||||
delete_all_rules=True, share_server=share_srv)
|
||||
|
||||
reservation_params = {
|
||||
'gigabytes': -share['size'],
|
||||
'shares': -1,
|
||||
@ -4053,10 +4043,10 @@ class ShareManagerTestCase(test.TestCase):
|
||||
mock.ANY, mock.ANY, project_id=share['project_id'],
|
||||
share_type_id=share_type['id'], user_id=share['user_id'],
|
||||
)
|
||||
self.assertTrue(self.share_manager.driver.delete_share.called)
|
||||
self.assertFalse(self.share_manager.driver.teardown_network.called)
|
||||
|
||||
@ddt.data(True, False)
|
||||
def test_do_deferred_share_deletion(self, consider_error_deleting):
|
||||
def test_do_deferred_share_deletion(self):
|
||||
share = db_utils.create_share_without_instance(
|
||||
id='fake_id',
|
||||
status=constants.STATUS_AVAILABLE)
|
||||
@ -4079,19 +4069,6 @@ class ShareManagerTestCase(test.TestCase):
|
||||
'host': self.host,
|
||||
}
|
||||
si_2 = db_utils.create_share_instance(**kwargs)
|
||||
mins = 20
|
||||
if consider_error_deleting:
|
||||
mins = 40
|
||||
kwargs = {
|
||||
'id': 3,
|
||||
'share_id': share['id'],
|
||||
'share_server_id': share_server['id'],
|
||||
'status': 'error_deferred_deleting',
|
||||
'updated_at': (
|
||||
timeutils.utcnow() - datetime.timedelta(minutes=mins)),
|
||||
'host': self.host,
|
||||
}
|
||||
si_3 = db_utils.create_share_instance(**kwargs)
|
||||
|
||||
self.mock_object(self.share_manager.db, 'share_server_get',
|
||||
mock.Mock(return_value=share_server))
|
||||
@ -4100,19 +4077,14 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.mock_object(self.share_manager.db, 'share_instance_delete')
|
||||
self.mock_object(
|
||||
self.share_manager.db, 'share_instance_get_all',
|
||||
mock.Mock(side_effect=[
|
||||
[si_1, si_2],
|
||||
[si_3] if consider_error_deleting else {}]))
|
||||
mock.Mock(return_value=[si_1, si_2]))
|
||||
self.mock_object(self.share_manager, '_check_delete_share_server')
|
||||
self.mock_object(self.share_manager, '_notify_about_share_usage')
|
||||
mock_delete_share = self.mock_object(
|
||||
self.share_manager.driver, 'delete_share')
|
||||
|
||||
self.share_manager.do_deferred_share_deletion(self.context)
|
||||
if consider_error_deleting:
|
||||
self.assertEqual(3, mock_delete_share.call_count)
|
||||
else:
|
||||
self.assertEqual(2, mock_delete_share.call_count)
|
||||
self.assertEqual(2, mock_delete_share.call_count)
|
||||
|
||||
def test_do_deferred_share_deletion_exception(self):
|
||||
share = db_utils.create_share_without_instance(
|
||||
@ -4144,9 +4116,6 @@ class ShareManagerTestCase(test.TestCase):
|
||||
mock.Mock(side_effect=exception.ManilaException))
|
||||
|
||||
self.share_manager.do_deferred_share_deletion(self.context)
|
||||
self.share_manager.db.share_instance_update.assert_any_call(
|
||||
mock.ANY, si['id'],
|
||||
{'status': constants.STATUS_ERROR_DEFERRED_DELETING})
|
||||
mock_delete.assert_not_called()
|
||||
|
||||
def test_setup_server(self):
|
||||
|
@ -0,0 +1,10 @@
|
||||
---
|
||||
fixes:
|
||||
- |
|
||||
When deferred deletion is enabled, Manila will attempt regular deletion.
|
||||
Quota is released, and these objects (shares, snapshots) are hidden
|
||||
from the user. Any error during deferred deletion will put the resource in
|
||||
the `error_deferred_deleting` state. After that, it will be handled by a
|
||||
periodic task. Also, Manila will make sure snapshot instances are deleted
|
||||
before the share instance is deleted. For more details, please check
|
||||
Launchpad `bug 2068043 <https://bugs.launchpad.net/manila/+bug/2068043>`_
|
Loading…
Reference in New Issue
Block a user