diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 88c9789e7c..072987db4a 100755 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -242,6 +242,10 @@ function configure_manila { iniset $MANILA_CONF DEFAULT driver_restore_continue_update_interval $MANILA_RESTORE_BACKUP_CONTINUE_TASK_INTERVAL fi + if ! [[ -z $MANILA_DEFERRED_DELETE_TASK_INTERVAL ]]; then + iniset $MANILA_CONF DEFAULT periodic_deferred_delete_interval $MANILA_DEFERRED_DELETE_TASK_INTERVAL + fi + if ! [[ -z $MANILA_DATA_COPY_CHECK_HASH ]]; then iniset $MANILA_CONF DEFAULT check_hash $MANILA_DATA_COPY_CHECK_HASH fi diff --git a/devstack/settings b/devstack/settings index cfc201d275..a5d0f44a03 100644 --- a/devstack/settings +++ b/devstack/settings @@ -185,6 +185,9 @@ MANILA_NEUTRON_VNIC_TYPE=${MANILA_NEUTRON_VNIC_TYPE:-"normal"} # SSH TIMEOUT MANILA_SSH_TIMEOUT=${MANILA_SSH_TIMEOUT:-180} +# Share and snapshot deferred deletion task interval +MANILA_DEFERRED_DELETE_TASK_INTERVAL=${MANILA_DEFERRED_DELETE_TASK_INTERVAL:-10} + # Admin Network setup MANILA_ADMIN_NET_RANGE=${MANILA_ADMIN_NET_RANGE:=10.2.5.0/24} diff --git a/doc/source/contributor/samples/dummy_local.conf b/doc/source/contributor/samples/dummy_local.conf index a9e3062dd7..606decc76c 100644 --- a/doc/source/contributor/samples/dummy_local.conf +++ b/doc/source/contributor/samples/dummy_local.conf @@ -31,6 +31,7 @@ MANILA_SERVICE_IMAGE_ENABLED=false MANILA_SHARE_MIGRATION_PERIOD_TASK_INTERVAL=1 MANILA_SERVER_MIGRATION_PERIOD_TASK_INTERVAL=10 MANILA_REPLICA_STATE_UPDATE_INTERVAL=10 +MANILA_DEFERRED_DELETE_TASK_INTERVAL=10 MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='snapshot_support=True create_share_from_snapshot_support=True revert_to_snapshot_support=True mount_snapshot_support=True' MANILA_ENABLED_BACKENDS=buenosaires,saopaulo,lima,bogota MANILA_OPTGROUP_buenosaires_driver_handles_share_servers=false diff --git a/manila/common/constants.py b/manila/common/constants.py index 45a750a091..7ac4b5f1a0 100644 --- a/manila/common/constants.py +++ b/manila/common/constants.py @@ -23,9 +23,11 @@ DB_DISPLAY_FIELDS_MAX_LENGTH = 255 STATUS_CREATING = 'creating' STATUS_CREATING_FROM_SNAPSHOT = 'creating_from_snapshot' STATUS_DELETING = 'deleting' +STATUS_DEFERRED_DELETING = 'deferred_deleting' STATUS_DELETED = 'deleted' STATUS_ERROR = 'error' STATUS_ERROR_DELETING = 'error_deleting' +STATUS_ERROR_DEFERRED_DELETING = 'error_deferred_deleting' STATUS_AVAILABLE = 'available' STATUS_INACTIVE = 'inactive' STATUS_MANAGING = 'manage_starting' diff --git a/manila/db/api.py b/manila/db/api.py index 78bc2be16b..177d46a2fc 100644 --- a/manila/db/api.py +++ b/manila/db/api.py @@ -322,6 +322,11 @@ def share_instance_delete(context, instance_id, session=None, need_to_update_usages=need_to_update_usages) +def update_share_instance_quota_usages(context, instance_id, session=None): + """Update share instance quota usages""" + return IMPL.update_share_instance_quota_usages(context, instance_id) + + def share_instance_update(context, instance_id, values, with_share_data=False): """Update share instance fields.""" return IMPL.share_instance_update(context, instance_id, values, diff --git a/manila/db/sqlalchemy/api.py b/manila/db/sqlalchemy/api.py index c3f1c1ef3f..c2925ecd4a 100644 --- a/manila/db/sqlalchemy/api.py +++ b/manila/db/sqlalchemy/api.py @@ -1841,6 +1841,10 @@ def share_instance_get_all(context, filters=None, session=None): query = query.filter( models.ShareInstance.share_server_id == share_server_id) + status = filters.get('status') + if status: + query = 
query.filter(models.ShareInstance.status == status) + # Returns list of share instances that satisfy filters. query = query.all() return query @@ -1848,9 +1852,12 @@ def share_instance_get_all(context, filters=None, session=None): @require_context def _update_share_instance_usages(context, share, instance_ref, - is_replica=False): + is_replica=False, + deferred_delete=False): deltas = {} - no_instances_remain = len(share.instances) == 0 + # if share is expected to be deferred_deleted, we drop its quotas + # whether or not it has additional share instances + no_instances_remain = deferred_delete or len(share.instances) == 0 share_usages_to_release = {"shares": -1, "gigabytes": -share['size']} replica_usages_to_release = {"share_replicas": -1, "replica_gigabytes": -share['size']} @@ -1916,7 +1923,24 @@ def share_instance_delete(context, instance_id, session=None, if need_to_update_usages: _update_share_instance_usages(context, share, instance_ref, - is_replica=is_replica) + is_replica=is_replica, + deferred_delete=False) + + +@require_context +def update_share_instance_quota_usages(context, instance_id, session=None): + # This method is specifically written for deferred deletion share + # instance usage. + session = session or get_session() + + with session.begin(): + instance_ref = share_instance_get(context, instance_id, + session=session) + is_replica = instance_ref['replica_state'] is not None + share = share_get(context, instance_ref['share_id'], session=session) + _update_share_instance_usages(context, share, instance_ref, + is_replica=is_replica, + deferred_delete=True) def _set_instances_share_data(context, instances, session): @@ -2240,6 +2264,13 @@ def _process_share_filters(query, filters, project_id=None, is_public=False): query = query.filter(and_(models.ShareTypeExtraSpecs.key == k, models.ShareTypeExtraSpecs.value == v)) + if not filters.get('list_deferred_delete'): + query = query.filter(and_( + models.ShareInstance.status != ( + constants.STATUS_DEFERRED_DELETING), + models.ShareInstance.status != ( + constants.STATUS_ERROR_DEFERRED_DELETING))) + return query @@ -3531,6 +3562,13 @@ def _share_snapshot_get_all_with_filters(context, project_id=None, query = exact_filter(query, models.ShareSnapshot, filters, legal_filter_keys) + if not filters.get('list_deferred_delete'): + query = query.filter(and_( + models.ShareSnapshotInstance.status != ( + constants.STATUS_DEFERRED_DELETING), + models.ShareSnapshotInstance.status != ( + constants.STATUS_ERROR_DEFERRED_DELETING))) + query = apply_sorting(models.ShareSnapshot, query, sort_key, sort_dir) count = None diff --git a/manila/policies/share_snapshot.py b/manila/policies/share_snapshot.py index 49633ad504..7720e89c68 100644 --- a/manila/policies/share_snapshot.py +++ b/manila/policies/share_snapshot.py @@ -94,7 +94,12 @@ deprecated_get_snapshot_metadata = policy.DeprecatedRule( deprecated_reason=DEPRECATED_REASON, deprecated_since='ZED' ) - +deprecated_list_snapshots_in_deferred_deletion_states = policy.DeprecatedRule( + name=BASE_POLICY_NAME % 'list_snapshots_in_deferred_deletion_states', + check_str=base.RULE_ADMIN_API, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='2023.2/Bobcat' +) share_snapshot_policies = [ policy.DocumentedRuleDefault( @@ -269,6 +274,19 @@ share_snapshot_policies = [ ], deprecated_rule=deprecated_get_snapshot_metadata ), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'list_snapshots_in_deferred_deletion_states', + check_str=base.ADMIN, + scope_types=['project'], + description="List 
share snapshots whose deletion has been deferred", + operations=[ + { + 'method': 'GET', + 'path': '/v2/snapshots', + } + ], + deprecated_rule=deprecated_list_snapshots_in_deferred_deletion_states + ), ] diff --git a/manila/policies/shares.py b/manila/policies/shares.py index 43a367827a..bc3eacf8ea 100644 --- a/manila/policies/shares.py +++ b/manila/policies/shares.py @@ -220,6 +220,13 @@ deprecated_update_admin_only_metadata = policy.DeprecatedRule( deprecated_since="YOGA" ) +deprecated_list_shares_in_deferred_deletion_states = policy.DeprecatedRule( + name=BASE_POLICY_NAME % 'list_shares_in_deferred_deletion_states', + check_str=base.RULE_ADMIN_API, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='2023.2/Bobcat' +) + shares_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create', @@ -658,6 +665,20 @@ shares_policies = [ ], deprecated_rule=deprecated_share_get_metadata ), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'list_shares_in_deferred_deletion_states', + check_str=base.ADMIN, + scope_types=['project'], + description="List shares whose deletion has been deferred", + operations=[ + { + 'method': 'GET', + 'path': '/v2/shares', + }, + ], + deprecated_rule=deprecated_list_shares_in_deferred_deletion_states + ), + ] # NOTE(gouthamr) For historic reasons, some snapshot policies used diff --git a/manila/share/api.py b/manila/share/api.py index bce1fbeb56..1a1be8e542 100644 --- a/manila/share/api.py +++ b/manila/share/api.py @@ -56,7 +56,14 @@ share_api_opts = [ 'When enabling this option make sure that filter ' 'CreateFromSnapshotFilter is enabled and to have hosts ' 'reporting replication_domain option.' - ) + ), + cfg.BoolOpt('is_deferred_deletion_enabled', + default=False, + help='Whether to delete shares and share snapshots in a ' + 'deferred manner. Setting this option to True will cause ' + 'quotas to be released immediately if a deletion request ' + 'is accepted. Deletions may eventually fail, and ' + 'rectifying them will require manual intervention.'), ] CONF = cfg.CONF @@ -1449,18 +1456,29 @@ class API(base.Base): statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR, constants.STATUS_INACTIVE) if not (force or share_instance['status'] in statuses): - msg = _("Share instance status must be one of %(statuses)s") % { + msg = _("Share instance status must be one of %(statuses)s") % { "statuses": statuses} raise exception.InvalidShareInstance(reason=msg) - share_instance = self.db.share_instance_update( - context, share_instance['id'], - {'status': constants.STATUS_DELETING, - 'terminated_at': timeutils.utcnow()} - ) + deferred_delete = CONF.is_deferred_deletion_enabled + if force and deferred_delete: + deferred_delete = False - self.share_rpcapi.delete_share_instance(context, share_instance, - force=force) + current_status = share_instance['status'] + if current_status not in (constants.STATUS_DEFERRED_DELETING, + constants.STATUS_ERROR_DEFERRED_DELETING): + new_status = constants.STATUS_DELETING + if deferred_delete: + new_status = constants.STATUS_DEFERRED_DELETING + share_instance = self.db.share_instance_update( + context, share_instance['id'], + {'status': new_status, 'terminated_at': timeutils.utcnow()} + ) + + self.share_rpcapi.delete_share_instance( + context, share_instance, + force=force, + deferred_delete=deferred_delete) # NOTE(u_glide): 'updated_at' timestamp is used to track last usage of # share server. 
This is required for automatic share servers cleanup @@ -2159,10 +2177,17 @@ class API(base.Base): context, {'snapshot_ids': snapshot['id']}) ) + deferred_delete = CONF.is_deferred_deletion_enabled + if force and deferred_delete: + deferred_delete = False + + status = constants.STATUS_DELETING + if deferred_delete: + status = constants.STATUS_DEFERRED_DELETING + for snapshot_instance in snapshot_instances: self.db.share_snapshot_instance_update( - context, snapshot_instance['id'], - {'status': constants.STATUS_DELETING}) + context, snapshot_instance['id'], {'status': status}) if share['has_replicas']: self.share_rpcapi.delete_replicated_snapshot( @@ -2170,7 +2195,8 @@ class API(base.Base): share_id=share['id'], force=force) else: self.share_rpcapi.delete_snapshot( - context, snapshot, share['instance']['host'], force=force) + context, snapshot, share['instance']['host'], + force=force, deferred_delete=deferred_delete) @policy.wrap_check_policy('share') def update(self, context, share, fields): @@ -2259,6 +2285,15 @@ class API(base.Base): self.db.share_get_all_by_project_with_count if show_count else self.db.share_get_all_by_project)} + # check if user is querying with deferred states and forbid + # users that aren't authorized to query shares in these states + policy_str = "list_shares_in_deferred_deletion_states" + do_raise = ('status' in filters and 'deferred' in filters['status']) + show_deferred_deleted = policy.check_policy( + context, 'share', policy_str, do_raise=do_raise) + if show_deferred_deleted: + filters['list_deferred_delete'] = True + # Get filtered list of shares if 'host' in filters: policy.check_policy(context, 'share', 'list_by_host') @@ -2323,6 +2358,16 @@ class API(base.Base): "'%(v)s'.") % {'k': k, 'v': string_args[k]} raise exception.InvalidInput(reason=msg) + # check if user is querying with deferred states and forbid + # users that aren't authorized to query snapshots in these states + policy_str = "list_snapshots_in_deferred_deletion_states" + do_raise = ('status' in search_opts and + 'deferred' in search_opts['status']) + show_deferred_deleted = policy.check_policy( + context, 'share_snapshot', policy_str, do_raise=do_raise) + if show_deferred_deleted: + search_opts['list_deferred_delete'] = True + get_methods = { 'get_all': ( self.db.share_snapshot_get_all_with_count diff --git a/manila/share/manager.py b/manila/share/manager.py index d98b3929bd..e68f7458e0 100644 --- a/manila/share/manager.py +++ b/manila/share/manager.py @@ -151,7 +151,12 @@ share_manager_opts = [ default=60, help='This value, specified in seconds, determines how often ' 'the share manager will poll to perform the next steps ' - 'of restore such as fetch the progress of restore.') + 'of restore such as fetch the progress of restore.'), + cfg.IntOpt('periodic_deferred_delete_interval', + default=300, + help='This value, specified in seconds, determines how often ' + 'the share manager will attempt to delete shares and share ' + 'snapshots in the backend driver.'), ] CONF = cfg.CONF @@ -259,7 +264,7 @@ def add_hooks(f): class ShareManager(manager.SchedulerDependentManager): """Manages NAS storages.""" - RPC_API_VERSION = '1.26' + RPC_API_VERSION = '1.27' def __init__(self, share_driver=None, service_name=None, *args, **kwargs): """Load the driver from args, or from flags.""" @@ -3508,16 +3513,20 @@ class ShareManager(manager.SchedulerDependentManager): msg_args = {'share': share_id, 'snap': snapshot_id} LOG.info(msg, msg_args) + def _get_share_details_from_instance(self, context, share_instance_id): + 
share_instance = self._get_share_instance(context, share_instance_id) + share = self.db.share_get(context, share_instance.get('share_id')) + share_server = self._get_share_server(context, share_instance) + return (share, share_instance, share_server) + @add_hooks @utils.require_driver_initialized - def delete_share_instance(self, context, share_instance_id, force=False): + def delete_share_instance(self, context, share_instance_id, force=False, + deferred_delete=False): """Delete a share instance.""" context = context.elevated() - share_instance = self._get_share_instance(context, share_instance_id) - share_id = share_instance.get('share_id') - share_server = self._get_share_server(context, share_instance) - share = self.db.share_get(context, share_id) - + share, share_instance, share_server = ( + self._get_share_details_from_instance(context, share_instance_id)) self._notify_about_share_usage(context, share, share_instance, "delete.start") @@ -3552,6 +3561,22 @@ class ShareManager(manager.SchedulerDependentManager): resource_id=share_instance_id, exception=excep) + if deferred_delete: + try: + LOG.info( + "Share instance %s has been added to a deferred deletion " + "queue and will be deleted during the next iteration of " + "the periodic deletion task", share_instance_id + ) + self.db.update_share_instance_quota_usages( + context, share_instance_id) + return + except Exception: + LOG.warning( + "Error occurred during quota usage update. Administrator " + "must rectify quotas.") + return + try: self.driver.delete_share(context, share_instance, share_server=share_server) @@ -3581,8 +3606,16 @@ class ShareManager(manager.SchedulerDependentManager): resource_id=share_instance_id, exception=excep) + need_to_update_usages = True + if share_instance['status'] in ( + constants.STATUS_DEFERRED_DELETING, + constants.STATUS_ERROR_DEFERRED_DELETING + ): + need_to_update_usages = False + self.db.share_instance_delete( - context, share_instance_id, need_to_update_usages=True) + context, share_instance_id, + need_to_update_usages=need_to_update_usages) LOG.info("Share instance %s: deleted successfully.", share_instance_id) @@ -3609,6 +3642,62 @@ class ShareManager(manager.SchedulerDependentManager): else: self.delete_share_server(context, share_server) + def _get_share_instances_with_deferred_deletion(self, ctxt): + share_instances = self.db.share_instance_get_all( + ctxt, filters={'status': constants.STATUS_DEFERRED_DELETING}) + + share_instances_error_deferred_deleting = ( + self.db.share_instance_get_all( + ctxt, + filters={'status': constants.STATUS_ERROR_DEFERRED_DELETING})) + updated_del = timeutils.utcnow() - datetime.timedelta(minutes=30) + for share_instance in share_instances_error_deferred_deleting: + if share_instance.get('updated_at') < updated_del: + share_instances.append(share_instance) + return share_instances + + @periodic_task.periodic_task( + spacing=CONF.periodic_deferred_delete_interval) + @utils.require_driver_initialized + def do_deferred_share_deletion(self, ctxt): + LOG.debug("Checking for shares in 'deferred_deleting' status to " + "process their deletion.") + ctxt = ctxt.elevated() + share_instances = ( + self._get_share_instances_with_deferred_deletion(ctxt)) + + for share_instance in share_instances: + share_instance_id = share_instance['id'] + share, share_instance, share_server = ( + self._get_share_details_from_instance( + ctxt, + share_instance_id + ) + ) + try: + self.driver.delete_share(ctxt, share_instance, + share_server=share_server) + except 
exception.ShareResourceNotFound: + LOG.warning("Share instance %s does not exist in the " + "backend.", share_instance_id) + except Exception: + msg = ("The driver was unable to delete the share " + "instance: %s on the backend. ") + LOG.error(msg, share_instance_id) + self.db.share_instance_update( + ctxt, + share_instance_id, + {'status': constants.STATUS_ERROR_DEFERRED_DELETING}) + continue + + self.db.share_instance_delete(ctxt, share_instance_id) + LOG.info("Share instance %s: deferred deletion completed " + "successfully.", share_instance_id) + self._check_delete_share_server(ctxt, + share_instance=share_instance) + self._notify_about_share_usage(ctxt, share, + share_instance, "delete.end") + @periodic_task.periodic_task(spacing=600) @utils.require_driver_initialized def delete_free_share_servers(self, ctxt): @@ -3771,9 +3860,32 @@ class ShareManager(manager.SchedulerDependentManager): self.db.share_snapshot_instance_update( context, snapshot_instance_id, model_update) + def _delete_snapshot_quota(self, context, snapshot, + deferred_delete=False): + share_type_id = snapshot['share']['instance']['share_type_id'] + reservations = None + try: + reservations = QUOTAS.reserve( + context, project_id=snapshot['project_id'], snapshots=-1, + snapshot_gigabytes=-snapshot['size'], + user_id=snapshot['user_id'], + share_type_id=share_type_id, + ) + except Exception: + LOG.exception("Failed to update quota usages while deleting " + "snapshot %s.", snapshot['id']) + + if reservations: + QUOTAS.commit( + context, reservations, project_id=snapshot['project_id'], + user_id=snapshot['user_id'], + share_type_id=share_type_id, + ) + @add_hooks @utils.require_driver_initialized - def delete_snapshot(self, context, snapshot_id, force=False): + def delete_snapshot(self, context, snapshot_id, force=False, + deferred_delete=False): """Delete share snapshot.""" context = context.elevated() snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) @@ -3784,11 +3896,6 @@ class ShareManager(manager.SchedulerDependentManager): context, snapshot_ref.instance['id'], with_share_data=True) snapshot_instance_id = snapshot_instance['id'] - if context.project_id != snapshot_ref['project_id']: - project_id = snapshot_ref['project_id'] - else: - project_id = context.project_id - snapshot_instance = self._get_snapshot_instance_dict( context, snapshot_instance) @@ -3807,6 +3914,23 @@ class ShareManager(manager.SchedulerDependentManager): "for snapshot %s. Moving on.", snapshot_instance['snapshot_id']) + if deferred_delete: + try: + LOG.info( + "Snapshot instance %s has been added to a deferred " + "deletion queue and will be deleted during the next " + "iteration of the periodic deletion task", + snapshot_instance['id'] + ) + self._delete_snapshot_quota( + context, snapshot_ref, deferred_delete=True) + return + except Exception: + LOG.warning( + "Error occurred during quota usage update. 
Administrator " + "must rectify quotas.") + return + try: self.driver.delete_snapshot(context, snapshot_instance, share_server=share_server) @@ -3834,26 +3958,49 @@ class ShareManager(manager.SchedulerDependentManager): exception=excep) self.db.share_snapshot_instance_delete(context, snapshot_instance_id) + self._delete_snapshot_quota(context, snapshot_ref) - share_type_id = snapshot_ref['share']['instance']['share_type_id'] - try: - reservations = QUOTAS.reserve( - context, project_id=project_id, snapshots=-1, - snapshot_gigabytes=-snapshot_ref['size'], - user_id=snapshot_ref['user_id'], - share_type_id=share_type_id, - ) - except Exception: - reservations = None - LOG.exception("Failed to update quota usages while deleting " - "snapshot %s.", snapshot_id) + def _get_snapshot_instances_with_deletion_deferred(self, ctxt): + snap_instances = self.db.share_snapshot_instance_get_all_with_filters( + ctxt, {'statuses': constants.STATUS_DEFERRED_DELETING}) - if reservations: - QUOTAS.commit( - context, reservations, project_id=project_id, - user_id=snapshot_ref['user_id'], - share_type_id=share_type_id, - ) + snap_instances_error_deferred_deleting = \ + self.db.share_snapshot_instance_get_all_with_filters( + ctxt, {'statuses': constants.STATUS_ERROR_DEFERRED_DELETING}) + updated_del = timeutils.utcnow() - datetime.timedelta(minutes=30) + for snap_instance in snap_instances_error_deferred_deleting: + if snap_instance.get('updated_at') < updated_del: + snap_instances.append(snap_instance) + return snap_instances + + @periodic_task.periodic_task( + spacing=CONF.periodic_deferred_delete_interval) + @utils.require_driver_initialized + def do_deferred_snapshot_deletion(self, ctxt): + LOG.debug("Checking for snapshots in 'deferred_deleting' status to " + "process their deletion.") + ctxt = ctxt.elevated() + snapshot_instances = ( + self._get_snapshot_instances_with_deletion_deferred(ctxt)) + + for snapshot_instance in snapshot_instances: + snapshot_instance_id = snapshot_instance['id'] + share_server = self._get_share_server( + ctxt, snapshot_instance['share_instance']) + snapshot_instance = self._get_snapshot_instance_dict( + ctxt, snapshot_instance) + + try: + self.driver.delete_snapshot(ctxt, snapshot_instance, + share_server=share_server) + except Exception: + self.db.share_snapshot_instance_update( + ctxt, + snapshot_instance_id, + {'status': constants.STATUS_ERROR_DEFERRED_DELETING}) + continue + self.db.share_snapshot_instance_delete(ctxt, + snapshot_instance_id) @add_hooks @utils.require_driver_initialized diff --git a/manila/share/rpcapi.py b/manila/share/rpcapi.py index dd731cf8dd..93a02b46c3 100644 --- a/manila/share/rpcapi.py +++ b/manila/share/rpcapi.py @@ -87,6 +87,7 @@ class ShareAPI(object): 1.25 - Add transfer_accept() 1.26 - Add create_backup() and delete_backup() restore_backup() methods + 1.27 - Update delete_share_instance() and delete_snapshot() methods """ BASE_RPC_API_VERSION = '1.0' @@ -95,7 +96,7 @@ class ShareAPI(object): super(ShareAPI, self).__init__() target = messaging.Target(topic=CONF.share_topic, version=self.BASE_RPC_API_VERSION) - self.client = rpc.get_client(target, version_cap='1.26') + self.client = rpc.get_client(target, version_cap='1.27') def create_share_instance(self, context, share_instance, host, request_spec, filter_properties, @@ -163,13 +164,15 @@ class ShareAPI(object): snapshot_id=snapshot['id'], reservations=reservations) - def delete_share_instance(self, context, share_instance, force=False): + def delete_share_instance(self, context, share_instance, 
force=False, + deferred_delete=False): host = utils.extract_host(share_instance['host']) - call_context = self.client.prepare(server=host, version='1.4') + call_context = self.client.prepare(server=host, version='1.27') call_context.cast(context, 'delete_share_instance', share_instance_id=share_instance['id'], - force=force) + force=force, + deferred_delete=deferred_delete) def migration_start(self, context, share, dest_host, force_host_assisted_migration, preserve_metadata, @@ -270,13 +273,15 @@ class ShareAPI(object): share_id=share['id'], snapshot_id=snapshot['id']) - def delete_snapshot(self, context, snapshot, host, force=False): + def delete_snapshot(self, context, snapshot, host, force=False, + deferred_delete=False): new_host = utils.extract_host(host) - call_context = self.client.prepare(server=new_host) + call_context = self.client.prepare(server=new_host, version='1.27') call_context.cast(context, 'delete_snapshot', snapshot_id=snapshot['id'], - force=force) + force=force, + deferred_delete=deferred_delete) def create_replicated_snapshot(self, context, share, replicated_snapshot): host = utils.extract_host(share['instance']['host']) diff --git a/manila/tests/db/sqlalchemy/test_api.py b/manila/tests/db/sqlalchemy/test_api.py index ed0fc2aab4..4958b74151 100644 --- a/manila/tests/db/sqlalchemy/test_api.py +++ b/manila/tests/db/sqlalchemy/test_api.py @@ -587,6 +587,18 @@ class ShareDatabaseAPITestCase(test.TestCase): self.assertEqual('share-%s' % instance['id'], instance['name']) + def test_share_instance_get_all_by_status(self): + share = db_utils.create_share() + db_utils.create_share_instance( + share_id=share['id'], status='creating') + share2 = db_utils.create_share() + db_utils.create_share_instance( + share_id=share2['id'], status='error_deferred_deleting') + + instances = db_api.share_instance_get_all( + self.ctxt, filters={'status': 'error_deferred_deleting'}) + self.assertEqual(1, len(instances)) + def test_share_instance_get_all_by_ids(self): fake_share = db_utils.create_share() expected_share_instance = db_utils.create_share_instance( diff --git a/manila/tests/share/test_api.py b/manila/tests/share/test_api.py index 25c6800176..ca3301e88e 100644 --- a/manila/tests/share/test_api.py +++ b/manila/tests/share/test_api.py @@ -241,10 +241,11 @@ class ShareAPITestCase(test.TestCase): mock.Mock(return_value=share.instance)) self.mock_object(self.api.share_rpcapi, 'delete_share_instance') self.mock_object(db_api, 'share_server_update') + self.mock_object(db_api, 'share_instance_delete') return share.instance - def test_get_all_admin_no_filters(self): + def test_get_all_admin_default_filters(self): self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0])) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=True) @@ -253,7 +254,9 @@ class ShareAPITestCase(test.TestCase): db_api.share_get_all_by_project.assert_called_once_with( ctx, sort_dir='desc', sort_key='created_at', - project_id='fake_pid_1', filters={}, is_public=False + project_id='fake_pid_1', + filters={'list_deferred_delete': True}, + is_public=False ) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[0], shares) @@ -265,7 +268,8 @@ class ShareAPITestCase(test.TestCase): shares = self.api.get_all(ctx, {'all_tenants': 1}) db_api.share_get_all.assert_called_once_with( - ctx, sort_dir='desc', sort_key='created_at', filters={}) + ctx, sort_dir='desc', sort_key='created_at', + filters={'list_deferred_delete': True}) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES, shares) def 
test_get_all_admin_filter_by_all_tenants_with_blank(self): @@ -276,7 +280,8 @@ class ShareAPITestCase(test.TestCase): shares = self.api.get_all(ctx, {'all_tenants': ''}) db_api.share_get_all.assert_called_once_with( - ctx, sort_dir='desc', sort_key='created_at', filters={}) + ctx, sort_dir='desc', sort_key='created_at', + filters={'list_deferred_delete': True}) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES, shares) def test_get_all_admin_filter_by_all_tenants_with_false(self): @@ -288,7 +293,8 @@ class ShareAPITestCase(test.TestCase): db_api.share_get_all_by_project.assert_called_once_with( ctx, sort_dir='desc', sort_key='created_at', - project_id='fake_pid_1', filters={}, is_public=False + project_id='fake_pid_1', filters={'list_deferred_delete': True}, + is_public=False ) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[0], shares) @@ -311,7 +317,7 @@ class ShareAPITestCase(test.TestCase): raise exception.NotAuthorized ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) - self.mock_object( + mock_policy = self.mock_object( share_api.policy, 'check_policy', mock.Mock(side_effect=fake_policy_checker)) @@ -319,8 +325,12 @@ class ShareAPITestCase(test.TestCase): exception.NotAuthorized, self.api.get_all, ctx, filters) - share_api.policy.check_policy.assert_called_once_with( - ctx, 'share', policy) + mock_policy.assert_has_calls([ + mock.call( + ctx, 'share', + 'list_shares_in_deferred_deletion_states', + do_raise=False), + mock.call(ctx, 'share', policy)]) def test_get_all_admin_filter_by_share_server_and_all_tenants(self): # NOTE(vponomaryov): if share_server_id provided, 'all_tenants' opt @@ -330,13 +340,19 @@ class ShareAPITestCase(test.TestCase): mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[2:])) self.mock_object(db_api, 'share_get_all') self.mock_object(db_api, 'share_get_all_by_project') + mock_policy = self.mock_object(share_api.policy, 'check_policy') shares = self.api.get_all( ctx, {'share_server_id': 'fake_server_3', 'all_tenants': 1}) - share_api.policy.check_policy.assert_called_once_with( - ctx, 'share', 'list_by_share_server_id') + mock_policy.assert_has_calls([ + mock.call( + ctx, 'share', + 'list_shares_in_deferred_deletion_states', + do_raise=False), + mock.call(ctx, 'share', 'list_by_share_server_id')]) + db_api.share_get_all_by_share_server.assert_called_once_with( ctx, 'fake_server_3', sort_dir='desc', sort_key='created_at', - filters={}, + filters={'list_deferred_delete': True}, ) db_api.share_get_all_by_project.assert_has_calls([]) db_api.share_get_all.assert_has_calls([]) @@ -347,7 +363,8 @@ class ShareAPITestCase(test.TestCase): self.mock_object( db_api, 'share_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[1::2])) - expected_filters = {'display_name': 'bar'} + expected_filters = {'display_name': 'bar', + 'list_deferred_delete': True} shares = self.api.get_all(ctx, {'display_name': 'bar'}) @@ -378,6 +395,7 @@ class ShareAPITestCase(test.TestCase): self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(return_value=expected_result)) expected_filters = copy.copy(search_opts) + expected_filters.update({'list_deferred_delete': True}) shares = self.api.get_all(ctx, search_opts) @@ -400,7 +418,9 @@ class ShareAPITestCase(test.TestCase): db_api.share_get_all_by_project.assert_called_once_with( ctx, sort_dir='desc', sort_key='created_at', project_id='fake_pid_2', - filters={'export_location_' + type: 'test'}, is_public=False + filters={'export_location_' + type: 'test', + 'list_deferred_delete': True}, + is_public=False ) 
self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1:], shares) @@ -412,12 +432,14 @@ class ShareAPITestCase(test.TestCase): shares = self.api.get_all(ctx, {'name': 'foo', 'all_tenants': 1}) db_api.share_get_all.assert_called_once_with( - ctx, sort_dir='desc', sort_key='created_at', filters={}) + ctx, sort_dir='desc', sort_key='created_at', + filters={'list_deferred_delete': True}) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[:1], shares) def test_get_all_admin_filter_by_status(self): ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=True) - expected_filter = {'status': constants.STATUS_AVAILABLE} + expected_filter = {'status': constants.STATUS_AVAILABLE, + 'list_deferred_delete': True} self.mock_object( db_api, 'share_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0::2])) @@ -435,7 +457,8 @@ class ShareAPITestCase(test.TestCase): self.mock_object( db_api, 'share_get_all', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[1::2])) - expected_filter = {'status': constants.STATUS_ERROR} + expected_filter = {'status': constants.STATUS_ERROR, + 'list_deferred_delete': True} shares = self.api.get_all( ctx, {'status': constants.STATUS_ERROR, 'all_tenants': 1}) db_api.share_get_all.assert_called_once_with( @@ -448,6 +471,8 @@ class ShareAPITestCase(test.TestCase): ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=False) self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[1:])) + self.mock_policy_check = self.mock_object( + policy, 'check_policy', mock.Mock(return_value=False)) shares = self.api.get_all(ctx, {'all_tenants': 1}) @@ -464,6 +489,8 @@ class ShareAPITestCase(test.TestCase): mock.Mock(side_effect=[ _FAKE_LIST_OF_ALL_SHARES[1::2], _FAKE_LIST_OF_ALL_SHARES[2::4]])) + self.mock_policy_check = self.mock_object( + policy, 'check_policy', mock.Mock(return_value=False)) shares = self.api.get_all( ctx, {'name': 'bar', 'status': constants.STATUS_ERROR}) @@ -501,6 +528,8 @@ class ShareAPITestCase(test.TestCase): is_admin=False) self.mock_object(db_api, 'share_get_all_by_project', mock.Mock( return_value=_FAKE_LIST_OF_ALL_SHARES[1:])) + self.mock_policy_check = self.mock_object( + policy, 'check_policy', mock.Mock(return_value=False)) shares = self.api.get_all(ctx, {'is_public': is_public}) db_api.share_get_all_by_project.assert_called_once_with( ctx, sort_dir='desc', sort_key='created_at', @@ -514,6 +543,9 @@ class ShareAPITestCase(test.TestCase): is_admin=False) self.mock_object(db_api, 'share_get_all_by_project', mock.Mock( return_value=_FAKE_LIST_OF_ALL_SHARES[1:])) + self.mock_policy_check = self.mock_object( + policy, 'check_policy', mock.Mock(return_value=False)) + shares = self.api.get_all(ctx, {'is_public': is_public}) db_api.share_get_all_by_project.assert_called_once_with( ctx, sort_dir='desc', sort_key='created_at', @@ -532,6 +564,8 @@ class ShareAPITestCase(test.TestCase): self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0])) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) + self.mock_policy_check = self.mock_object( + policy, 'check_policy', mock.Mock(return_value=False)) shares = self.api.get_all(ctx, sort_key='status', sort_dir='asc') @@ -567,13 +601,26 @@ class ShareAPITestCase(test.TestCase): self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0])) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) + mock_policy_check = self.mock_object( + policy, 
'check_policy', mock.Mock(return_value=False)) + search_opts = {key: {'foo1': 'bar1', 'foo2': 'bar2'}} shares = self.api.get_all(ctx, search_opts=search_opts.copy()) if key == 'extra_specs': - share_api.policy.check_policy.assert_called_once_with( - ctx, 'share_types_extra_spec', 'index') + mock_policy_check.assert_has_calls([ + mock.call(ctx, 'share_types_extra_spec', 'index'), + mock.call( + ctx, 'share', + 'list_shares_in_deferred_deletion_states', + do_raise=False)]) + else: + mock_policy_check.assert_called_once_with( + ctx, 'share', + 'list_shares_in_deferred_deletion_states', + do_raise=False), + db_api.share_get_all_by_project.assert_called_once_with( ctx, sort_dir='desc', sort_key='created_at', project_id='fake_pid_1', filters=search_opts, is_public=False) @@ -2283,7 +2330,8 @@ class ShareAPITestCase(test.TestCase): mock.Mock(return_value=share)): self.api.delete_snapshot(self.context, snapshot) self.share_rpcapi.delete_snapshot.assert_called_once_with( - self.context, snapshot, share['host'], force=False) + self.context, snapshot, share['host'], force=False, + deferred_delete=False) share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'delete_snapshot', snapshot) db_api.share_snapshot_instance_update.assert_called_once_with( @@ -2327,7 +2375,8 @@ class ShareAPITestCase(test.TestCase): self.context, snapshot_instance['id'], {'status': constants.STATUS_DELETING}) mock_rpc_call.assert_called_once_with( - self.context, snapshot, share['instance']['host'], force=True) + self.context, snapshot, share['instance']['host'], force=True, + deferred_delete=False) @ddt.data(True, False) def test_delete_snapshot_replicated_snapshot(self, force): @@ -2735,9 +2784,13 @@ class ShareAPITestCase(test.TestCase): {'status': constants.STATUS_DELETING, 'terminated_at': self.dt_utc} ) - self.api.share_rpcapi.delete_share_instance.assert_called_once_with( - self.context, instance, force=force - ) + self.api.share_rpcapi.delete_share_instance.\ + assert_called_once_with( + self.context, + instance, + force=force, + deferred_delete=False + ) db_api.share_server_update( self.context, instance['share_server_id'], @@ -2789,19 +2842,31 @@ class ShareAPITestCase(test.TestCase): mock.Mock()) def test_get_all_snapshots_admin_not_all_tenants(self): ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=True) + mock_policy = self.mock_object(share_api.policy, 'check_policy', + mock.Mock(return_value=False)) self.api.get_all_snapshots(ctx) - share_api.policy.check_policy.assert_called_once_with( - ctx, 'share_snapshot', 'get_all_snapshots') + mock_policy.assert_has_calls([ + mock.call(ctx, 'share_snapshot', 'get_all_snapshots'), + mock.call( + ctx, 'share_snapshot', + 'list_snapshots_in_deferred_deletion_states', + do_raise=False)]) db_api.share_snapshot_get_all_by_project.assert_called_once_with( ctx, 'fakepid', limit=None, offset=None, sort_dir='desc', sort_key='share_id', filters={}) @mock.patch.object(db_api, 'share_snapshot_get_all', mock.Mock()) def test_get_all_snapshots_admin_all_tenants(self): + mock_policy = self.mock_object(share_api.policy, 'check_policy', + mock.Mock(return_value=False)) self.api.get_all_snapshots(self.context, search_opts={'all_tenants': 1}) - share_api.policy.check_policy.assert_called_once_with( - self.context, 'share_snapshot', 'get_all_snapshots') + mock_policy.assert_has_calls([ + mock.call(self.context, 'share_snapshot', 'get_all_snapshots'), + mock.call( + self.context, 'share_snapshot', + 'list_snapshots_in_deferred_deletion_states', + 
do_raise=False)]) db_api.share_snapshot_get_all.assert_called_once_with( self.context, limit=None, offset=None, sort_dir='desc', sort_key='share_id', filters={}) @@ -2810,9 +2875,15 @@ class ShareAPITestCase(test.TestCase): mock.Mock()) def test_get_all_snapshots_not_admin(self): ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=False) + mock_policy = self.mock_object(share_api.policy, 'check_policy', + mock.Mock(return_value=False)) self.api.get_all_snapshots(ctx) - share_api.policy.check_policy.assert_called_once_with( - ctx, 'share_snapshot', 'get_all_snapshots') + mock_policy.assert_has_calls([ + mock.call(ctx, 'share_snapshot', 'get_all_snapshots'), + mock.call( + ctx, 'share_snapshot', + 'list_snapshots_in_deferred_deletion_states', + do_raise=False)]) db_api.share_snapshot_get_all_by_project.assert_called_once_with( ctx, 'fakepid', limit=None, offset=None, sort_dir='desc', sort_key='share_id', filters={}) @@ -2823,12 +2894,18 @@ class ShareAPITestCase(test.TestCase): ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=False) self.mock_object(db_api, 'share_snapshot_get_all_by_project', mock.Mock(return_value=fake_objs)) + mock_policy = self.mock_object(share_api.policy, 'check_policy', + mock.Mock(return_value=False)) result = self.api.get_all_snapshots(ctx, search_opts) self.assertEqual(fake_objs, result) - share_api.policy.check_policy.assert_called_once_with( - ctx, 'share_snapshot', 'get_all_snapshots') + mock_policy.assert_has_calls([ + mock.call(ctx, 'share_snapshot', 'get_all_snapshots'), + mock.call( + ctx, 'share_snapshot', + 'list_snapshots_in_deferred_deletion_states', + do_raise=False)]) db_api.share_snapshot_get_all_by_project.assert_called_once_with( ctx, 'fakepid', limit=None, offset=None, sort_dir='desc', sort_key='share_id', filters=search_opts) @@ -2854,6 +2931,7 @@ class ShareAPITestCase(test.TestCase): ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=False) self.mock_object(db_api, 'share_snapshot_get_all_by_project', mock.Mock(return_value=res_snapshots)) + mock_policy = self.mock_object(share_api.policy, 'check_policy') result = self.api.get_all_snapshots(ctx, search_opts) @@ -2863,8 +2941,12 @@ class ShareAPITestCase(test.TestCase): elif get_snapshot_number == 1: self.assertEqual(fake_objs[1:2], result) - share_api.policy.check_policy.assert_called_once_with( - ctx, 'share_snapshot', 'get_all_snapshots') + mock_policy.assert_has_calls([ + mock.call(ctx, 'share_snapshot', 'get_all_snapshots'), + mock.call( + ctx, 'share_snapshot', + 'list_snapshots_in_deferred_deletion_states', + do_raise=False)]) db_api.share_snapshot_get_all_by_project.assert_called_once_with( ctx, 'fakepid', limit=None, offset=None, sort_dir='desc', sort_key='share_id', filters=search_opts) @@ -2874,10 +2956,17 @@ class ShareAPITestCase(test.TestCase): db_api, 'share_snapshot_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SNAPSHOTS[0])) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) + mock_policy = self.mock_object(share_api.policy, 'check_policy', + mock.Mock(return_value=False)) snapshots = self.api.get_all_snapshots( ctx, sort_key='status', sort_dir='asc') - share_api.policy.check_policy.assert_called_once_with( - ctx, 'share_snapshot', 'get_all_snapshots') + mock_policy.assert_has_calls([ + mock.call(ctx, 'share_snapshot', 'get_all_snapshots'), + mock.call( + ctx, 'share_snapshot', + 'list_snapshots_in_deferred_deletion_states', + do_raise=False)]) + db_api.share_snapshot_get_all_by_project.assert_called_once_with( ctx, 
'fake_pid_1', limit=None, offset=None, sort_dir='asc', sort_key='status', filters={}) diff --git a/manila/tests/share/test_manager.py b/manila/tests/share/test_manager.py index e445e8d463..798791a491 100644 --- a/manila/tests/share/test_manager.py +++ b/manila/tests/share/test_manager.py @@ -408,6 +408,7 @@ class ShareManagerTestCase(test.TestCase): "create_replicated_snapshot", "delete_replicated_snapshot", "periodic_share_replica_snapshot_update", + "do_deferred_share_deletion" ) def test_call_driver_when_its_init_failed(self, method_name): self.mock_object(self.share_manager.driver, 'do_setup', @@ -2208,6 +2209,39 @@ class ShareManagerTestCase(test.TestCase): resource_id=snapshot_instance['id'], exception=mock.ANY) + def test_delete_snapshot_deferred_delete_quota_error(self): + share_id = 'FAKE_SHARE_ID' + share = fakes.fake_share(id=share_id) + snapshot_instance = fakes.fake_snapshot_instance( + share_id=share_id, share=share, name='fake_snapshot') + snapshot = fakes.fake_snapshot( + share_id=share_id, share=share, instance=snapshot_instance, + project_id=self.context.project_id, size=1) + snapshot_id = snapshot['id'] + self.mock_object(self.share_manager.db, 'share_snapshot_get', + mock.Mock(return_value=snapshot)) + self.mock_object(self.share_manager.db, 'share_snapshot_instance_get', + mock.Mock(return_value=snapshot_instance)) + self.mock_object(self.share_manager.db, 'share_get', + mock.Mock(return_value=share)) + self.mock_object(self.share_manager, '_get_share_server', + mock.Mock(return_value=None)) + + self.mock_object(self.share_manager.driver, 'delete_snapshot') + self.mock_object(quota.QUOTAS, 'reserve', + mock.Mock(side_effect=exception.QuotaError(code=500))) + quota_commit_call = self.mock_object(quota.QUOTAS, 'commit') + + self.share_manager.delete_snapshot(self.context, snapshot_id, + deferred_delete=True) + + self.share_manager.driver.delete_snapshot.assert_not_called() + quota.QUOTAS.reserve.assert_called_once_with( + mock.ANY, project_id=self.context.project_id, snapshots=-1, + snapshot_gigabytes=-snapshot['size'], user_id=snapshot['user_id'], + share_type_id=share['instance']['share_type_id']) + self.assertEqual(False, quota_commit_call.called) + @ddt.data(True, False) def test_delete_snapshot_with_quota_error(self, quota_error): @@ -2251,7 +2285,6 @@ class ShareManagerTestCase(test.TestCase): mock.ANY, expected_snapshot_instance_dict, share_server=None) self.assertFalse(db_update_call.called) self.assertTrue(snapshot_destroy_call.called) - self.assertTrue(manager.QUOTAS.reserve.called) quota.QUOTAS.reserve.assert_called_once_with( mock.ANY, project_id=self.context.project_id, snapshots=-1, snapshot_gigabytes=-snapshot['size'], user_id=snapshot['user_id'], @@ -2311,6 +2344,43 @@ class ShareManagerTestCase(test.TestCase): resource_id=snapshot_instance['id'], exception=mock.ANY) + @ddt.data(True, False) + def test_do_deferred_snapshot_deletion(self, consider_error_deleting): + instance_1 = db_utils.create_share_instance( + share_id='fake_id', + share_type_id='fake_type_id') + instance_2 = db_utils.create_share_instance( + share_id='fake_id', + share_type_id='fake_type_id') + share = db_utils.create_share( + id='fake_id', + instances=[instance_1, instance_2]) + snapshot = db_utils.create_snapshot(share_id=share['id']) + db_utils.create_snapshot_instance( + snapshot_id=snapshot['id'], + share_instance_id=instance_1['id'], + status='deferred_deleting') + mins = 20 + if consider_error_deleting: + mins = 40 + db_utils.create_snapshot_instance( + 
snapshot_id=snapshot['id'], + share_instance_id=instance_2['id'], + updated_at=timeutils.utcnow() - datetime.timedelta(minutes=mins), + status='error_deferred_deleting') + + self.mock_object(self.share_manager, '_get_share_server', + mock.Mock(return_value=None)) + mock_delete_share = self.mock_object( + self.share_manager.driver, 'delete_snapshot') + self.mock_object(self.share_manager.db, + 'share_snapshot_instance_update') + self.share_manager.do_deferred_snapshot_deletion(self.context) + if consider_error_deleting: + self.assertEqual(2, mock_delete_share.call_count) + else: + self.assertEqual(1, mock_delete_share.call_count) + def test_create_share_instance_with_share_network_dhss_false(self): manager.CONF.set_default('driver_handles_share_servers', False) self.mock_object( @@ -3858,6 +3928,152 @@ class ShareManagerTestCase(test.TestCase): delete_all_rules=True, share_server=share_srv) self.assertTrue(manager.LOG.warning.called) + def test_delete_share_instance_deferred_delete_quota_error(self): + share_net = db_utils.create_share_network() + share_srv = db_utils.create_share_server( + host=self.share_manager.host + ) + share_type = db_utils.create_share_type() + share = db_utils.create_share(share_network_id=share_net['id'], + share_server_id=share_srv['id'], + share_type_id=share_type['id']) + share_srv = db.share_server_get(self.context, share_srv['id']) + + manager.CONF.delete_share_server_with_last_share = False + self.share_manager.driver = mock.Mock() + self.mock_object(db, 'share_server_get', + mock.Mock(return_value=share_srv)) + mock_access_helper_call = self.mock_object( + self.share_manager.access_helper, 'update_access_rules') + + self.mock_object( + quota.QUOTAS, 'reserve', + mock.Mock(side_effect=exception.QuotaError(code='500'))) + self.mock_object(quota.QUOTAS, 'commit') + self.mock_object(manager.LOG, 'exception') + self.mock_object(self.share_manager.db, 'share_instance_update', + mock.Mock(return_value=None)) + + self.share_manager.delete_share_instance(self.context, + share.instance['id'], + deferred_delete=True) + + mock_access_helper_call.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), share.instance['id'], + delete_all_rules=True, share_server=share_srv) + + reservation_params = { + 'gigabytes': -share['size'], + 'shares': -1, + 'project_id': share['project_id'], + 'share_type_id': share_type['id'], + 'user_id': share['user_id'], + } + quota.QUOTAS.reserve.assert_called_once_with( + mock.ANY, **reservation_params, + ) + self.assertFalse(quota.QUOTAS.commit.called) + self.assertFalse(self.share_manager.driver.teardown_network.called) + + def test_delete_share_instance_deferred_delete(self): + share_net = db_utils.create_share_network() + share_srv = db_utils.create_share_server( + host=self.share_manager.host + ) + share_type = db_utils.create_share_type() + share = db_utils.create_share(share_network_id=share_net['id'], + share_server_id=share_srv['id'], + share_type_id=share_type['id']) + share_srv = db.share_server_get(self.context, share_srv['id']) + + manager.CONF.delete_share_server_with_last_share = False + self.share_manager.driver = mock.Mock() + self.mock_object(db, 'share_server_get', + mock.Mock(return_value=share_srv)) + mock_access_helper_call = self.mock_object( + self.share_manager.access_helper, 'update_access_rules') + + self.mock_object(quota.QUOTAS, 'reserve', + mock.Mock(return_value='fake_reservation')) + self.mock_object(quota.QUOTAS, 'commit') + self.mock_object(manager.LOG, 'exception') + 
self.mock_object(self.share_manager.db, 'share_instance_update', + mock.Mock(return_value=None)) + + self.share_manager.delete_share_instance(self.context, + share.instance['id'], + deferred_delete=True) + + mock_access_helper_call.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), share.instance['id'], + delete_all_rules=True, share_server=share_srv) + + reservation_params = { + 'gigabytes': -share['size'], + 'shares': -1, + 'project_id': share['project_id'], + 'share_type_id': share_type['id'], + 'user_id': share['user_id'], + } + quota.QUOTAS.reserve.assert_called_once_with( + mock.ANY, **reservation_params, + ) + quota.QUOTAS.commit.assert_called_once_with( + mock.ANY, mock.ANY, project_id=share['project_id'], + share_type_id=share_type['id'], user_id=share['user_id'], + ) + self.assertFalse(self.share_manager.driver.teardown_network.called) + + @ddt.data(True, False) + def test_do_deferred_share_deletion(self, consider_error_deleting): + share = db_utils.create_share_without_instance( + id='fake_id', + status=constants.STATUS_AVAILABLE) + share_server = fakes.fake_share_server_get() + kwargs = { + 'id': 1, + 'share_id': share['id'], + 'share_server_id': share_server['id'], + 'status': 'deferred_deleting', + 'updated_at': timeutils.utcnow() + } + db_utils.create_share_instance(**kwargs) + kwargs = { + 'id': 2, + 'share_id': share['id'], + 'share_server_id': share_server['id'], + 'status': 'deferred_deleting', + 'updated_at': timeutils.utcnow() + } + db_utils.create_share_instance(**kwargs) + mins = 20 + if consider_error_deleting: + mins = 40 + kwargs = { + 'id': 3, + 'share_id': share['id'], + 'share_server_id': share_server['id'], + 'status': 'error_deferred_deleting', + 'updated_at': timeutils.utcnow() - datetime.timedelta(minutes=mins) + } + db_utils.create_share_instance(**kwargs) + + self.mock_object(self.share_manager.db, 'share_server_get', + mock.Mock(return_value=share_server)) + self.mock_object(self.share_manager.db, 'share_get', + mock.Mock(return_value=share)) + self.mock_object(self.share_manager.db, 'share_instance_delete') + self.mock_object(self.share_manager, '_check_delete_share_server') + self.mock_object(self.share_manager, '_notify_about_share_usage') + mock_delete_share = self.mock_object( + self.share_manager.driver, 'delete_share') + + self.share_manager.do_deferred_share_deletion(self.context) + if consider_error_deleting: + self.assertEqual(3, mock_delete_share.call_count) + else: + self.assertEqual(2, mock_delete_share.call_count) + def test_setup_server(self): # Setup required test data metadata = {'fake_metadata_key': 'fake_metadata_value'} diff --git a/manila/tests/share/test_rpcapi.py b/manila/tests/share/test_rpcapi.py index 51a3a94cec..e1f3013dad 100644 --- a/manila/tests/share/test_rpcapi.py +++ b/manila/tests/share/test_rpcapi.py @@ -207,9 +207,10 @@ class ShareRpcAPITestCase(test.TestCase): def test_delete_share_instance(self): self._test_share_api('delete_share_instance', rpc_method='cast', - version='1.4', + version='1.27', share_instance=self.fake_share, - force=False) + force=False, + deferred_delete=False) def test_update_access(self): self._test_share_api('update_access', @@ -226,9 +227,11 @@ class ShareRpcAPITestCase(test.TestCase): def test_delete_snapshot(self): self._test_share_api('delete_snapshot', rpc_method='cast', + version='1.27', snapshot=self.fake_snapshot, host='fake_host', - force=False) + force=False, + deferred_delete=False) def test_delete_share_server(self): self._test_share_api('delete_share_server', diff 
--git a/releasenotes/notes/blueprint-share-and-snapshot-deferred-deletion-b3453718fd1e4b56.yaml b/releasenotes/notes/blueprint-share-and-snapshot-deferred-deletion-b3453718fd1e4b56.yaml new file mode 100644 index 0000000000..dafcfe7335 --- /dev/null +++ b/releasenotes/notes/blueprint-share-and-snapshot-deferred-deletion-b3453718fd1e4b56.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + With deferred deletion, when a resource (share or snapshot) is deleted, + its quota is freed immediately and a periodic task later deletes the + resource from the backend driver. Resources that hit errors during + deletion are retried after some time by the same periodic task.
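For reviewers who want to exercise this change, a minimal configuration sketch is below. It assumes the two new options introduced by this patch are read from the [DEFAULT] section of manila.conf (the devstack plugin change above writes periodic_deferred_delete_interval there); the values shown are only illustrative, not recommendations.

    # devstack local.conf
    MANILA_DEFERRED_DELETE_TASK_INTERVAL=10

    # manila.conf
    [DEFAULT]
    # Free share/snapshot quota as soon as a delete request is accepted;
    # the backend deletion happens later in the share manager's periodic task.
    is_deferred_deletion_enabled = True
    # How often (in seconds) the periodic task processes deferred deletions
    # and retries ones that previously failed.
    periodic_deferred_delete_interval = 300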