From b450eef832ff471b78776c9715b0878e95d69263 Mon Sep 17 00:00:00 2001 From: john-griffith Date: Thu, 21 Mar 2013 09:48:03 -0600 Subject: [PATCH] Snapshot reservation sync calls wrong resource. The snapshot reservations code isn't calling the correct resource on sync (it's calling volumes). There are also some problems with the logic being used on the delete/cleanup that are fixed here as well. Fixes bug: 1157506 Fixes bug: 1157982 Change-Id: I91327b8043ab63aa35ea8a91b6de544bf5bf6c61 --- cinder/db/api.py | 2 +- cinder/flags.py | 5 ++++- cinder/quota.py | 6 +++--- cinder/tests/test_quota.py | 11 ++++++----- cinder/volume/api.py | 7 +++++-- cinder/volume/manager.py | 16 ++++++++++++++++ etc/cinder/cinder.conf.sample | 4 +++- 7 files changed, 38 insertions(+), 13 deletions(-) diff --git a/cinder/db/api.py b/cinder/db/api.py index dc5e675da5f..90011933097 100644 --- a/cinder/db/api.py +++ b/cinder/db/api.py @@ -314,7 +314,7 @@ def snapshot_data_get_for_project(context, project_id, session=None): """Get count and gigabytes used for snapshots for specified project.""" return IMPL.snapshot_data_get_for_project(context, project_id, - session=None) + session) def snapshot_get_active_by_window(context, begin, end=None, project_id=None): diff --git a/cinder/flags.py b/cinder/flags.py index bfe7661819d..d82e1ca8a50 100644 --- a/cinder/flags.py +++ b/cinder/flags.py @@ -238,6 +238,9 @@ global_opts = [ default=None, help='A list of backend names to use. 
These backend names ' 'should be backed by a unique [CONFIG] group ' - 'with its options'), ] + 'with its options'), + cfg.BoolOpt('no_snapshot_gb_quota', + default=False, + help='Whether snapshots count against GigaByte quota'), ] FLAGS.register_opts(global_opts) diff --git a/cinder/quota.py b/cinder/quota.py index 2dd21875227..34196c2df5b 100644 --- a/cinder/quota.py +++ b/cinder/quota.py @@ -738,9 +738,9 @@ def _sync_volumes(context, project_id, session): def _sync_snapshots(context, project_id, session): return dict(zip(('snapshots', 'gigabytes'), - db.volume_data_get_for_project(context, - project_id, - session=session))) + db.snapshot_data_get_for_project(context, + project_id, + session=session))) QUOTAS = QuotaEngine() diff --git a/cinder/tests/test_quota.py b/cinder/tests/test_quota.py index f6c81837b57..20b956335f8 100644 --- a/cinder/tests/test_quota.py +++ b/cinder/tests/test_quota.py @@ -68,13 +68,14 @@ class QuotaIntegrationTestCase(test.TestCase): vol['user_id'] = self.user_id vol['project_id'] = self.project_id vol['size'] = size - return db.volume_create(self.context, vol)['id'] + vol['status'] = 'available' + return db.volume_create(self.context, vol) def test_too_many_volumes(self): volume_ids = [] for i in range(FLAGS.quota_volumes): - volume_id = self._create_volume() - volume_ids.append(volume_id) + vol_ref = self._create_volume() + volume_ids.append(vol_ref['id']) self.assertRaises(exception.QuotaError, volume.API().create, self.context, 10, '', '', None) @@ -83,8 +84,8 @@ class QuotaIntegrationTestCase(test.TestCase): def test_too_many_gigabytes(self): volume_ids = [] - volume_id = self._create_volume(size=20) - volume_ids.append(volume_id) + vol_ref = self._create_volume(size=20) + volume_ids.append(vol_ref['id']) self.assertRaises(exception.QuotaError, volume.API().create, self.context, 10, '', '', None) diff --git a/cinder/volume/api.py b/cinder/volume/api.py index a6fb584b18d..8c3d394b046 100644 --- a/cinder/volume/api.py +++ 
b/cinder/volume/api.py @@ -491,8 +491,11 @@ class API(base.Base): raise exception.InvalidVolume(reason=msg) try: - reservations = QUOTAS.reserve(context, snapshots=1, - gigabytes=volume['size']) + if FLAGS.no_snapshot_gb_quota: + reservations = QUOTAS.reserve(context, snapshots=1) + else: + reservations = QUOTAS.reserve(context, snapshots=1, + gigabytes=volume['size']) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py index 9a9c234b221..f8e9b28ee9f 100644 --- a/cinder/volume/manager.py +++ b/cinder/volume/manager.py @@ -489,9 +489,25 @@ class VolumeManager(manager.SchedulerDependentManager): snapshot_ref['id'], {'status': 'error_deleting'}) + # Get reservations + try: + if CONF.no_snapshot_gb_quota: + reservations = QUOTAS.reserve(context, snapshots=-1) + else: + reservations = QUOTAS.reserve( + context, + snapshots=-1, + gigabytes=-snapshot_ref['volume_size']) + except Exception: + reservations = None + LOG.exception(_("Failed to update usages deleting snapshot")) self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id) self.db.snapshot_destroy(context, snapshot_id) LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['name']) + + # Commit the reservations + if reservations: + QUOTAS.commit(context, reservations) return True def attach_volume(self, context, volume_id, instance_uuid, mountpoint): diff --git a/etc/cinder/cinder.conf.sample b/etc/cinder/cinder.conf.sample index 2036f7025ab..4092bd288e1 100644 --- a/etc/cinder/cinder.conf.sample +++ b/etc/cinder/cinder.conf.sample @@ -44,6 +44,8 @@ # syslog facility to receive log lines (string value) #syslog_log_facility=LOG_USER +# Do not count snapshots against gigabytes quota (bool value) +#no_snapshot_gb_quota=False # # Options defined in cinder.exception @@ -1168,4 +1170,4 @@ #volume_driver=cinder.volume.driver.FakeISCSIDriver -# Total option count: 254 +# Total option count: 255