Snapshot reservation sync calls wrong resource.

The snapshot reservations code isn't calling the
correct resource on sync (it's calling volumes).  There are
also some problems with the logic used on the delete/clean-up
path that are fixed here as well.

Fixes bug: 1157506
Fixes bug: 1157982

Change-Id: I91327b8043ab63aa35ea8a91b6de544bf5bf6c61
(cherry picked from commit b450eef832)
This commit is contained in:
john-griffith 2013-03-21 09:48:03 -06:00
parent 407d9e5e95
commit 5ffed5d2f7
7 changed files with 38 additions and 13 deletions

View File

@ -314,7 +314,7 @@ def snapshot_data_get_for_project(context, project_id, session=None):
"""Get count and gigabytes used for snapshots for specified project."""
return IMPL.snapshot_data_get_for_project(context,
project_id,
session=None)
session)
####################

View File

@ -238,6 +238,9 @@ global_opts = [
default=None,
help='A list of backend names to use. These backend names '
'should be backed by a unique [CONFIG] group '
'with its options'), ]
'with its options'),
cfg.BoolOpt('no_snapshot_gb_quota',
default=False,
help='Whether snapshots count against GigaByte quota'), ]
FLAGS.register_opts(global_opts)

View File

@ -738,9 +738,9 @@ def _sync_volumes(context, project_id, session):
def _sync_snapshots(context, project_id, session):
return dict(zip(('snapshots', 'gigabytes'),
db.volume_data_get_for_project(context,
project_id,
session=session)))
db.snapshot_data_get_for_project(context,
project_id,
session=session)))
QUOTAS = QuotaEngine()

View File

@ -68,13 +68,14 @@ class QuotaIntegrationTestCase(test.TestCase):
vol['user_id'] = self.user_id
vol['project_id'] = self.project_id
vol['size'] = size
return db.volume_create(self.context, vol)['id']
vol['status'] = 'available'
return db.volume_create(self.context, vol)
def test_too_many_volumes(self):
volume_ids = []
for i in range(FLAGS.quota_volumes):
volume_id = self._create_volume()
volume_ids.append(volume_id)
vol_ref = self._create_volume()
volume_ids.append(vol_ref['id'])
self.assertRaises(exception.QuotaError,
volume.API().create,
self.context, 10, '', '', None)
@ -83,8 +84,8 @@ class QuotaIntegrationTestCase(test.TestCase):
def test_too_many_gigabytes(self):
volume_ids = []
volume_id = self._create_volume(size=20)
volume_ids.append(volume_id)
vol_ref = self._create_volume(size=20)
volume_ids.append(vol_ref['id'])
self.assertRaises(exception.QuotaError,
volume.API().create,
self.context, 10, '', '', None)

View File

@ -491,8 +491,11 @@ class API(base.Base):
raise exception.InvalidVolume(reason=msg)
try:
reservations = QUOTAS.reserve(context, snapshots=1,
gigabytes=volume['size'])
if FLAGS.no_snapshot_gb_quota:
reservations = QUOTAS.reserve(context, snapshots=1)
else:
reservations = QUOTAS.reserve(context, snapshots=1,
gigabytes=volume['size'])
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']

View File

@ -489,9 +489,25 @@ class VolumeManager(manager.SchedulerDependentManager):
snapshot_ref['id'],
{'status': 'error_deleting'})
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reservations = QUOTAS.reserve(context, snapshots=-1)
else:
reservations = QUOTAS.reserve(
context,
snapshots=-1,
gigabytes=-snapshot_ref['volume_size'])
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
self.db.snapshot_destroy(context, snapshot_id)
LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)
return True
def attach_volume(self, context, volume_id, instance_uuid, mountpoint):

View File

@ -44,6 +44,8 @@
# syslog facility to receive log lines (string value)
#syslog_log_facility=LOG_USER
# Do not count snapshots against gigabytes quota (bool value)
#no_snapshot_gb_quota=False
#
# Options defined in cinder.exception
@ -1168,4 +1170,4 @@
#volume_driver=cinder.volume.driver.FakeISCSIDriver
# Total option count: 254
# Total option count: 255