Add config option to set per_volume_size_limit

There is a need to limit the maximum size of a volume to levels
that the storage infrastructure can handle.
Setting a maximum limit on volume size also prevents
a tenant from creating large volumes that have not been tested
and certified to satisfy SLA objectives.

This feature allows an admin to set a per-volume size limit for a
tenant. The default value for the limit is obtained via config:
it comes either from the default set in the quota configuration
option or from cinder.conf, if the user has configured default
quota values there.

per_volume_size_limit always defaults to -1 ("no limit") unless
the admin changes it in cinder.conf.

Change-Id: Ieb5c087ca7a33d22342470ea790a0c979a6244ea
Implements: blueprint cinder-quota-define-per-volume
Rakesh Mishra 2015-05-27 02:24:20 +05:30
parent 92d17118d0
commit 445dfee259
7 changed files with 124 additions and 11 deletions
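
The option itself follows the standard oslo.config pattern used for
Cinder's other quota options. Below is a minimal, self-contained sketch of
how it is registered and consumed; the cfg.IntOpt definition matches the
patch, while the sample cinder.conf value and the max_volume_size() helper
are illustrative assumptions only.

# Sketch only: mirrors the cfg.IntOpt added by this patch. The helper
# max_volume_size() is a hypothetical convenience, not code from the change.
#
# An admin would enable a cap in cinder.conf, e.g.:
#   [DEFAULT]
#   per_volume_size_limit = 500   # gigabytes; -1 (the default) means no limit
from oslo_config import cfg

quota_opts = [
    cfg.IntOpt('per_volume_size_limit',
               default=-1,
               help='Max size allowed per volume, in gigabytes'),
]

CONF = cfg.CONF
CONF.register_opts(quota_opts)


def max_volume_size():
    """Return the configured per-volume cap in GB, or None when unlimited."""
    limit = CONF.per_volume_size_limit
    return None if limit == -1 else limit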


@@ -0,0 +1,62 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+from oslo_utils import timeutils
+from sqlalchemy import MetaData, Table
+
+from cinder.i18n import _LE
+
+# Get the default value via config. The default will either come from the
+# default value set in the quota configuration option, or via cinder.conf
+# if the user has configured a default value for the per-volume size
+# limit there.
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    """Add a default "per_volume_gigabytes" row into the DB."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    quota_classes = Table('quota_classes', meta, autoload=True)
+
+    row = quota_classes.count().\
+        where(quota_classes.c.resource == 'per_volume_gigabytes').\
+        execute().scalar()
+
+    # Do not add an entry if a 'default' entry already exists in the
+    # database. We don't want to write over something the user added.
+    if row:
+        return
+
+    try:
+        # Insert the default per_volume_gigabytes row (-1 means no limit).
+        qci = quota_classes.insert()
+        qci.execute({'created_at': timeutils.utcnow(),
+                     'class_name': 'default',
+                     'resource': 'per_volume_gigabytes',
+                     'hard_limit': -1,
+                     'deleted': False, })
+    except Exception:
+        LOG.error(_LE("Default per_volume_gigabytes row not inserted "
+                      "into the quota_classes table."))
+        raise
+
+
+def downgrade(migrate_engine):
+    """Don't delete the 'default' entries at downgrade time.
+
+    We don't know if the user had default entries when we started.
+    If they did, we wouldn't want to remove them. So, the safest
+    thing to do is just leave the 'default' entries at downgrade time.
+    """
+    pass
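
The upgrade is deliberately idempotent: it inserts the 'default'
quota-class row only when none exists, so deployer-supplied defaults are
never overwritten. As a sanity check, the row can be read back using the
same legacy bound-metadata SQLAlchemy style (pre-1.4) the migration itself
uses; the sketch below is illustrative only, and the database URL is a
placeholder.

# Sketch only: reads back the row the migration inserts. Uses the legacy
# bound-metadata/implicit-execution API to match the migration's style.
from sqlalchemy import MetaData, Table, create_engine, select

engine = create_engine('sqlite:///cinder.db')  # placeholder URL
meta = MetaData()
meta.bind = engine

quota_classes = Table('quota_classes', meta, autoload=True)
limit = select([quota_classes.c.hard_limit]).\
    where(quota_classes.c.class_name == 'default').\
    where(quota_classes.c.resource == 'per_volume_gigabytes').\
    execute().scalar()
print(limit)  # -1 unless a deployer overrode the default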


@@ -455,6 +455,11 @@ class VolumeSizeExceedsAvailableQuota(QuotaError):
                 "%(consumed)sG has been consumed.")
 
 
+class VolumeSizeExceedsLimit(QuotaError):
+    message = _("Requested volume size %(size)d is larger than "
+                "maximum allowed limit %(limit)d.")
+
+
 class VolumeBackupSizeExceedsAvailableQuota(QuotaError):
     message = _("Requested backup exceeds allowed Backup gigabytes "
                 "quota. Requested %(requested)sG, quota is %(quota)sG and "


@@ -69,7 +69,10 @@ quota_opts = [
     cfg.BoolOpt('use_default_quota_class',
                 default=True,
                 help='Enables or disables use of default quota class '
-                     'with default quota.'), ]
+                     'with default quota.'),
+    cfg.IntOpt('per_volume_size_limit',
+               default=-1,
+               help='Max size allowed per volume, in gigabytes'), ]
 
 CONF = cfg.CONF
 CONF.register_opts(quota_opts)
@@ -523,7 +526,8 @@ class ReservableResource(BaseResource):
         """
         super(ReservableResource, self).__init__(name, flag=flag)
-        self.sync = sync
+        if sync:
+            self.sync = sync
 
 
 class AbsoluteResource(BaseResource):
@@ -869,6 +873,7 @@ class VolumeTypeQuotaEngine(QuotaEngine):
         result = {}
         # Global quotas.
         argses = [('volumes', '_sync_volumes', 'quota_volumes'),
+                  ('per_volume_gigabytes', None, 'per_volume_size_limit'),
                   ('snapshots', '_sync_snapshots', 'quota_snapshots'),
                   ('gigabytes', '_sync_gigabytes', 'quota_gigabytes'),
                   ('backups', '_sync_backups', 'quota_backups'),
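
The two hunks above work together: per_volume_gigabytes is registered with
a sync function of None, and ReservableResource.__init__ now sets self.sync
only when a sync function is supplied. The resource therefore participates
in limit_check() but never in reservations or usage tracking. A simplified
sketch of that distinction (stand-in classes, not the real cinder/quota.py
implementation):

# Sketch only: simplified stand-ins for cinder.quota classes, showing why a
# sync-less resource is check-only.
class BaseResource(object):
    def __init__(self, name, flag=None):
        self.name = name
        self.flag = flag


class ReservableResource(BaseResource):
    def __init__(self, name, sync, flag=None):
        super(ReservableResource, self).__init__(name, flag=flag)
        if sync:              # mirrors the patched guard
            self.sync = sync  # only reservable resources track usage


# 'volumes' is reservable (its usage is synced from the DB); the new
# 'per_volume_gigabytes' has no usage to sync -- it is only compared
# against the request size during limit_check().
volumes = ReservableResource('volumes', '_sync_volumes', 'quota_volumes')
per_volume = ReservableResource('per_volume_gigabytes', None,
                                'per_volume_size_limit')
assert hasattr(volumes, 'sync') and not hasattr(per_volume, 'sync')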


@@ -30,12 +30,13 @@ from cinder import test
 def make_body(root=True, gigabytes=1000, snapshots=10,
               volumes=10, backups=10, backup_gigabytes=1000,
-              tenant_id='foo'):
+              tenant_id='foo', per_volume_gigabytes=-1):
     resources = {'gigabytes': gigabytes,
                  'snapshots': snapshots,
                  'volumes': volumes,
                  'backups': backups,
-                 'backup_gigabytes': backup_gigabytes}
+                 'backup_gigabytes': backup_gigabytes,
+                 'per_volume_gigabytes': per_volume_gigabytes, }
     # need to consider preexisting volume types as well
     volume_types = db.volume_type_get_all(context.get_admin_context())
     for volume_type in volume_types:


@@ -33,13 +33,14 @@ QUOTAS = quota.QUOTAS
 def make_body(root=True, gigabytes=1000, snapshots=10,
               volumes=10, backups=10,
-              backup_gigabytes=1000,
+              backup_gigabytes=1000, per_volume_gigabytes=-1,
               volume_types_faked=None,
               tenant_id='foo'):
     resources = {'gigabytes': gigabytes,
                  'snapshots': snapshots,
                  'volumes': volumes,
                  'backups': backups,
+                 'per_volume_gigabytes': per_volume_gigabytes,
                  'backup_gigabytes': backup_gigabytes}
     if not volume_types_faked:
         volume_types_faked = {'fake_type': None}


@@ -96,6 +96,19 @@ class QuotaIntegrationTestCase(test.TestCase):
         backup['status'] = 'available'
         return db.backup_create(self.context, backup)
 
+    def test_volume_size_limit_exceeds(self):
+        resource = 'volumes_%s' % self.volume_type_name
+        db.quota_class_create(self.context, 'default', resource, 1)
+        flag_args = {
+            'quota_volumes': 10,
+            'quota_gigabytes': 1000,
+            'per_volume_size_limit': 5
+        }
+        self.flags(**flag_args)
+        self.assertRaises(exception.VolumeSizeExceedsLimit,
+                          volume.API().create,
+                          self.context, 10, '', '',)
+
     def test_too_many_volumes(self):
         volume_ids = []
         for _i in range(CONF.quota_volumes):
@@ -766,7 +779,8 @@ class VolumeTypeQuotaEngineTestCase(test.TestCase):
         engine = quota.VolumeTypeQuotaEngine()
         self.assertEqual(engine.resource_names,
                          ['backup_gigabytes', 'backups',
-                          'gigabytes', 'snapshots', 'volumes'])
+                          'gigabytes', 'per_volume_gigabytes',
+                          'snapshots', 'volumes'])
 
     def test_volume_type_resources(self):
         ctx = context.RequestContext('admin', 'admin', is_admin=True)
@@ -792,8 +806,10 @@ class VolumeTypeQuotaEngineTestCase(test.TestCase):
         self.assertEqual(engine.resource_names,
                          ['backup_gigabytes', 'backups',
                           'gigabytes', 'gigabytes_type1', 'gigabytes_type_2',
-                          'snapshots', 'snapshots_type1', 'snapshots_type_2',
-                          'volumes', 'volumes_type1', 'volumes_type_2'])
+                          'per_volume_gigabytes', 'snapshots',
+                          'snapshots_type1', 'snapshots_type_2', 'volumes',
+                          'volumes_type1', 'volumes_type_2',
+                          ])
         db.volume_type_destroy(ctx, vtype['id'])
         db.volume_type_destroy(ctx, vtype2['id'])
@@ -834,7 +850,8 @@ class DbQuotaDriverTestCase(test.TestCase):
                               snapshots=10,
                               gigabytes=1000,
                               backups=10,
-                              backup_gigabytes=1000))
+                              backup_gigabytes=1000,
+                              per_volume_gigabytes=-1))
 
     def _stub_quota_class_get_default(self):
         # Stub out quota_class_get_default
@@ -873,7 +890,8 @@ class DbQuotaDriverTestCase(test.TestCase):
                               gigabytes=500,
                               snapshots=10,
                               backups=10,
-                              backup_gigabytes=500))
+                              backup_gigabytes=500,
+                              per_volume_gigabytes=-1))
 
     def test_get_class_quotas_no_defaults(self):
         self._stub_quota_class_get_all_by_name()
@@ -937,6 +955,9 @@ class DbQuotaDriverTestCase(test.TestCase):
                  backup_gigabytes=dict(limit=50,
                                        in_use=10,
                                        reserved=0, ),
+                 per_volume_gigabytes=dict(in_use=0,
+                                           limit=-1,
+                                           reserved=0)
                  ))
 
     def test_get_project_quotas_alt_context_no_class(self):
@@ -964,6 +985,10 @@ class DbQuotaDriverTestCase(test.TestCase):
                  backup_gigabytes=dict(limit=50,
                                        in_use=10,
                                        reserved=0, ),
+                 per_volume_gigabytes=dict(in_use=0,
+                                           limit=-1,
+                                           reserved=0)
                  ))
 
     def test_get_project_quotas_alt_context_with_class(self):
@@ -992,6 +1017,10 @@ class DbQuotaDriverTestCase(test.TestCase):
                  backup_gigabytes=dict(limit=50,
                                        in_use=10,
                                        reserved=0, ),
+                 per_volume_gigabytes=dict(in_use=0,
+                                           limit=-1,
+                                           reserved=0)
                  ))
 
     def test_get_project_quotas_no_defaults(self):
@@ -1038,7 +1067,8 @@ class DbQuotaDriverTestCase(test.TestCase):
                  snapshots=dict(limit=10, ),
                  backups=dict(limit=10, ),
                  gigabytes=dict(limit=50, ),
-                 backup_gigabytes=dict(limit=50, ),))
+                 backup_gigabytes=dict(limit=50, ),
+                 per_volume_gigabytes=dict(limit=-1, )))
 
     def _stub_get_project_quotas(self):
         def fake_get_project_quotas(context, resources, project_id,


@@ -571,6 +571,15 @@ class QuotaReserveTask(flow_utils.CinderTask):
         super(QuotaReserveTask, self).__init__(addons=[ACTION])
 
     def execute(self, context, size, volume_type_id, optional_args):
+        try:
+            values = {'per_volume_gigabytes': size}
+            QUOTAS.limit_check(context, project_id=context.project_id,
+                               **values)
+        except exception.OverQuota as e:
+            quotas = e.kwargs['quotas']
+            raise exception.VolumeSizeExceedsLimit(
+                size=size, limit=quotas['per_volume_gigabytes'])
+
         try:
             reserve_opts = {'volumes': 1, 'gigabytes': size}
             QUOTAS.add_volume_type_opts(context, reserve_opts, volume_type_id)
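
End to end, the new pre-reservation limit_check() above is what surfaces to
a tenant as VolumeSizeExceedsLimit, exactly as the new unit test exercises.
An illustration of the expected behavior, assuming per_volume_size_limit = 5
and a valid request context ctx (the names here are assumptions, not part of
the patch):

# Illustration only: with per_volume_size_limit = 5, a 10 GB create fails
# during QuotaReserveTask, before any volumes/gigabytes quota is reserved.
from cinder import exception, volume

try:
    volume.API().create(ctx, 10, 'name', 'description')  # ctx: RequestContext
except exception.VolumeSizeExceedsLimit as e:
    # "Requested volume size 10 is larger than maximum allowed limit 5."
    print(e)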