Implement static quotas
This commit adds the ability to specify static quotas in the Glare configuration: 1) the number of created artifacts (max_artifact_number in the DEFAULT section); 2) the amount of stored data (max_uploaded_data in the DEFAULT section); 3) the number of created artifacts of a specific artifact type (max_artifact_number in each artifact-type section); 4) the amount of stored data for a specific artifact type (max_uploaded_data in each artifact-type section). Partially Implements: blueprint glare-quotas Change-Id: I3c9ab6f24c656283d8b2283af4c455fbc411862e
This commit is contained in:
parent
552667059a
commit
5253c5c216
@ -33,6 +33,7 @@ from glare.i18n import _
|
||||
from glare import locking
|
||||
from glare.notification import Notifier
|
||||
from glare.objects.meta import registry
|
||||
from glare import quota
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
@ -79,9 +80,10 @@ class Engine(object):
|
||||
filters.extend([('owner', 'eq:' + owner),
|
||||
('visibility', 'private')])
|
||||
|
||||
scope_id = "%s:%s:%s" % (type_name, name, version)
|
||||
if visibility != 'public':
|
||||
scope_id += ':%s' % owner
|
||||
scope_id = owner
|
||||
if visibility == 'public':
|
||||
scope_id = "%s:%s:%s" % (type_name, name, version)
|
||||
|
||||
lock = self.lock_engine.acquire(context, scope_id)
|
||||
|
||||
try:
|
||||
@ -214,6 +216,7 @@ class Engine(object):
|
||||
# acquire scoped lock and execute artifact create
|
||||
with self._create_scoped_lock(context, type_name, af.name,
|
||||
af.version, context.tenant):
|
||||
quota.verify_artifact_count(context, type_name)
|
||||
for field_name, value in values.items():
|
||||
if af.is_blob(field_name) or af.is_blob_dict(field_name):
|
||||
msg = _("Cannot add blob with this request. "
|
||||
@ -384,6 +387,10 @@ class Engine(object):
|
||||
value = folder
|
||||
return af.update_blob(context, af.id, field_name, value)
|
||||
|
||||
@staticmethod
|
||||
def _generate_blob_name(field_name, blob_key=None):
|
||||
return "%s[%s]" % (field_name, blob_key) if blob_key else field_name
|
||||
|
||||
def add_blob_location(self, context, type_name, artifact_id, field_name,
|
||||
location, blob_meta, blob_key=None):
|
||||
"""Add external/internal location to blob.
|
||||
@ -398,8 +405,7 @@ class Engine(object):
|
||||
in this dict
|
||||
:return: dict representation of updated artifact
|
||||
"""
|
||||
blob_name = "%s[%s]" % (field_name, blob_key)\
|
||||
if blob_key else field_name
|
||||
blob_name = self._generate_blob_name(field_name, blob_key)
|
||||
|
||||
location_type = blob_meta.pop('location_type', 'external')
|
||||
|
||||
@ -444,12 +450,13 @@ class Engine(object):
|
||||
Notifier.notify(context, action_name, modified_af)
|
||||
return modified_af.to_dict()
|
||||
|
||||
@staticmethod
|
||||
def _calculate_allowed_space(context, af, field_name, content_length=None,
|
||||
blob_key=None):
|
||||
"""Calculate the maximum amount of data user can upload to a blob."""
|
||||
def _calculate_allowed_space(self, context, af, field_name,
|
||||
content_length=None, blob_key=None):
|
||||
"""Calculate the maximum amount of data user can upload to the blob."""
|
||||
# As a default we take the maximum blob size
|
||||
max_allowed_size = af.get_max_blob_size(field_name)
|
||||
blob_name = self._generate_blob_name(field_name, blob_key)
|
||||
|
||||
max_blob_size = af.get_max_blob_size(field_name)
|
||||
|
||||
if blob_key is not None:
|
||||
# For folders we also compare it with the maximum folder size
|
||||
@ -457,18 +464,30 @@ class Engine(object):
|
||||
overall_folder_size = sum(
|
||||
blob["size"] for blob in blobs_dict.values()
|
||||
if blob["size"] is not None)
|
||||
max_folder_size_allowed = af.get_max_folder_size(
|
||||
available_folder_space = af.get_max_folder_size(
|
||||
field_name) - overall_folder_size # always non-negative
|
||||
max_allowed_size = min(max_allowed_size,
|
||||
max_folder_size_allowed)
|
||||
max_blob_size = min(max_blob_size, available_folder_space)
|
||||
|
||||
# check quotas
|
||||
quota_size = quota.verify_uploaded_data_amount(
|
||||
context, af.get_type_name(), content_length)
|
||||
|
||||
if content_length is None:
|
||||
# if no content_length was provided we have to allocate
|
||||
# all allowed space for the blob
|
||||
size = max_allowed_size
|
||||
# all allowed space for the blob. It's minimum of max blob size
|
||||
# and available quota limit. -1 means that user don't have upload
|
||||
# limits.
|
||||
size = max_blob_size if quota_size == -1 else min(
|
||||
max_blob_size, quota_size)
|
||||
else:
|
||||
if content_length > max_allowed_size:
|
||||
raise exception.RequestEntityTooLarge()
|
||||
if content_length > max_blob_size:
|
||||
msg = _("Can't upload %(content_length)d bytes of data to "
|
||||
"blob %(blob_name)s. Its max allowed size is "
|
||||
"%(max_blob_size)d") % {
|
||||
'content_length': content_length,
|
||||
'blob_name': blob_name,
|
||||
'max_blob_size': max_blob_size}
|
||||
raise exception.RequestEntityTooLarge(msg)
|
||||
size = content_length
|
||||
|
||||
return size
|
||||
@ -488,9 +507,7 @@ class Engine(object):
|
||||
in this dictionary
|
||||
:return: dict representation of updated artifact
|
||||
"""
|
||||
|
||||
blob_name = "%s[%s]" % (field_name, blob_key) \
|
||||
if blob_key else field_name
|
||||
blob_name = self._generate_blob_name(field_name, blob_key)
|
||||
blob_id = uuidutils.generate_uuid()
|
||||
|
||||
lock_key = "%s:%s" % (type_name, artifact_id)
|
||||
@ -586,8 +603,7 @@ class Engine(object):
|
||||
read_only=True)
|
||||
policy.authorize("artifact:download", af.to_dict(), context)
|
||||
|
||||
blob_name = "%s[%s]" % (field_name, blob_key)\
|
||||
if blob_key else field_name
|
||||
blob_name = self._generate_blob_name(field_name, blob_key)
|
||||
|
||||
if af.status == 'deleted':
|
||||
msg = _("Cannot download data when artifact is deleted")
|
||||
@ -642,8 +658,7 @@ class Engine(object):
|
||||
action_name = 'artifact:delete_blob'
|
||||
policy.authorize(action_name, af.to_dict(), context)
|
||||
|
||||
blob_name = "%s[%s]" % (field_name, blob_key)\
|
||||
if blob_key else field_name
|
||||
blob_name = self._generate_blob_name(field_name, blob_key)
|
||||
|
||||
blob = self._get_blob_info(af, field_name, blob_key)
|
||||
if blob is None:
|
||||
|
@ -27,6 +27,18 @@ from glare.objects.meta import validators
|
||||
from glare.objects.meta import wrappers
|
||||
|
||||
global_artifact_opts = [
|
||||
cfg.IntOpt('max_uploaded_data', default=1099511627776, # 1 Terabyte
|
||||
min=-1,
|
||||
help=_("Defines how many bytes of data user can upload to "
|
||||
"storage. This parameter is global and doesn't take "
|
||||
"into account data of what type was uploaded. "
|
||||
"Value -1 means no limit.")),
|
||||
cfg.IntOpt('max_artifact_number', default=100,
|
||||
min=-1,
|
||||
help=_("Defines how many artifacts user can have. This "
|
||||
"parameter is global and doesn't take "
|
||||
"into account artifacts of what type were created. "
|
||||
"Value -1 means no limit.")),
|
||||
cfg.BoolOpt('delayed_delete', default=False,
|
||||
help=_("If False defines that artifacts must be deleted "
|
||||
"immediately after the user call. Otherwise they just "
|
||||
@ -125,6 +137,12 @@ class BaseArtifact(base.VersionedObject):
|
||||
}
|
||||
|
||||
artifact_type_opts = [
|
||||
cfg.IntOpt('max_uploaded_data', min=-1,
|
||||
help=_("Defines how many bytes of data of this type user "
|
||||
"can upload to storage. Value -1 means no limit.")),
|
||||
cfg.IntOpt('max_artifact_number', min=-1,
|
||||
help=_("Defines how many artifacts of this type user can "
|
||||
"have. Value -1 means no limit.")),
|
||||
cfg.BoolOpt('delayed_delete',
|
||||
help=_(
|
||||
"If False defines that artifacts must be deleted "
|
||||
@ -160,7 +178,7 @@ Possible values:
|
||||
* cinder
|
||||
* vsphere
|
||||
* database
|
||||
"""))
|
||||
"""))
|
||||
]
|
||||
|
||||
@classmethod
|
||||
|
109
glare/quota.py
Normal file
109
glare/quota.py
Normal file
@ -0,0 +1,109 @@
|
||||
# Copyright 2017 - Nokia Networks
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from glare.common import exception
|
||||
from glare.db.sqlalchemy import api
|
||||
from glare.i18n import _
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
def verify_artifact_count(context, type_name):
    """Verify that the user is allowed to create one more artifact.

    Checks the global ``max_artifact_number`` limit and the per-type
    ``max_artifact_number`` limit (read from the ``artifact_type:<name>``
    config section) against the number of artifacts the user already has.
    A limit of -1 disables the corresponding check; an unset per-type
    limit (None) skips the per-type check.

    :param context: user context
    :param type_name: name of artifact type
    :raises exception.Forbidden: if either quota is already exhausted
    """
    global_limit = CONF.max_artifact_number
    type_limit = getattr(
        CONF, 'artifact_type:' + type_name).max_artifact_number

    session = api.get_session()
    # total number of the user's artifacts, regardless of type
    whole_number = api.count_artifact_number(context, session)

    if global_limit != -1 and whole_number >= global_limit:
        msg = _("Can't create artifact because of global quota "
                "limit is %(global_limit)d artifacts. "
                "You have %(whole_number)d artifact(s).") % {
            'global_limit': global_limit, 'whole_number': whole_number}
        raise exception.Forbidden(msg)

    if type_limit is not None:
        # number of the user's artifacts of this specific type
        type_number = api.count_artifact_number(
            context, session, type_name)

        if type_limit != -1 and type_number >= type_limit:
            msg = _("Can't create artifact because of quota limit for "
                    "artifact type '%(type_name)s' is %(type_limit)d "
                    "artifacts. You have %(type_number)d artifact(s) "
                    "of this type.") % {
                'type_name': type_name,
                'type_limit': type_limit,
                'type_number': type_number}
            raise exception.Forbidden(msg)
|
||||
|
||||
|
||||
def verify_uploaded_data_amount(context, type_name, data_amount=None):
    """Verify if user can upload data based on his quota limits.

    Checks the global ``max_uploaded_data`` limit and the per-type limit
    (read from the ``artifact_type:<name>`` config section). A limit of
    -1 disables the corresponding check; an unset per-type limit (None)
    skips the per-type check.

    :param context: user context
    :param type_name: name of artifact type
    :param data_amount: number of bytes user wants to upload. Value None means
        that user hasn't specified data amount. In this case don't raise an
        exception, but just return the amount of data he is able to upload.
    :return: number of bytes user can upload if data_amount isn't specified,
        or -1 if no upload limit applies
    :raises exception.RequestEntityTooLarge: if data_amount is specified and
        uploading that much data would exceed either quota
    """
    global_limit = CONF.max_uploaded_data
    type_limit = getattr(CONF, 'artifact_type:' + type_name).max_uploaded_data

    session = api.get_session()
    # total amount of data the user has uploaded, regardless of type
    whole_number = api.calculate_uploaded_data(context, session)
    res = -1

    if global_limit != -1:
        if data_amount is None:
            res = global_limit - whole_number
        elif whole_number + data_amount > global_limit:
            msg = _("Can't upload %(data_amount)d byte(s) because of global "
                    "quota limit: %(global_limit)d. "
                    "You have %(whole_number)d bytes uploaded.") % {
                'data_amount': data_amount,
                'global_limit': global_limit,
                'whole_number': whole_number}
            raise exception.RequestEntityTooLarge(msg)

    if type_limit is not None:
        # amount of data uploaded for this specific artifact type
        type_number = api.calculate_uploaded_data(
            context, session, type_name)
        if type_limit != -1:
            if data_amount is None:
                # report the tighter of the global and per-type allowances
                available = type_limit - type_number
                res = available if res == -1 else min(res, available)
            elif type_number + data_amount > type_limit:
                msg = _("Can't upload %(data_amount)d byte(s) because of "
                        "quota limit for artifact type '%(type_name)s': "
                        "%(type_limit)d. You have %(type_number)d bytes "
                        "uploaded for this type.") % {
                    'data_amount': data_amount,
                    'type_name': type_name,
                    'type_limit': type_limit,
                    'type_number': type_number}
                raise exception.RequestEntityTooLarge(msg)
    return res
|
@ -280,9 +280,10 @@ class GlareServer(Server):
|
||||
default_sql_connection)
|
||||
self.lock_path = self.test_dir
|
||||
|
||||
self.send_identity_headers = False
|
||||
self.enabled_artifact_types = ''
|
||||
self.custom_artifact_types_modules = ''
|
||||
self.max_uploaded_data = '1099511627776'
|
||||
self.max_artifact_number = '100'
|
||||
self.artifact_type_section = ''
|
||||
|
||||
self.conf_base = """[DEFAULT]
|
||||
@ -298,6 +299,8 @@ workers = %(workers)s
|
||||
lock_path = %(lock_path)s
|
||||
enabled_artifact_types = %(enabled_artifact_types)s
|
||||
custom_artifact_types_modules = %(custom_artifact_types_modules)s
|
||||
max_uploaded_data = %(max_uploaded_data)s
|
||||
max_artifact_number = %(max_artifact_number)s
|
||||
[oslo_policy]
|
||||
policy_file = %(policy_file)s
|
||||
policy_default_rule = %(policy_default_rule)s
|
||||
|
160
glare/tests/functional/test_quotas.py
Normal file
160
glare/tests/functional/test_quotas.py
Normal file
@ -0,0 +1,160 @@
|
||||
# Copyright 2017 - Nokia Networks
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from glare.tests.functional import base
|
||||
|
||||
|
||||
class TestStaticQuotas(base.TestArtifact):
    """Functional checks for the static (config-defined) quota limits."""

    def setUp(self):
        base.functional.FunctionalTest.setUp(self)

        self.set_user('user1')
        self.glare_server.deployment_flavor = 'noauth'
        # global limits: 1000 bytes of data and 10 artifacts per user
        self.glare_server.max_uploaded_data = '1000'
        self.glare_server.max_artifact_number = '10'

        self.glare_server.enabled_artifact_types = ','.join(
            ('images', 'heat_templates', 'murano_packages',
             'sample_artifact'))
        self.glare_server.custom_artifact_types_modules = (
            'glare.tests.sample_artifact')
        # per-type overrides of the global limits
        self.glare_server.artifact_type_section = """
[artifact_type:sample_artifact]
default_store = database
max_uploaded_data = 300
[artifact_type:images]
max_uploaded_data = 1500
max_artifact_number = 3
[artifact_type:heat_templates]
max_artifact_number = 15
[artifact_type:murano_packages]
max_uploaded_data = 1000
max_artifact_number = 10
"""
        self.start_servers(**self.__dict__.copy())

    def test_count_artifact_number(self):
        # no artifacts exist yet
        self.assertEqual([], self.get('/all')['all'])

        # user1 creates the maximum allowed number of images (3)
        for i in range(3):
            img = self.create_artifact(
                data={'name': 'img%d' % i}, type_name='images')

        # one more image is rejected by the per-type limit
        self.create_artifact(
            data={'name': 'img4'}, type_name='images', status=403)

        # 7 murano packages bring user1 to the global limit of 10
        for i in range(7):
            self.create_artifact(
                data={'name': 'mp%d' % i}, type_name='murano_packages')

        # one more package is rejected by the global limit
        self.create_artifact(
            data={'name': 'mp8'}, type_name='murano_packages', status=403)

        # removing an image frees a slot for another murano package
        self.delete('/images/%s' % img['id'])
        self.create_artifact(
            data={'name': 'mp8'}, type_name='murano_packages')

        # quotas are per-user: admin still has his full allowance
        self.set_user('admin')
        for i in range(10):
            self.create_artifact(
                data={'name': 'ht%d' % i}, type_name='heat_templates')

        # admin's 11th artifact hits the global limit as well
        self.create_artifact(
            data={'name': 'ht11'}, type_name='heat_templates', status=403)

    def test_calculate_uploaded_data(self):
        headers = {'Content-Type': 'application/octet-stream'}

        # no artifacts exist yet
        self.assertEqual([], self.get('/all')['all'])

        # user1 creates 2 sample artifacts ...
        art1 = self.create_artifact(data={'name': 'art1'})
        art2 = self.create_artifact(data={'name': 'art2'})

        # ... and 2 images
        img1 = self.create_artifact(data={'name': 'img1'}, type_name='images')
        img2 = self.create_artifact(data={'name': 'img2'}, type_name='images')

        # 301 bytes exceed the 300-byte sample_artifact type limit
        self.put(url='/sample_artifact/%s/blob' % art1['id'],
                 data='a' * 301,
                 status=413,
                 headers=headers)

        # 1001 bytes exceed the 1000-byte global limit
        self.put(url='/images/%s/image' % img1['id'],
                 data='a' * 1001,
                 status=413,
                 headers=headers)

        # exactly 300 bytes fit into 'blob' of art1
        self.put(url='/sample_artifact/%s/blob' % art1['id'],
                 data='a' * 300,
                 headers=headers)

        # the type quota is exhausted now, so even 1 byte to art1 fails
        self.put(url='/sample_artifact/%s/dict_of_blobs/blob' % art1['id'],
                 data='a',
                 status=413,
                 headers=headers)

        # ... and the same upload to art2 fails too
        self.put(url='/sample_artifact/%s/dict_of_blobs/blob' % art2['id'],
                 data='a',
                 status=413,
                 headers=headers)

        # deleting art1 releases its 300 bytes, so art2 accepts them
        self.delete('/sample_artifact/%s' % art1['id'])
        self.put(url='/sample_artifact/%s/dict_of_blobs/blob' % art2['id'],
                 data='a' * 300,
                 headers=headers)

        # 700 more bytes reach the 1000-byte global limit exactly
        self.put(url='/images/%s/image' % img1['id'],
                 data='a' * 700,
                 headers=headers)

        # any further upload breaks the global limit
        self.put(url='/images/%s/image' % img2['id'],
                 data='a',
                 status=413,
                 headers=headers)

        # quotas are per-user: admin can still upload his full quota
        self.set_user('admin')
        img1 = self.create_artifact(data={'name': 'img1'}, type_name='images')
        self.put(url='/images/%s/image' % img1['id'],
                 data='a' * 1000,
                 headers=headers)
|
194
glare/tests/unit/test_quotas.py
Normal file
194
glare/tests/unit/test_quotas.py
Normal file
@ -0,0 +1,194 @@
|
||||
# Copyright 2017 - Nokia Networks
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from six import BytesIO
|
||||
|
||||
from glare.common import exception
|
||||
from glare.tests.unit import base
|
||||
|
||||
|
||||
class TestStaticQuotas(base.BaseTestArtifactAPI):
    """Unit checks for the static (config-defined) quota limits."""

    def test_count_artifact_number(self):
        user1_req = self.get_fake_request(self.users['user1'])
        user2_req = self.get_fake_request(self.users['user2'])
        # no artifacts exist yet
        for req in (user1_req, user2_req):
            self.assertEqual(
                0, len(self.controller.list(req, 'all')['artifacts']))

        # global cap: 10 artifacts per user
        self.config(max_artifact_number=10)
        # per-type caps: 3 images, 15 heat templates, 10 murano packages
        self.config(max_artifact_number=3,
                    group='artifact_type:images')
        self.config(max_artifact_number=15,
                    group='artifact_type:heat_templates')
        self.config(max_artifact_number=10,
                    group='artifact_type:murano_packages')

        # user1 creates the maximum allowed number of images
        for i in range(3):
            img = self.controller.create(
                user1_req, 'images', {'name': 'img%d' % i})

        # the 4th image is rejected by the per-type limit
        self.assertRaises(exception.Forbidden, self.controller.create,
                          user1_req, 'images', {'name': 'img4'})

        # 7 murano packages bring user1 to the global limit of 10
        for i in range(7):
            self.controller.create(
                user1_req, 'murano_packages', {'name': 'mp%d' % i})

        # the 8th package is rejected by the global limit
        self.assertRaises(exception.Forbidden, self.controller.create,
                          user1_req, 'murano_packages', {'name': 'mp8'})

        # deleting an image frees a slot for another murano package
        self.controller.delete(user1_req, 'images', img['id'])
        self.controller.create(user1_req, 'murano_packages', {'name': 'mp8'})

        # quotas are per-user: user2 still has his full allowance
        for i in range(10):
            self.controller.create(
                user2_req, 'heat_templates', {'name': 'ht%d' % i})

        # user2's 11th artifact hits the global limit as well
        self.assertRaises(exception.Forbidden, self.controller.create,
                          user2_req, 'heat_templates', {'name': 'ht11'})

        # with the global limit disabled user1 can run into the type limit
        self.config(max_artifact_number=-1)
        for i in range(15):
            self.controller.create(
                user1_req, 'heat_templates', {'name': 'ht%d' % i})

        # the 16th heat template is rejected by the type limit
        self.assertRaises(exception.Forbidden, self.controller.create,
                          user1_req, 'heat_templates', {'name': 'ht16'})

        # disabling the type limit as well removes the last restriction
        self.config(max_artifact_number=-1,
                    group='artifact_type:heat_templates')
        self.controller.create(
            user1_req, 'heat_templates', {'name': 'ht16'})

    def test_calculate_uploaded_data(self):
        user1_req = self.get_fake_request(self.users['user1'])
        user2_req = self.get_fake_request(self.users['user2'])
        # no artifacts exist yet
        for req in (user1_req, user2_req):
            self.assertEqual(
                0, len(self.controller.list(req, 'all')['artifacts']))

        # global cap: 1000 bytes per user
        self.config(max_uploaded_data=1000)
        # per-type caps: 300 sample_artifact, 1500 images, 1000 murano
        self.config(max_uploaded_data=300,
                    group='artifact_type:sample_artifact')
        self.config(max_uploaded_data=1500,
                    group='artifact_type:images')
        self.config(max_uploaded_data=1000,
                    group='artifact_type:murano_packages')

        # user1 creates 2 sample artifacts ...
        art1 = self.controller.create(
            user1_req, 'sample_artifact', {'name': 'art1'})
        art2 = self.controller.create(
            user1_req, 'sample_artifact', {'name': 'art2'})

        # ... and 3 images
        img1 = self.controller.create(
            user1_req, 'images', {'name': 'img1'})
        img2 = self.controller.create(
            user1_req, 'images', {'name': 'img2'})
        img3 = self.controller.create(
            user1_req, 'images', {'name': 'img3'})

        # 301 bytes exceed the 300-byte sample_artifact type limit
        self.assertRaises(
            exception.RequestEntityTooLarge, self.controller.upload_blob,
            user1_req, 'sample_artifact', art1['id'], 'blob',
            BytesIO(b'a' * 301), 'application/octet-stream', 301)

        # 1001 bytes exceed the 1000-byte global limit
        self.assertRaises(
            exception.RequestEntityTooLarge, self.controller.upload_blob,
            user1_req, 'images', img1['id'], 'image',
            BytesIO(b'a' * 1001), 'application/octet-stream', 1001)

        # exactly 300 bytes fit into 'blob' of art1
        self.controller.upload_blob(
            user1_req, 'sample_artifact', art1['id'], 'blob',
            BytesIO(b'a' * 300), 'application/octet-stream',
            content_length=300)

        # the type quota is exhausted now, so even 1 byte to art1 fails
        self.assertRaises(
            exception.RequestEntityTooLarge, self.controller.upload_blob,
            user1_req, 'sample_artifact', art1['id'],
            'dict_of_blobs/blob', BytesIO(b'a'),
            'application/octet-stream', 1)

        # ... and the same upload to art2 fails too
        self.assertRaises(
            exception.RequestEntityTooLarge, self.controller.upload_blob,
            user1_req, 'sample_artifact', art2['id'], 'blob',
            BytesIO(b'a'), 'application/octet-stream', 1)

        # deleting art1 releases its 300 bytes, so art2 accepts them
        self.controller.delete(user1_req, 'sample_artifact', art1['id'])
        self.controller.upload_blob(
            user1_req, 'sample_artifact', art2['id'], 'blob',
            BytesIO(b'a' * 300), 'application/octet-stream', 300)

        # 700 more bytes reach the 1000-byte global limit exactly
        self.controller.upload_blob(
            user1_req, 'images', img1['id'], 'image',
            BytesIO(b'a' * 700), 'application/octet-stream', 700)

        # any further upload breaks the global limit
        self.assertRaises(
            exception.RequestEntityTooLarge, self.controller.upload_blob,
            user1_req, 'images', img2['id'], 'image',
            BytesIO(b'a'), 'application/octet-stream', 1)

        # quotas are per-user: user2 can still upload his full quota
        img1 = self.controller.create(
            user2_req, 'images', {'name': 'img1'})
        self.controller.upload_blob(
            user2_req, 'images', img1['id'], 'image',
            BytesIO(b'a' * 1000), 'application/octet-stream', 1000)

        # disabling the global limit leaves only the per-type image limit
        self.config(max_uploaded_data=-1)
        self.controller.upload_blob(
            user1_req, 'images', img2['id'], 'image',
            BytesIO(b'a' * 800), 'application/octet-stream', 800)

        # user1 now holds 1500 image bytes, so one more byte fails
        self.assertRaises(
            exception.RequestEntityTooLarge, self.controller.upload_blob,
            user1_req, 'images', img3['id'], 'image',
            BytesIO(b'a'), 'application/octet-stream', 1)

        # disabling the type limit as well removes the last restriction
        self.config(max_uploaded_data=-1, group='artifact_type:images')
        self.controller.upload_blob(
            user1_req, 'images', img3['id'], 'image',
            BytesIO(b'a' * 1000), 'application/octet-stream', 1000)
|
Loading…
Reference in New Issue
Block a user