Merge "Add max_folder_size"
commit 9b50f5d839
@@ -316,10 +316,21 @@ class Engine(object):
             except Exception as e:
                 raise exception.BadRequest(message=str(e))
 
+            max_allowed_size = af.get_max_blob_size(field_name)
+            # Check if we want to upload to a folder (and not just to a Blob)
+            if blob_key is not None:
+                blobs_dict = getattr(af, field_name)
+                overall_folder_size = sum(blob["size"] for blob
+                                          in blobs_dict.values())
+                max_folder_size_allowed = af.get_max_folder_size(field_name) \
+                    - overall_folder_size  # always non-negative
+                max_allowed_size = min(max_allowed_size,
+                                       max_folder_size_allowed)
+
             default_store = af.get_default_store(
                 context, af, field_name, blob_key)
             location_uri, size, checksums = store_api.save_blob_to_store(
-                blob_id, fd, context, af.get_max_blob_size(field_name),
+                blob_id, fd, context, max_allowed_size,
                 store_type=default_store)
         except Exception:
             # if upload failed remove blob from db and storage
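Taken out of the engine context, the new quota rule is: cap an upload into a folder at the smaller of the per-blob limit and the room left in the folder. A minimal standalone sketch of that arithmetic (the function and argument names are illustrative, not Glare APIs):

```python
def effective_upload_limit(per_blob_cap, folder_cap, existing_sizes):
    # Mirrors the hunk above: remaining room in the folder is the folder
    # cap minus what the existing blobs already occupy.
    overall_folder_size = sum(existing_sizes)
    remaining = folder_cap - overall_folder_size  # non-negative by invariant
    return min(per_blob_cap, remaining)

# With the SampleArtifact limits exercised by the tests below (folder
# capped at 2000 bytes, already holding 1800 + 199 bytes), only one
# more byte fits:
assert effective_upload_limit(10485760, 2000, [1800, 199]) == 1
```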
@@ -754,6 +754,15 @@ class BaseArtifact(base.VersionedObject):
         """
         return getattr(cls.fields[field_name], 'max_blob_size')
 
+    @classmethod
+    def get_max_folder_size(cls, field_name):
+        """Get the maximum allowed folder size in bytes.
+
+        :param field_name: folder (blob dict) field name
+        :return: maximum folder size in bytes
+        """
+        return getattr(cls.fields[field_name], 'max_folder_size')
+
     @classmethod
     def validate_upload_allowed(cls, af, field_name, blob_key=None):
         """Validate if given blob is ready for uploading.
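A quick usage note on the new accessor: like `get_max_blob_size`, it is a plain attribute lookup on the field definition. The sketch below assumes the `SampleArtifact` type configured further down in this commit; the call sites themselves are not part of the diff:

```python
# SampleArtifact.get_max_folder_size('dict_of_blobs')  # -> 2000
# SampleArtifact.get_max_blob_size('dict_of_blobs')    # -> per-blob cap
#
# Since getattr() is used without a default, asking a field that has no
# max_folder_size attribute would raise AttributeError.
```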
@@ -24,7 +24,8 @@ FILTERS = (
     FILTER_EQ, FILTER_NEQ, FILTER_IN, FILTER_GT, FILTER_GTE, FILTER_LT,
     FILTER_LTE) = ('eq', 'neq', 'in', 'gt', 'gte', 'lt', 'lte')
 
-DEFAULT_MAX_BLOB_SIZE = 10485760
+DEFAULT_MAX_BLOB_SIZE = 10485760  # 10 Megabytes
+DEFAULT_MAX_FOLDER_SIZE = 2673868800  # 2550 Megabytes
 
 
 class Field(object):
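Both defaults are round numbers of binary megabytes; a quick arithmetic check:

```python
# Sanity check of the defaults introduced above (binary megabytes):
assert 10 * 1024 * 1024 == 10485760        # DEFAULT_MAX_BLOB_SIZE
assert 2550 * 1024 * 1024 == 2673868800    # DEFAULT_MAX_FOLDER_SIZE
```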
@@ -246,11 +247,14 @@ class BlobField(Field):
 
 
 class FolderField(DictField):
-    def __init__(self, max_blob_size=DEFAULT_MAX_BLOB_SIZE, **kwargs):
+    def __init__(self, max_blob_size=DEFAULT_MAX_BLOB_SIZE,
+                 max_folder_size=DEFAULT_MAX_FOLDER_SIZE, **kwargs):
         super(FolderField, self).__init__(
             element_type=glare_fields.BlobFieldType, **kwargs)
         self.max_blob_size = int(max_blob_size)
+        self.max_folder_size = int(max_folder_size)
         self.field_props.append('max_blob_size')
+        self.field_props.append('max_folder_size')
 
 
 # Classes below added for backward compatibility. They shouldn't be used
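For illustration, a stripped-down sketch of what the constructor change does, without the real `DictField` base or the Glare field machinery (the class name is hypothetical): both caps go through `int()`, so numeric strings from configuration are accepted as well.

```python
class FolderFieldSketch(object):
    # Hypothetical stand-in for wrappers.FolderField, keeping only the
    # size-cap handling added in this hunk.
    def __init__(self, max_blob_size=10485760, max_folder_size=2673868800):
        self.max_blob_size = int(max_blob_size)
        self.max_folder_size = int(max_folder_size)

f = FolderFieldSketch(max_folder_size="2000")
assert f.max_folder_size == 2000  # int() coercion accepts numeric strings
```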
@@ -88,6 +88,7 @@ class SampleArtifact(base_artifact.BaseArtifact):
                                 required_on_activate=False,
                                 filter_ops=(wrappers.FILTER_EQ,)),
         'dict_of_blobs': Folder(required_on_activate=False,
+                                max_folder_size=2000,
                                 validators=[
                                     validators.MaxDictKeyLen(1000)]),
         'string_mutable': Field(fields.StringField,
@@ -40,7 +40,7 @@ class TestArtifactUpload(base.BaseTestArtifactAPI):
         self.assertEqual(3, artifact['blob']['size'])
         self.assertEqual('active', artifact['blob']['status'])
 
-    def test_size_too_big(self):
+    def test_blob_size_too_big(self):
         # small blob size is limited by 10 bytes
         self.assertRaises(
             exc.RequestEntityTooLarge, self.controller.upload_blob,
@@ -116,6 +116,42 @@ class TestArtifactUpload(base.BaseTestArtifactAPI):
         self.assertEqual(3, artifact['dict_of_blobs']['blb2']['size'])
         self.assertEqual('active', artifact['dict_of_blobs']['blb2']['status'])
 
+    def test_upload_oversized_blob_dict(self):
+        self.controller.upload_blob(
+            self.req, 'sample_artifact', self.sample_artifact['id'],
+            'dict_of_blobs/a',
+            BytesIO(1800 * b'a'), 'application/octet-stream')
+        artifact = self.controller.show(self.req, 'sample_artifact',
+                                        self.sample_artifact['id'])
+        self.assertEqual(1800, artifact['dict_of_blobs']['a']['size'])
+        self.assertEqual('active', artifact['dict_of_blobs']['a']['status'])
+
+        # upload another one
+        self.controller.upload_blob(
+            self.req, 'sample_artifact', self.sample_artifact['id'],
+            'dict_of_blobs/b',
+            BytesIO(199 * b'b'), 'application/octet-stream')
+        artifact = self.controller.show(self.req, 'sample_artifact',
+                                        self.sample_artifact['id'])
+        self.assertEqual(199, artifact['dict_of_blobs']['b']['size'])
+        self.assertEqual('active', artifact['dict_of_blobs']['b']['status'])
+
+        # upload one more byte so the folder reaches exactly 2000 bytes
+        self.controller.upload_blob(
+            self.req, 'sample_artifact', self.sample_artifact['id'],
+            'dict_of_blobs/c',
+            BytesIO(b'c'), 'application/octet-stream')
+        artifact = self.controller.show(self.req, 'sample_artifact',
+                                        self.sample_artifact['id'])
+        self.assertEqual(1, artifact['dict_of_blobs']['c']['size'])
+        self.assertEqual('active', artifact['dict_of_blobs']['c']['status'])
+
+        # uploading anything more would exceed the 2000 byte folder limit
+        self.assertRaises(
+            exc.RequestEntityTooLarge, self.controller.upload_blob,
+            self.req, 'sample_artifact', self.sample_artifact['id'],
+            'dict_of_blobs/d', BytesIO(b'd'), 'application/octet-stream')
+
     def test_existing_blob_dict_key(self):
         self.controller.upload_blob(
             self.req, 'sample_artifact', self.sample_artifact['id'],
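The byte counts in the new test are chosen to land exactly on the `max_folder_size=2000` cap set on `dict_of_blobs` above:

```python
# Why the fourth upload must fail:
assert 1800 + 199 + 1 == 2000  # folder is exactly full after 'c'
# Remaining room is 0, so min(max_blob_size, 0) == 0 and even the single
# byte of 'd' exceeds the limit -> RequestEntityTooLarge.
```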