Add support for the AWS Signature Version 4 (SigV4) algorithm.

What this patch does in detail:

- Implement the v4-related code as a mix-in class that provides the
  methods the authentication algorithm needs (e.g. string_to_sign).
- Use S3Timestamp everywhere. The old code performed a lot of complicated
  timestamp translation between datetime, time, and the Date header format
  (str). This patch gathers the translation into a "timestamp" property
  method, which is where the validation should actually be handled.
- Run functional tests for both v2 and v4 authentication in the same
  environment at the same time, which demonstrates that we have complete
  backward compatibility and can adopt v4 without breaking anything.

*Bonus*

- Fix some minor bugs for signed URLs (almost-expired timestamps), for
  header/query mixtures, and for a unit test case mistake.

The reason I reworked Andrey's original patch is that the Signature V4
logic becomes too complicated if we mix the v2 and v4 routines into the
same class, because of the pile of if/elif/else statements needed for
header handling (e.g. if 'X-Amz-Date' in req.headers). Note that this is
not a problem with his patch; AWS's algorithms are simply getting more
complicated. However, for maintainability we need clearer code that makes
it easy to see which statements apply to v2 and which to v4, to keep buggy
code from being merged into master. That is why I took this approach.
Hopefully this code fits the original author's intention.

NOTE for operators:

- Signature V4 is supported only for Keystone auth.
- To enable SigV4, set "location" in the swift3 conf file to the same
  value as the "region" configuration in Keystone.
- SigV2 and SigV4 can be used in the same cluster configuration.
- This has been supported since Keystone 9.0.0.0b1. (We probably need to
  bump the minimum Keystone version in requirements.)

Change-Id: I386abd4ead40f55855657e354fd8ef3fd0d13aa7
Co-Authored-By: Andrey Pavlov <andrey-mp@yandex.ru>
Closes-Bug: #1411078
# Copyright (c) 2010-2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
|
|
Implementation of S3 Multipart Upload.
|
|
|
|
This module implements S3 Multipart Upload APIs with the Swift SLO feature.
|
|
The following explains how swift3 uses swift container and objects to store S3
|
|
upload information:
|
|
|
|
- [bucket]+segments
|
|
|
|
A container to store upload information. [bucket] is the original bucket
|
|
where multipart upload is initiated.
|
|
|
|
- [bucket]+segments/[upload_id]
|
|
|
|
A object of the ongoing upload id. The object is empty and used for
|
|
checking the target upload status. If the object exists, it means that the
|
|
upload is initiated but not either completed or aborted.
|
|
|
|
|
|
- [bucket]+segments/[upload_id]/1
|
|
[bucket]+segments/[upload_id]/2
|
|
[bucket]+segments/[upload_id]/3
|
|
.
|
|
.
|
|
|
|
Uploaded part objects. Those objects are directly used as segments of Swift
|
|
Static Large Object.
|
|
"""

import os
import re
import sys

from swift.common.swob import Range
from swift.common.utils import json
from swift.common.db import utf8encode

from six.moves.urllib.parse import urlparse  # pylint: disable=F0401

from swift3.controllers.base import Controller, bucket_operation, \
    object_operation, check_container_existence
from swift3.response import InvalidArgument, ErrorResponse, MalformedXML, \
    InvalidPart, BucketAlreadyExists, EntityTooSmall, InvalidPartOrder, \
    InvalidRequest, HTTPOk, HTTPNoContent, NoSuchKey, NoSuchUpload, \
    NoSuchBucket
from swift3.exception import BadSwiftRequest
from swift3.utils import LOGGER, unique_id, MULTIUPLOAD_SUFFIX, S3Timestamp
from swift3.etree import Element, SubElement, fromstring, tostring, \
    XMLSyntaxError, DocumentInvalid
from swift3.cfg import CONF

DEFAULT_MAX_PARTS_LISTING = 1000
DEFAULT_MAX_UPLOADS = 1000
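
# Cap the size of a CompleteMultipartUpload XML body; 2 MiB is ample for a
# manifest listing the maximum number of parts.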
MAX_COMPLETE_UPLOAD_BODY_SIZE = 2048 * 1024


def _get_upload_info(req, app, upload_id):
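    """
    HEAD the upload marker object ([object]/[upload_id] in the
    [bucket]+segments container) and return the response; raise NoSuchUpload
    if the marker does not exist.
    """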
    container = req.container_name + MULTIUPLOAD_SUFFIX
    obj = '%s/%s' % (req.object_name, upload_id)

    try:
        return req.get_response(app, 'HEAD', container=container, obj=obj)
    except NoSuchKey:
        raise NoSuchUpload(upload_id=upload_id)


def _check_upload_info(req, app, upload_id):
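    """
    Ensure that the upload with the given id is still in progress; raise
    NoSuchUpload otherwise.
    """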
    _get_upload_info(req, app, upload_id)


class PartController(Controller):
    """
    Handles the following APIs:

    - Upload Part
    - Upload Part - Copy

    Those APIs are logged as PART operations in the S3 server log.
    """
    @object_operation
    @check_container_existence
    def PUT(self, req):
        """
        Handles Upload Part and Upload Part Copy.
        """

        if 'uploadId' not in req.params:
            raise InvalidArgument('ResourceType', 'partNumber',
                                  'Unexpected query string parameter')

        try:
            part_number = int(req.params['partNumber'])
            if part_number < 1 or CONF.max_upload_part_num < part_number:
                raise Exception()
        except Exception:
            err_msg = 'Part number must be an integer between 1 and %d,' \
                      ' inclusive' % CONF.max_upload_part_num
            raise InvalidArgument('partNumber', req.params['partNumber'],
                                  err_msg)

        upload_id = req.params['uploadId']
        _check_upload_info(req, self.app, upload_id)
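
        # Redirect the PUT into the segments container, storing the part as
        # [object]/[upload_id]/[part_number] so that it can later be listed
        # and referenced as an SLO segment.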
        req.container_name += MULTIUPLOAD_SUFFIX
        req.object_name = '%s/%s/%d' % (req.object_name, upload_id,
                                        part_number)

        req_timestamp = S3Timestamp.now()
        req.headers['X-Timestamp'] = req_timestamp.internal
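
        # check_copy_source returns the response for the copy source object
        # when X-Amz-Copy-Source is present, or None otherwise; its
        # Content-Length is used below to validate the copy range.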
        source_resp = req.check_copy_source(self.app)
        if 'X-Amz-Copy-Source' in req.headers and \
                'X-Amz-Copy-Source-Range' in req.headers:
            rng = req.headers['X-Amz-Copy-Source-Range']

            header_valid = True
            try:
                rng_obj = Range(rng)
                if len(rng_obj.ranges) != 1:
                    header_valid = False
            except ValueError:
                header_valid = False
            if not header_valid:
                err_msg = ('The x-amz-copy-source-range value must be of the '
                           'form bytes=first-last where first and last are '
                           'the zero-based offsets of the first and last '
                           'bytes to copy')
                raise InvalidArgument('x-amz-source-range', rng, err_msg)

            source_size = int(source_resp.headers['Content-Length'])
            if not rng_obj.ranges_for_length(source_size):
                err_msg = ('Range specified is not valid for source object '
                           'of size: %s' % source_size)
                raise InvalidArgument('x-amz-source-range', rng, err_msg)

            req.headers['Range'] = rng
            del req.headers['X-Amz-Copy-Source-Range']
        resp = req.get_response(self.app)

        if 'X-Amz-Copy-Source' in req.headers:
            resp.append_copy_resp_body(req.controller_name,
                                       req_timestamp.s3xmlformat)

        resp.status = 200
        return resp


class UploadsController(Controller):
    """
    Handles the following APIs:

    - List Multipart Uploads
    - Initiate Multipart Upload

    Those APIs are logged as UPLOADS operations in the S3 server log.
    """
    @bucket_operation(err_resp=InvalidRequest,
                      err_msg="Key is not expected for the GET method "
                              "?uploads subresource")
    @check_container_existence
    def GET(self, req):
        """
        Handles List Multipart Uploads
        """

        def separate_uploads(uploads, prefix, delimiter):
            """
            separate_uploads will separate uploads into non_delimited_uploads
            (a subset of uploads) and common_prefixes according to the
            specified delimiter. non_delimited_uploads is a list of uploads
            which exclude the delimiter. common_prefixes is a set of prefixes
            prior to the specified delimiter. Note that the prefix in the
            common_prefixes includes the delimiter itself.

            i.e. if the delimiter '/' is specified and uploads consists of
            ['foo', 'foo/bar'], this function will return (['foo'], ['foo/']).

            :param uploads: A list of upload dictionaries
            :param prefix: A string of the prefix reserved on the upload path.
                           (i.e. the delimiter must be searched behind the
                           prefix)
            :param delimiter: A string of the delimiter to split the path in
                              each upload

            :return (non_delimited_uploads, common_prefixes)
            """
            (prefix, delimiter) = \
                utf8encode(prefix, delimiter)
            non_delimited_uploads = []
            common_prefixes = set()
            for upload in uploads:
                key = upload['key']
                end = key.find(delimiter, len(prefix))
                if end >= 0:
                    common_prefix = key[:end + len(delimiter)]
                    common_prefixes.add(common_prefix)
                else:
                    non_delimited_uploads.append(upload)
            return non_delimited_uploads, sorted(common_prefixes)

        encoding_type = req.params.get('encoding-type')
        if encoding_type is not None and encoding_type != 'url':
            err_msg = 'Invalid Encoding Method specified in Request'
            raise InvalidArgument('encoding-type', encoding_type, err_msg)

        keymarker = req.params.get('key-marker', '')
        uploadid = req.params.get('upload-id-marker', '')
        maxuploads = req.get_validated_param(
            'max-uploads', DEFAULT_MAX_UPLOADS, DEFAULT_MAX_UPLOADS)
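
        # Ask the container listing for one extra record so that truncation
        # can be detected without a second request.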
        query = {
            'format': 'json',
            'limit': maxuploads + 1,
        }
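
        # Upload markers are listed as [object]/[upload_id]. '~' sorts after
        # the characters used in upload ids, so '[key]/~' starts the listing
        # just past every upload for the key-marker key itself.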
        if uploadid and keymarker:
            query.update({'marker': '%s/%s' % (keymarker, uploadid)})
        elif keymarker:
            query.update({'marker': '%s/~' % (keymarker)})
        if 'prefix' in req.params:
            query.update({'prefix': req.params['prefix']})

        container = req.container_name + MULTIUPLOAD_SUFFIX
        try:
            resp = req.get_response(self.app, container=container,
                                    query=query)
            objects = json.loads(resp.body)
        except NoSuchBucket:
            # Assume NoSuchBucket means there are no uploads
            objects = []

        def object_to_upload(object_info):
            obj, upid = object_info['name'].rsplit('/', 1)
            obj_dict = {'key': obj,
                        'upload_id': upid,
                        'last_modified': object_info['last_modified']}
            return obj_dict

        # uploads is a list of dicts, {key, upload_id, last_modified}.
        # Note that the pattern matcher drops whole segment objects such as
        # object_name/upload_id/1.
        pattern = re.compile('/[0-9]+$')
        uploads = [object_to_upload(obj) for obj in objects if
                   pattern.search(obj.get('name', '')) is None]

        prefixes = []
        if 'delimiter' in req.params:
            prefix = req.params.get('prefix', '')
            delimiter = req.params['delimiter']
            uploads, prefixes = \
                separate_uploads(uploads, prefix, delimiter)

        if len(uploads) > maxuploads:
            uploads = uploads[:maxuploads]
            truncated = True
        else:
            truncated = False

        nextkeymarker = ''
        nextuploadmarker = ''
        if len(uploads) > 1:
            nextuploadmarker = uploads[-1]['upload_id']
            nextkeymarker = uploads[-1]['key']

        result_elem = Element('ListMultipartUploadsResult')
        SubElement(result_elem, 'Bucket').text = req.container_name
        SubElement(result_elem, 'KeyMarker').text = keymarker
        SubElement(result_elem, 'UploadIdMarker').text = uploadid
        SubElement(result_elem, 'NextKeyMarker').text = nextkeymarker
        SubElement(result_elem, 'NextUploadIdMarker').text = nextuploadmarker
        if 'delimiter' in req.params:
            SubElement(result_elem, 'Delimiter').text = \
                req.params['delimiter']
        if 'prefix' in req.params:
            SubElement(result_elem, 'Prefix').text = req.params['prefix']
        SubElement(result_elem, 'MaxUploads').text = str(maxuploads)
        if encoding_type is not None:
            SubElement(result_elem, 'EncodingType').text = encoding_type
        SubElement(result_elem, 'IsTruncated').text = \
            'true' if truncated else 'false'

        # TODO: don't show uploads which are initiated before this bucket is
        # created.
        for u in uploads:
            upload_elem = SubElement(result_elem, 'Upload')
            SubElement(upload_elem, 'Key').text = u['key']
            SubElement(upload_elem, 'UploadId').text = u['upload_id']
            initiator_elem = SubElement(upload_elem, 'Initiator')
            SubElement(initiator_elem, 'ID').text = req.user_id
            SubElement(initiator_elem, 'DisplayName').text = req.user_id
            owner_elem = SubElement(upload_elem, 'Owner')
            SubElement(owner_elem, 'ID').text = req.user_id
            SubElement(owner_elem, 'DisplayName').text = req.user_id
            SubElement(upload_elem, 'StorageClass').text = 'STANDARD'
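            # Swift's last_modified carries microsecond precision; trim to
            # milliseconds and append 'Z' to match the S3 timestamp format.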
            SubElement(upload_elem, 'Initiated').text = \
                u['last_modified'][:-3] + 'Z'

        for p in prefixes:
            elem = SubElement(result_elem, 'CommonPrefixes')
            SubElement(elem, 'Prefix').text = p

        body = tostring(result_elem, encoding_type=encoding_type)

        return HTTPOk(body=body, content_type='application/xml')

    @object_operation
    @check_container_existence
    def POST(self, req):
        """
        Handles Initiate Multipart Upload.
        """

        # Create a unique S3 upload id from UUID to avoid duplicates.
        upload_id = unique_id()

        container = req.container_name + MULTIUPLOAD_SUFFIX
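        # Create the segments container on demand; it may already exist from
        # an earlier upload, which is fine.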
        try:
            req.get_response(self.app, 'PUT', container, '')
        except BucketAlreadyExists:
            pass

        obj = '%s/%s' % (req.object_name, upload_id)

        req.get_response(self.app, 'PUT', container, obj, body='')

        result_elem = Element('InitiateMultipartUploadResult')
        SubElement(result_elem, 'Bucket').text = req.container_name
        SubElement(result_elem, 'Key').text = req.object_name
        SubElement(result_elem, 'UploadId').text = upload_id

        body = tostring(result_elem)

        return HTTPOk(body=body, content_type='application/xml')


class UploadController(Controller):
    """
    Handles the following APIs:

    - List Parts
    - Abort Multipart Upload
    - Complete Multipart Upload

    Those APIs are logged as UPLOAD operations in the S3 server log.
    """
    @object_operation
    @check_container_existence
    def GET(self, req):
        """
        Handles List Parts.
        """
        def filter_part_num_marker(o):
            try:
                num = int(os.path.basename(o['name']))
                return num > part_num_marker
            except ValueError:
                return False

        encoding_type = req.params.get('encoding-type')
        if encoding_type is not None and encoding_type != 'url':
            err_msg = 'Invalid Encoding Method specified in Request'
            raise InvalidArgument('encoding-type', encoding_type, err_msg)

        upload_id = req.params['uploadId']
        _check_upload_info(req, self.app, upload_id)

        maxparts = req.get_validated_param(
            'max-parts', DEFAULT_MAX_PARTS_LISTING, CONF.max_parts_listing)
        part_num_marker = req.get_validated_param(
            'part-number-marker', 0)
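
        # List only the direct children of [object]/[upload_id]/ (the part
        # objects themselves); one extra record is requested so that
        # truncation can be detected.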
        query = {
            'format': 'json',
            'limit': maxparts + 1,
            'prefix': '%s/%s/' % (req.object_name, upload_id),
            'delimiter': '/'
        }

        container = req.container_name + MULTIUPLOAD_SUFFIX
        resp = req.get_response(self.app, container=container, obj='',
                                query=query)
        objects = json.loads(resp.body)

        last_part = 0

        # If the caller requested a list starting at a specific part number,
        # construct a sub-set of the object list.
        objList = filter(filter_part_num_marker, objects)

        # pylint: disable-msg=E1103
        objList.sort(key=lambda o: int(o['name'].split('/')[-1]))

        if len(objList) > maxparts:
            objList = objList[:maxparts]
            truncated = True
        else:
            truncated = False
        # TODO: We have to retrieve the object list again when truncated is
        # True and some objects were filtered out by an invalid name, because
        # there might not be enough objects to fill the limit defined by
        # maxparts.

        if objList:
            o = objList[-1]
            last_part = os.path.basename(o['name'])

        result_elem = Element('ListPartsResult')
        SubElement(result_elem, 'Bucket').text = req.container_name
        SubElement(result_elem, 'Key').text = req.object_name
        SubElement(result_elem, 'UploadId').text = upload_id

        initiator_elem = SubElement(result_elem, 'Initiator')
        SubElement(initiator_elem, 'ID').text = req.user_id
        SubElement(initiator_elem, 'DisplayName').text = req.user_id
        owner_elem = SubElement(result_elem, 'Owner')
        SubElement(owner_elem, 'ID').text = req.user_id
        SubElement(owner_elem, 'DisplayName').text = req.user_id

        SubElement(result_elem, 'StorageClass').text = 'STANDARD'
        SubElement(result_elem, 'PartNumberMarker').text = str(part_num_marker)
        SubElement(result_elem, 'NextPartNumberMarker').text = str(last_part)
        SubElement(result_elem, 'MaxParts').text = str(maxparts)
        if 'encoding-type' in req.params:
            SubElement(result_elem, 'EncodingType').text = \
                req.params['encoding-type']
        SubElement(result_elem, 'IsTruncated').text = \
            'true' if truncated else 'false'

        for i in objList:
            part_elem = SubElement(result_elem, 'Part')
            SubElement(part_elem, 'PartNumber').text = i['name'].split('/')[-1]
            SubElement(part_elem, 'LastModified').text = \
                i['last_modified'][:-3] + 'Z'
            SubElement(part_elem, 'ETag').text = '"%s"' % i['hash']
            SubElement(part_elem, 'Size').text = str(i['bytes'])

        body = tostring(result_elem, encoding_type=encoding_type)

        return HTTPOk(body=body, content_type='application/xml')

    @object_operation
    @check_container_existence
    def DELETE(self, req):
        """
        Handles Abort Multipart Upload.
        """
        upload_id = req.params['uploadId']
        _check_upload_info(req, self.app, upload_id)

        # First check to see if this multi-part upload was already
        # completed. Look in the primary container; if the object exists,
        # then it was completed and we return an error here.
        container = req.container_name + MULTIUPLOAD_SUFFIX
        obj = '%s/%s' % (req.object_name, upload_id)
        req.get_response(self.app, container=container, obj=obj)

        # The completed object was not found, so this must be a multipart
        # upload abort. We must delete any uploaded segments for this
        # UploadID and then delete the object in the main container as well.
        query = {
            'format': 'json',
            'prefix': '%s/%s/' % (req.object_name, upload_id),
            'delimiter': '/',
        }

        resp = req.get_response(self.app, 'GET', container, '', query=query)

        # Iterate over the segment objects and delete them individually
        objects = json.loads(resp.body)
        for o in objects:
            container = req.container_name + MULTIUPLOAD_SUFFIX
            req.get_response(self.app, container=container, obj=o['name'])

        return HTTPNoContent()

    @object_operation
    @check_container_existence
    def POST(self, req):
        """
        Handles Complete Multipart Upload.
        """
        upload_id = req.params['uploadId']
        resp = _get_upload_info(req, self.app, upload_id)
        headers = {}
        for key, val in resp.headers.iteritems():
            _key = key.lower()
            if _key.startswith('x-amz-meta-'):
                headers['x-object-meta-' + _key[11:]] = val
            elif _key == 'content-type':
                headers['Content-Type'] = val

        # Query for the objects in the segments area to make sure it
        # completed
        query = {
            'format': 'json',
            'prefix': '%s/%s/' % (req.object_name, upload_id),
            'delimiter': '/'
        }

        container = req.container_name + MULTIUPLOAD_SUFFIX
        resp = req.get_response(self.app, 'GET', container, '', query=query)
        objinfo = json.loads(resp.body)
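        # Build, for each part object, the {path, etag, size_bytes} entry
        # that Swift's SLO middleware expects in a multipart-manifest=put
        # request body.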
        objtable = dict((o['name'],
                         {'path': '/'.join(['', container, o['name']]),
                          'etag': o['hash'],
                          'size_bytes': o['bytes']}) for o in objinfo)

        manifest = []
        previous_number = 0
        try:
            xml = req.xml(MAX_COMPLETE_UPLOAD_BODY_SIZE)
            complete_elem = fromstring(xml, 'CompleteMultipartUpload')
            for part_elem in complete_elem.iterchildren('Part'):
                part_number = int(part_elem.find('./PartNumber').text)

                if part_number <= previous_number:
                    raise InvalidPartOrder(upload_id=upload_id)
                previous_number = part_number

                etag = part_elem.find('./ETag').text
                if len(etag) >= 2 and etag[0] == '"' and etag[-1] == '"':
                    # strip double quotes
                    etag = etag[1:-1]

                info = objtable.get("%s/%s/%s" % (req.object_name, upload_id,
                                                  part_number))
                if info is None or info['etag'] != etag:
                    raise InvalidPart(upload_id=upload_id,
                                      part_number=part_number)

                manifest.append(info)
        except (XMLSyntaxError, DocumentInvalid):
            raise MalformedXML()
        except ErrorResponse:
            raise
        except Exception as e:
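            # Log the unexpected error and re-raise it with the original
            # traceback preserved (Python 2 three-expression raise).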
            exc_type, exc_value, exc_traceback = sys.exc_info()
            LOGGER.error(e)
            raise exc_type, exc_value, exc_traceback

        # Following swift commit 7f636a5, zero-byte segments aren't allowed,
        # even as the final segment.
        if int(info['size_bytes']) == 0:
            manifest.pop()

            # Ordinarily, we just let SLO check segment sizes. However, we
            # just popped off a zero-byte segment; if there was a second
            # zero-byte segment and it was at the end, it would succeed on
            # Swift < 2.6.0 and fail on newer Swift. It seems reasonable that
            # it should always fail.
            if manifest and int(manifest[-1]['size_bytes']) == 0:
                raise EntityTooSmall()

        try:
            # TODO: add support for versioning
            if manifest:
                resp = req.get_response(self.app, 'PUT',
                                        body=json.dumps(manifest),
                                        query={'multipart-manifest': 'put'},
                                        headers=headers)
            else:
                # The upload must have consisted of a single zero-length
                # part; just write it directly.
                resp = req.get_response(self.app, 'PUT', body='',
                                        headers=headers)
        except BadSwiftRequest as e:
            msg = str(e)
            msg_pre_260 = 'Each segment, except the last, must be at least '
            # see https://github.com/openstack/swift/commit/c0866ce
            msg_260 = ('too small; each segment, except the last, must be '
                       'at least ')
            # see https://github.com/openstack/swift/commit/7f636a5
            msg_post_260 = 'too small; each segment must be at least 1 byte'
            if msg.startswith(msg_pre_260) or \
                    msg_260 in msg or msg_post_260 in msg:
                # FIXME: AWS S3 allows a smaller object than 5 MB if there is
                # only one part. Use a COPY request to copy the part object
                # from the segments container instead.
                raise EntityTooSmall(msg)
            else:
                raise

        if int(info['size_bytes']) == 0:
            # clean up the zero-byte segment
            empty_seg_cont, empty_seg_name = info['path'].split('/', 2)[1:]
            req.get_response(self.app, 'DELETE',
                             container=empty_seg_cont, obj=empty_seg_name)

        # clean up the multipart-upload record
        obj = '%s/%s' % (req.object_name, upload_id)
        req.get_response(self.app, 'DELETE', container, obj)

        result_elem = Element('CompleteMultipartUploadResult')

        # NOTE: boto with sig v4 appends the port to the HTTP_HOST value in
        # the request headers when the port is not the default, which makes
        # req.host_url look like http://localhost:8080:8080/path, which is
        # obviously invalid. This should probably be resolved in
        # swift.common.swob; tentatively we parse and reconstruct the
        # correct host_url info here. For details, see
        # https://github.com/boto/boto/pull/3513
        parsed_url = urlparse(req.host_url)
        host_url = '%s://%s' % (parsed_url.scheme, parsed_url.hostname)
        if parsed_url.port:
            host_url += ':%s' % parsed_url.port

        SubElement(result_elem, 'Location').text = host_url + req.path
        SubElement(result_elem, 'Bucket').text = req.container_name
        SubElement(result_elem, 'Key').text = req.object_name
        SubElement(result_elem, 'ETag').text = resp.etag

        resp.body = tostring(result_elem)
        resp.status = 200
        resp.content_type = "application/xml"

        return resp