s3api: Fix ETag when copying a MU part from another MU
Previously, we'd preserve the sysmeta that we wrote down with the original
multipart upload to track its S3-style ETag on the new part, causing it to
have an ETag like `<MD5>-<N>`. Later, when the client tried to complete the
new multipart upload, it would send that ETag back to the server, which would
reject the request because the ETag didn't look like a normal MD5.

Now, have s3api include blank values in the copy request to overwrite the
source sysmeta, and treat a blank ETag override the same as a missing one.

Change-Id: Id33a7ab9d0b8f33fede73eae540d6137708e1218
Closes-Bug: #1829959
parent b4c8cf192a
commit e22960fd71
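
For context on the ETag format mentioned above, here is a minimal sketch (illustrative code, not part of this change) of how an AWS-style multipart ETag is formed and why it fails a plain-MD5 check:

    # Minimal sketch, assuming the convention noted in the diff below: the
    # multipart ETag is MD5(concatenated part digests) plus a "-<parts>" suffix,
    # so it never matches a bare 32-hex-digit MD5.
    from hashlib import md5

    part_bodies = [b'a' * (5 * 1024 * 1024), b'b' * 1024]   # illustrative parts
    part_digests = [md5(body).digest() for body in part_bodies]
    mpu_etag = '%s-%d' % (md5(b''.join(part_digests)).hexdigest(),
                          len(part_digests))

    print(mpu_etag)          # something like '9b2cf53...-2'
    print('-' in mpu_etag)   # True -- the suffix is what the server rejects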
@@ -175,6 +175,15 @@ class PartController(Controller):
             req.headers['Range'] = rng
             del req.headers['X-Amz-Copy-Source-Range']
+        if 'X-Amz-Copy-Source' in req.headers:
+            # Clear some problematic headers that might be on the source
+            req.headers.update({
+                sysmeta_header('object', 'etag'): '',
+                'X-Object-Sysmeta-Swift3-Etag': '',  # for legacy data
+                'X-Object-Sysmeta-Slo-Etag': '',
+                'X-Object-Sysmeta-Slo-Size': '',
+                'X-Object-Sysmeta-Container-Update-Override-Etag': '',
+            })
         resp = req.get_response(self.app)
 
         if 'X-Amz-Copy-Source' in req.headers:
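
The header-blanking above works because, on Swift's server-side copy, values supplied on the incoming request take precedence over sysmeta carried across from the source object. A sketch of that idea with plain dicts (illustrative values, not the copy middleware itself):

    # Illustrative only: request headers win over sysmeta copied from the source,
    # so blank values clear the source's S3/SLO ETag bookkeeping on the new part.
    source_sysmeta = {
        'X-Object-Sysmeta-S3api-Etag': '0123abcd...-3',  # source was itself an MPU
        'X-Object-Sysmeta-Slo-Etag': '0123abcd...',
    }
    copy_request_headers = {
        'X-Object-Sysmeta-S3api-Etag': '',
        'X-Object-Sysmeta-Slo-Etag': '',
    }

    new_part_sysmeta = dict(source_sysmeta)        # copied from the source first...
    new_part_sysmeta.update(copy_request_headers)  # ...then request headers override
    assert all(value == '' for value in new_part_sysmeta.values())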
@@ -136,7 +136,7 @@ class S3Response(S3ResponseBase, swob.Response):
         # Check whether we stored the AWS-style etag on upload
         override_etag = s3_sysmeta_headers.get(
             sysmeta_header('object', 'etag'))
-        if override_etag is not None:
+        if override_etag not in (None, ''):
             # Multipart uploads in AWS have ETags like
             # <MD5(part_etag1 || ... || part_etagN)>-<number of parts>
             headers['etag'] = override_etag
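
A quick illustration of why the check changes from `is not None` to `not in (None, '')`: the sysmeta blanked during the copy is stored and returned as an empty string rather than disappearing (assumed behaviour, matching the intent of this patch):

    # Illustrative only: a blanked override comes back as '', not as a missing key.
    s3_sysmeta_headers = {'X-Object-Sysmeta-S3api-Etag': ''}   # blanked on copy

    override_etag = s3_sysmeta_headers.get('X-Object-Sysmeta-S3api-Etag')
    print(override_etag is not None)         # True  -- old check would use '' as the ETag
    print(override_etag not in (None, ''))   # False -- new check falls back to the real MD5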
@@ -281,20 +281,6 @@ class TestS3ApiMultiUpload(S3ApiBase):
             self.assertEqual(self.min_segment_size, int(p.find('Size').text))
             etags.append(p.find('ETag').text)
 
-        # Abort Multipart Uploads
-        # note that uploads[1] has part data while uploads[2] does not
-        for key, upload_id in uploads[1:]:
-            query = 'uploadId=%s' % upload_id
-            status, headers, body = \
-                self.conn.make_request('DELETE', bucket, key, query=query)
-            self.assertEqual(status, 204)
-            self.assertCommonResponseHeaders(headers)
-            self.assertTrue('content-type' in headers)
-            self.assertEqual(headers['content-type'],
-                             'text/html; charset=UTF-8')
-            self.assertTrue('content-length' in headers)
-            self.assertEqual(headers['content-length'], '0')
-
         # Complete Multipart Upload
         key, upload_id = uploads[0]
         xml = self._gen_comp_xml(etags)
@@ -329,10 +315,61 @@ class TestS3ApiMultiUpload(S3ApiBase):
         swift_etag = '"%s"' % md5(concatted_etags).hexdigest()
         # TODO: GET via swift api, check against swift_etag
 
+        # Upload Part Copy -- MU as source
+        key, upload_id = uploads[1]
+        status, headers, body, resp_etag = \
+            self._upload_part_copy(bucket, keys[0], bucket,
+                                   key, upload_id, part_num=2)
+        self.assertEqual(status, 200)
+        self.assertCommonResponseHeaders(headers)
+        self.assertIn('content-type', headers)
+        self.assertEqual(headers['content-type'], 'application/xml')
+        self.assertIn('content-length', headers)
+        self.assertEqual(headers['content-length'], str(len(body)))
+        self.assertNotIn('etag', headers)
+        elem = fromstring(body, 'CopyPartResult')
+
+        last_modified = elem.find('LastModified').text
+        self.assertIsNotNone(last_modified)
+
+        exp_content = 'a' * self.min_segment_size
+        etag = md5(exp_content).hexdigest()
+        self.assertEqual(resp_etag, etag)
+
+        # Also check that the etag is correct in part listings
+        query = 'uploadId=%s' % upload_id
+        status, headers, body = \
+            self.conn.make_request('GET', bucket, key, query=query)
+        self.assertEqual(status, 200)
+        self.assertCommonResponseHeaders(headers)
+        self.assertTrue('content-type' in headers)
+        self.assertEqual(headers['content-type'], 'application/xml')
+        self.assertTrue('content-length' in headers)
+        self.assertEqual(headers['content-length'], str(len(body)))
+        elem = fromstring(body, 'ListPartsResult')
+        self.assertEqual(len(elem.findall('Part')), 2)
+        self.assertEqual(elem.findall('Part')[1].find('PartNumber').text, '2')
+        self.assertEqual(elem.findall('Part')[1].find('ETag').text,
+                         '"%s"' % etag)
+
+        # Abort Multipart Uploads
+        # note that uploads[1] has part data while uploads[2] does not
+        for key, upload_id in uploads[1:]:
+            query = 'uploadId=%s' % upload_id
+            status, headers, body = \
+                self.conn.make_request('DELETE', bucket, key, query=query)
+            self.assertEqual(status, 204)
+            self.assertCommonResponseHeaders(headers)
+            self.assertTrue('content-type' in headers)
+            self.assertEqual(headers['content-type'],
+                             'text/html; charset=UTF-8')
+            self.assertTrue('content-length' in headers)
+            self.assertEqual(headers['content-length'], '0')
+
         # Check object
         def check_obj(req_headers, exp_status):
             status, headers, body = \
-                self.conn.make_request('HEAD', bucket, key, req_headers)
+                self.conn.make_request('HEAD', bucket, keys[0], req_headers)
             self.assertEqual(status, exp_status)
             self.assertCommonResponseHeaders(headers)
             self.assertIn('content-length', headers)
@@ -364,20 +401,20 @@ class TestS3ApiMultiUpload(S3ApiBase):
         self.assertEqual(status, 200)
 
         elem = fromstring(body, 'ListBucketResult')
-        resp_objects = elem.findall('./Contents')
-        self.assertEqual(len(list(resp_objects)), 1)
-        for o in resp_objects:
-            self.assertEqual(o.find('Key').text, key)
-            self.assertIsNotNone(o.find('LastModified').text)
-            self.assertRegexpMatches(
-                o.find('LastModified').text,
-                r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
-            self.assertEqual(o.find('ETag').text, exp_etag)
-            self.assertEqual(o.find('Size').text, str(exp_size))
-            self.assertIsNotNone(o.find('StorageClass').text is not None)
-            self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
-            self.assertEqual(o.find('Owner/DisplayName').text,
-                             self.conn.user_id)
+        resp_objects = list(elem.findall('./Contents'))
+        self.assertEqual(len(resp_objects), 1)
+        o = resp_objects[0]
+        self.assertEqual(o.find('Key').text, keys[0])
+        self.assertIsNotNone(o.find('LastModified').text)
+        self.assertRegexpMatches(
+            o.find('LastModified').text,
+            r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
+        self.assertEqual(o.find('ETag').text, exp_etag)
+        self.assertEqual(o.find('Size').text, str(exp_size))
+        self.assertIsNotNone(o.find('StorageClass').text)
+        self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
+        self.assertEqual(o.find('Owner/DisplayName').text,
+                         self.conn.user_id)
 
     def test_initiate_multi_upload_error(self):
         bucket = 'bucket'
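
For anyone wanting to reproduce the functional-test scenario against a running cluster, a rough boto3 equivalent follows (endpoint, credentials, bucket and key names are all assumptions, not part of this change):

    # Rough sketch: complete one multipart upload, then use it as the copy source
    # for a part of a second upload. Before this fix the copied part's ETag came
    # back as '<md5>-1' and completing the second upload was rejected.
    import boto3

    s3 = boto3.client('s3', endpoint_url='http://localhost:8080',
                      aws_access_key_id='test:tester',
                      aws_secret_access_key='testing')

    src = s3.create_multipart_upload(Bucket='bucket', Key='src-mpu')
    part = s3.upload_part(Bucket='bucket', Key='src-mpu', PartNumber=1,
                          UploadId=src['UploadId'],
                          Body=b'a' * (5 * 1024 * 1024))
    s3.complete_multipart_upload(
        Bucket='bucket', Key='src-mpu', UploadId=src['UploadId'],
        MultipartUpload={'Parts': [{'ETag': part['ETag'], 'PartNumber': 1}]})

    dst = s3.create_multipart_upload(Bucket='bucket', Key='dst-mpu')
    copied = s3.upload_part_copy(Bucket='bucket', Key='dst-mpu', PartNumber=1,
                                 UploadId=dst['UploadId'],
                                 CopySource={'Bucket': 'bucket', 'Key': 'src-mpu'})
    print(copied['CopyPartResult']['ETag'])   # now a plain MD5, no '-N' suffix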
@@ -1651,6 +1651,16 @@ class TestS3ApiMultiUpload(S3ApiTestCase):
         _, _, headers = self.swift.calls_with_headers[-1]
         self.assertEqual(headers['X-Copy-From'], '/src_bucket/src_obj')
         self.assertEqual(headers['Content-Length'], '0')
+        # Some headers *need* to get cleared in case we're copying from
+        # another multipart upload
+        for header in (
+                'X-Object-Sysmeta-S3api-Etag',
+                'X-Object-Sysmeta-Slo-Etag',
+                'X-Object-Sysmeta-Slo-Size',
+                'X-Object-Sysmeta-Container-Update-Override-Etag',
+                'X-Object-Sysmeta-Swift3-Etag',
+        ):
+            self.assertEqual(headers[header], '')
 
     @s3acl(s3acl_only=True)
     def test_upload_part_copy_acl_with_owner_permission(self):