2010-07-12 17:03:45 -05:00
|
|
|
#!/usr/bin/python -u
|
2013-09-20 01:00:54 +08:00
|
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
|
2010-07-12 17:03:45 -05:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
|
|
# implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
|
|
|
from datetime import datetime
|
2015-03-25 14:59:43 -07:00
|
|
|
import email.parser
|
2013-11-18 13:17:48 -08:00
|
|
|
import hashlib
|
2014-03-06 13:11:03 -08:00
|
|
|
import hmac
|
Add the ability to specify ranges for SLO segments
Users can now include an optional 'range' field in segment descriptions
to specify which bytes from the underlying object should be used for the
segment data. Only one range may be specified per segment. Note that the
'etag' and 'size_bytes' fields still describe the backing object as a
whole. So, if a user uploads a manifest like:
[{"path": "/con/obj_seg_1", "etag": null, "size_bytes": 1048576,
"range": "0-1023"},
{"path": "/con/obj_seg_2", "etag": null, "size_bytes": 1048576,
"range": "512-4095"},
{"path": "/con/obj_seg_1", "etag": null, "size_bytes": 1048576,
"range": "-2048"}]
then the segment will consist of the first 1024 bytes of /con/obj_seg_1,
followed by bytes 513 through 4096 (inclusive) of /con/obj_seg_2, and
finally bytes 1046528 through 1048576 (i.e., the last 2048 bytes) of
/con/obj_seg_1.
ETag generation for SLOs had been updated to prevent collisions when
using different ranges for the same set of objects.
Additionally, there are two performance enhancements:
* On download, multiple sequential requests for segments from the same
underlying object will be coalesced into a single ranged request,
provided it still does not meet Swift's "egregious range requests"
critieria.
* On upload, multiple sequential segments referencing the same object
will be validated against the response from a single HEAD request.
Change-Id: Ia21d51c2cef4e2ee5162161dd2c1d3069009b52c
DocImpact
2015-08-11 00:42:30 -05:00
|
|
|
import itertools
|
2013-11-18 13:17:48 -08:00
|
|
|
import json
|
2010-07-12 17:03:45 -05:00
|
|
|
import locale
|
|
|
|
import random
|
2015-05-27 17:27:47 +02:00
|
|
|
import six
|
2015-10-08 15:03:52 +02:00
|
|
|
from six.moves import urllib
|
2010-07-12 17:03:45 -05:00
|
|
|
import time
|
2015-12-16 15:28:25 +00:00
|
|
|
import unittest2
|
2014-03-06 13:11:03 -08:00
|
|
|
import uuid
|
2014-04-30 15:00:49 +03:00
|
|
|
from copy import deepcopy
|
2014-04-07 13:01:44 -04:00
|
|
|
import eventlet
|
2015-12-16 15:28:25 +00:00
|
|
|
from unittest2 import SkipTest
|
2014-11-25 14:42:42 +00:00
|
|
|
from swift.common.http import is_success, is_client_error
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2014-04-10 15:37:15 -04:00
|
|
|
from test.functional import normalized_urls, load_constraint, cluster_info
|
2015-06-02 19:51:39 +00:00
|
|
|
from test.functional import check_response, retry, requires_acls
|
2014-03-31 23:22:49 -04:00
|
|
|
import test.functional as tf
|
2012-09-05 20:49:50 -07:00
|
|
|
from test.functional.swift_test_client import Account, Connection, File, \
|
|
|
|
ResponseError
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2011-02-22 22:25:38 -06:00
|
|
|
|
2015-08-07 18:14:13 -05:00
|
|
|
def setUpModule():
|
|
|
|
tf.setup_package()
|
|
|
|
|
|
|
|
|
|
|
|
def tearDownModule():
|
|
|
|
tf.teardown_package()
|
|
|
|
|
|
|
|
|
Privileged acct ACL header, new ACL syntax, TempAuth impl.
* Introduce a new privileged account header: X-Account-Access-Control
* Introduce JSON-based version 2 ACL syntax -- see below for discussion
* Implement account ACL authorization in TempAuth
X-Account-Access-Control Header
-------------------------------
Accounts now have a new privileged header to represent ACLs or any other
form of account-level access control. The value of the header is an opaque
string to be interpreted by the auth system, but it must be a JSON-encoded
dictionary. A reference implementation is given in TempAuth, with the
knowledge that historically other auth systems often use TempAuth as a
starting point.
The reference implementation describes three levels of account access:
"admin", "read-write", and "read-only". Adding new access control
features in a future patch (e.g. "write-only" account access) will
automatically be forward- and backward-compatible, due to the JSON
dictionary header format.
The privileged X-Account-Access-Control header may only be read or written
by a user with "swift_owner" status, traditionally the account owner but
now also any user on the "admin" ACL.
Access Levels:
Read-only access is intended to indicate to the auth system that this
list of identities can read everything (except privileged headers) in
the account. Specifically, a user with read-only account access can get
a list of containers in the account, list the contents of any container,
retrieve any object, and see the (non-privileged) headers of the
account, any container, or any object.
Read-write access is intended to indicate to the auth system that this
list of identities can read or write (or create) any container. A user
with read-write account access can create new containers, set any
unprivileged container headers, overwrite objects, delete containers,
etc. A read-write user can NOT set account headers (or perform any
PUT/POST/DELETE requests on the account).
Admin access is intended to indicate to the auth system that this list of
identities has "swift_owner" privileges. A user with admin account access
can do anything the account owner can, including setting account headers
and any privileged headers -- and thus changing the value of
X-Account-Access-Control and thereby granting read-only, read-write, or
admin access to other users.
The auth system is responsible for making decisions based on this header,
if it chooses to support its use. Therefore the above access level
descriptions are necessarily advisory only for other auth systems.
When setting the value of the header, callers are urged to use the new
format_acl() method, described below.
New ACL Format
--------------
The account ACLs introduce a new format for ACLs, rather than reusing the
existing format from X-Container-Read/X-Container-Write. There are several
reasons for this:
* Container ACL format does not support Unicode
* Container ACLs have a different structure than account ACLs
+ account ACLs have no concept of referrers or rlistings
+ accounts have additional "admin" access level
+ account access levels are structured as admin > rw > ro, which seems more
appropriate for how people access accounts, rather than reusing
container ACLs' orthogonal read and write access
In addition, the container ACL syntax is a bit arbitrary and highly custom,
so instead of parsing additional custom syntax, I'd rather propose a next
version and introduce a means for migration. The V2 ACL syntax has the
following benefits:
* JSON is a well-known standard syntax with parsers in all languages
* no artificial value restrictions (you can grant access to a user named
".rlistings" if you want)
* forward and backward compatibility: you may have extraneous keys, but
your attempt to parse the header won't raise an exception
I've introduced hooks in parse_acl and format_acl which currently default
to the old V1 syntax but tolerate the V2 syntax and can easily be flipped
to default to V2. I'm not changing the default or adding code to rewrite
V1 ACLs to V2, because this patch has suffered a lot of scope creep already,
but this seems like a sensible milestone in the migration.
TempAuth Account ACL Implementation
-----------------------------------
As stated above, core Swift is responsible for privileging the
X-Account-Access-Control header (making it only accessible to swift_owners),
for translating it to -sysmeta-* headers to trigger persistence by the
account server, and for including the header in the responses to requests
by privileged users. Core Swift puts no expectation on the *content* of
this header. Auth systems (including TempAuth) are responsible for
defining the content of the header and taking action based on it.
In addition to the changes described above, this patch defines a format
to be used by TempAuth for these headers in the common.middleware.acl
module, in the methods format_v2_acl() and parse_v2_acl(). This patch
also teaches TempAuth to take action based on the header contents. TempAuth
now sets swift_owner=True if the user is on the Admin ACL, authorizes
GET/HEAD/OPTIONS requests if the user is on any ACL, authorizes
PUT/POST/DELETE requests if the user is on the admin or read-write ACL, etc.
Note that the action of setting swift_owner=True triggers core Swift to
add or strip the privileged headers from the responses. Core Swift (not
the auth system) is responsible for that.
DocImpact: Documentation for the new ACL usage and format appears in
summary form in doc/source/overview_auth.rst, and in more detail in
swift/common/middleware/tempauth.py in the TempAuth class docstring.
I leave it to the Swift doc team to determine whether more is needed.
Change-Id: I836a99eaaa6bb0e92dc03e1ca46a474522e6e826
2013-11-13 20:55:14 +00:00
|
|
|
class Utils(object):
|
2010-07-12 17:03:45 -05:00
|
|
|
@classmethod
|
|
|
|
def create_ascii_name(cls, length=None):
|
|
|
|
return uuid.uuid4().hex
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def create_utf8_name(cls, length=None):
|
2012-01-04 14:43:16 +08:00
|
|
|
if length is None:
|
2010-07-12 17:03:45 -05:00
|
|
|
length = 15
|
|
|
|
else:
|
|
|
|
length = int(length)
|
|
|
|
|
|
|
|
utf8_chars = u'\uF10F\uD20D\uB30B\u9409\u8508\u5605\u3703\u1801'\
|
|
|
|
u'\u0900\uF110\uD20E\uB30C\u940A\u8509\u5606\u3704'\
|
|
|
|
u'\u1802\u0901\uF111\uD20F\uB30D\u940B\u850A\u5607'\
|
|
|
|
u'\u3705\u1803\u0902\uF112\uD210\uB30E\u940C\u850B'\
|
|
|
|
u'\u5608\u3706\u1804\u0903\u03A9\u2603'
|
2012-09-03 23:30:52 +08:00
|
|
|
return ''.join([random.choice(utf8_chars)
|
2015-05-25 18:28:02 +02:00
|
|
|
for x in range(length)]).encode('utf-8')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
create_name = create_ascii_name
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2015-12-16 15:28:25 +00:00
|
|
|
class Base(unittest2.TestCase):
|
2010-07-12 17:03:45 -05:00
|
|
|
def setUp(self):
|
|
|
|
cls = type(self)
|
|
|
|
if not cls.set_up:
|
|
|
|
cls.env.setUp()
|
|
|
|
cls.set_up = True
|
|
|
|
|
|
|
|
def assert_body(self, body):
|
|
|
|
response_body = self.env.conn.response.read()
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(response_body == body,
|
|
|
|
'Body returned: %s' % (response_body))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2010-09-02 21:50:16 -07:00
|
|
|
def assert_status(self, status_or_statuses):
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(
|
|
|
|
self.env.conn.response.status == status_or_statuses or
|
|
|
|
(hasattr(status_or_statuses, '__iter__') and
|
|
|
|
self.env.conn.response.status in status_or_statuses),
|
|
|
|
'Status returned: %d Expected: %s' %
|
|
|
|
(self.env.conn.response.status, status_or_statuses))
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2016-03-16 17:41:30 +00:00
|
|
|
def assert_header(self, header_name, expected_value):
|
|
|
|
try:
|
|
|
|
actual_value = self.env.conn.response.getheader(header_name)
|
|
|
|
except KeyError:
|
|
|
|
self.fail(
|
|
|
|
'Expected header name %r not found in response.' % header_name)
|
|
|
|
self.assertEqual(expected_value, actual_value)
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
class Base2(object):
|
|
|
|
def setUp(self):
|
|
|
|
Utils.create_name = Utils.create_utf8_name
|
|
|
|
super(Base2, self).setUp()
|
|
|
|
|
|
|
|
def tearDown(self):
|
|
|
|
Utils.create_name = Utils.create_ascii_name
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
Privileged acct ACL header, new ACL syntax, TempAuth impl.
* Introduce a new privileged account header: X-Account-Access-Control
* Introduce JSON-based version 2 ACL syntax -- see below for discussion
* Implement account ACL authorization in TempAuth
X-Account-Access-Control Header
-------------------------------
Accounts now have a new privileged header to represent ACLs or any other
form of account-level access control. The value of the header is an opaque
string to be interpreted by the auth system, but it must be a JSON-encoded
dictionary. A reference implementation is given in TempAuth, with the
knowledge that historically other auth systems often use TempAuth as a
starting point.
The reference implementation describes three levels of account access:
"admin", "read-write", and "read-only". Adding new access control
features in a future patch (e.g. "write-only" account access) will
automatically be forward- and backward-compatible, due to the JSON
dictionary header format.
The privileged X-Account-Access-Control header may only be read or written
by a user with "swift_owner" status, traditionally the account owner but
now also any user on the "admin" ACL.
Access Levels:
Read-only access is intended to indicate to the auth system that this
list of identities can read everything (except privileged headers) in
the account. Specifically, a user with read-only account access can get
a list of containers in the account, list the contents of any container,
retrieve any object, and see the (non-privileged) headers of the
account, any container, or any object.
Read-write access is intended to indicate to the auth system that this
list of identities can read or write (or create) any container. A user
with read-write account access can create new containers, set any
unprivileged container headers, overwrite objects, delete containers,
etc. A read-write user can NOT set account headers (or perform any
PUT/POST/DELETE requests on the account).
Admin access is intended to indicate to the auth system that this list of
identities has "swift_owner" privileges. A user with admin account access
can do anything the account owner can, including setting account headers
and any privileged headers -- and thus changing the value of
X-Account-Access-Control and thereby granting read-only, read-write, or
admin access to other users.
The auth system is responsible for making decisions based on this header,
if it chooses to support its use. Therefore the above access level
descriptions are necessarily advisory only for other auth systems.
When setting the value of the header, callers are urged to use the new
format_acl() method, described below.
New ACL Format
--------------
The account ACLs introduce a new format for ACLs, rather than reusing the
existing format from X-Container-Read/X-Container-Write. There are several
reasons for this:
* Container ACL format does not support Unicode
* Container ACLs have a different structure than account ACLs
+ account ACLs have no concept of referrers or rlistings
+ accounts have additional "admin" access level
+ account access levels are structured as admin > rw > ro, which seems more
appropriate for how people access accounts, rather than reusing
container ACLs' orthogonal read and write access
In addition, the container ACL syntax is a bit arbitrary and highly custom,
so instead of parsing additional custom syntax, I'd rather propose a next
version and introduce a means for migration. The V2 ACL syntax has the
following benefits:
* JSON is a well-known standard syntax with parsers in all languages
* no artificial value restrictions (you can grant access to a user named
".rlistings" if you want)
* forward and backward compatibility: you may have extraneous keys, but
your attempt to parse the header won't raise an exception
I've introduced hooks in parse_acl and format_acl which currently default
to the old V1 syntax but tolerate the V2 syntax and can easily be flipped
to default to V2. I'm not changing the default or adding code to rewrite
V1 ACLs to V2, because this patch has suffered a lot of scope creep already,
but this seems like a sensible milestone in the migration.
TempAuth Account ACL Implementation
-----------------------------------
As stated above, core Swift is responsible for privileging the
X-Account-Access-Control header (making it only accessible to swift_owners),
for translating it to -sysmeta-* headers to trigger persistence by the
account server, and for including the header in the responses to requests
by privileged users. Core Swift puts no expectation on the *content* of
this header. Auth systems (including TempAuth) are responsible for
defining the content of the header and taking action based on it.
In addition to the changes described above, this patch defines a format
to be used by TempAuth for these headers in the common.middleware.acl
module, in the methods format_v2_acl() and parse_v2_acl(). This patch
also teaches TempAuth to take action based on the header contents. TempAuth
now sets swift_owner=True if the user is on the Admin ACL, authorizes
GET/HEAD/OPTIONS requests if the user is on any ACL, authorizes
PUT/POST/DELETE requests if the user is on the admin or read-write ACL, etc.
Note that the action of setting swift_owner=True triggers core Swift to
add or strip the privileged headers from the responses. Core Swift (not
the auth system) is responsible for that.
DocImpact: Documentation for the new ACL usage and format appears in
summary form in doc/source/overview_auth.rst, and in more detail in
swift/common/middleware/tempauth.py in the TempAuth class docstring.
I leave it to the Swift doc team to determine whether more is needed.
Change-Id: I836a99eaaa6bb0e92dc03e1ca46a474522e6e826
2013-11-13 20:55:14 +00:00
|
|
|
class TestAccountEnv(object):
|
2010-07-12 17:03:45 -05:00
|
|
|
@classmethod
|
|
|
|
def setUp(cls):
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.conn = Connection(tf.config)
|
2010-07-12 17:03:45 -05:00
|
|
|
cls.conn.authenticate()
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.account = Account(cls.conn, tf.config.get('account',
|
|
|
|
tf.config['username']))
|
2010-07-12 17:03:45 -05:00
|
|
|
cls.account.delete_containers()
|
|
|
|
|
|
|
|
cls.containers = []
|
|
|
|
for i in range(10):
|
|
|
|
cont = cls.account.container(Utils.create_name())
|
|
|
|
if not cont.create():
|
|
|
|
raise ResponseError(cls.conn.response)
|
|
|
|
|
|
|
|
cls.containers.append(cont)
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestAccountDev(Base):
|
|
|
|
env = TestAccountEnv
|
|
|
|
set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestAccountDevUTF8(Base2, TestAccountDev):
|
|
|
|
set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestAccount(Base):
|
|
|
|
env = TestAccountEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
def testNoAuthToken(self):
|
|
|
|
self.assertRaises(ResponseError, self.env.account.info,
|
2012-09-03 23:30:52 +08:00
|
|
|
cfg={'no_auth_token': True})
|
2010-09-02 21:50:16 -07:00
|
|
|
self.assert_status([401, 412])
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
self.assertRaises(ResponseError, self.env.account.containers,
|
2012-09-03 23:30:52 +08:00
|
|
|
cfg={'no_auth_token': True})
|
2010-09-02 21:50:16 -07:00
|
|
|
self.assert_status([401, 412])
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testInvalidUTF8Path(self):
|
|
|
|
invalid_utf8 = Utils.create_utf8_name()[::-1]
|
|
|
|
container = self.env.account.container(invalid_utf8)
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(container.create(cfg={'no_path_quote': True}))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(412)
|
2013-01-12 06:54:17 +00:00
|
|
|
self.assert_body('Invalid UTF8 or contains NULL')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testVersionOnlyPath(self):
|
|
|
|
self.env.account.conn.make_request('PUT',
|
2012-09-03 23:30:52 +08:00
|
|
|
cfg={'version_only_path': True})
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(412)
|
|
|
|
self.assert_body('Bad URL')
|
|
|
|
|
2010-10-29 13:30:34 -07:00
|
|
|
def testInvalidPath(self):
|
|
|
|
was_url = self.env.account.conn.storage_url
|
2013-03-04 23:38:48 +02:00
|
|
|
if (normalized_urls):
|
|
|
|
self.env.account.conn.storage_url = '/'
|
|
|
|
else:
|
|
|
|
self.env.account.conn.storage_url = "/%s" % was_url
|
2010-10-29 13:30:34 -07:00
|
|
|
self.env.account.conn.make_request('GET')
|
|
|
|
try:
|
|
|
|
self.assert_status(404)
|
|
|
|
finally:
|
|
|
|
self.env.account.conn.storage_url = was_url
|
|
|
|
|
2016-05-13 16:43:50 -05:00
|
|
|
def testPUTError(self):
|
|
|
|
if load_constraint('allow_account_management'):
|
|
|
|
raise SkipTest("Allow account management is enabled")
|
2010-07-12 17:03:45 -05:00
|
|
|
self.env.account.conn.make_request('PUT')
|
2010-09-10 13:40:43 -07:00
|
|
|
self.assert_status([403, 405])
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testAccountHead(self):
|
|
|
|
try_count = 0
|
|
|
|
while try_count < 5:
|
|
|
|
try_count += 1
|
|
|
|
|
|
|
|
info = self.env.account.info()
|
|
|
|
for field in ['object_count', 'container_count', 'bytes_used']:
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(info[field] >= 0)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
if info['container_count'] == len(self.env.containers):
|
|
|
|
break
|
|
|
|
|
|
|
|
if try_count < 5:
|
|
|
|
time.sleep(1)
|
|
|
|
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(info['container_count'], len(self.env.containers))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(204)
|
|
|
|
|
|
|
|
def testContainerSerializedInfo(self):
|
|
|
|
container_info = {}
|
|
|
|
for container in self.env.containers:
|
|
|
|
info = {'bytes': 0}
|
|
|
|
info['count'] = random.randint(10, 30)
|
|
|
|
for i in range(info['count']):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = container.file(Utils.create_name())
|
2010-07-12 17:03:45 -05:00
|
|
|
bytes = random.randint(1, 32768)
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item.write_random(bytes)
|
2010-07-12 17:03:45 -05:00
|
|
|
info['bytes'] += bytes
|
|
|
|
|
|
|
|
container_info[container.name] = info
|
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in ['json', 'xml']:
|
|
|
|
for a in self.env.account.containers(
|
2013-08-31 20:25:25 -04:00
|
|
|
parms={'format': format_type}):
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(a['count'] >= 0)
|
|
|
|
self.assertTrue(a['bytes'] >= 0)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
headers = dict(self.env.conn.response.getheaders())
|
2013-08-04 11:15:53 +08:00
|
|
|
if format_type == 'json':
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(headers['content-type'],
|
|
|
|
'application/json; charset=utf-8')
|
2013-08-04 11:15:53 +08:00
|
|
|
elif format_type == 'xml':
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(headers['content-type'],
|
|
|
|
'application/xml; charset=utf-8')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testListingLimit(self):
|
2012-09-05 20:49:50 -07:00
|
|
|
limit = load_constraint('account_listing_limit')
|
2012-09-03 23:30:52 +08:00
|
|
|
for l in (1, 100, limit / 2, limit - 1, limit, limit + 1, limit * 2):
|
|
|
|
p = {'limit': l}
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
if l <= limit:
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(len(self.env.account.containers(parms=p)) <= l)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(200)
|
|
|
|
else:
|
|
|
|
self.assertRaises(ResponseError,
|
2012-09-03 23:30:52 +08:00
|
|
|
self.env.account.containers, parms=p)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(412)
|
|
|
|
|
|
|
|
def testContainerListing(self):
|
|
|
|
a = sorted([c.name for c in self.env.containers])
|
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in [None, 'json', 'xml']:
|
|
|
|
b = self.env.account.containers(parms={'format': format_type})
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
if isinstance(b[0], dict):
|
|
|
|
b = [x['name'] for x in b]
|
|
|
|
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(a, b)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2015-11-24 14:47:30 +00:00
|
|
|
def testListDelimiter(self):
|
|
|
|
delimiter = '-'
|
|
|
|
containers = ['test', delimiter.join(['test', 'bar']),
|
|
|
|
delimiter.join(['test', 'foo'])]
|
|
|
|
for c in containers:
|
|
|
|
cont = self.env.account.container(c)
|
|
|
|
self.assertTrue(cont.create())
|
|
|
|
|
|
|
|
results = self.env.account.containers(parms={'delimiter': delimiter})
|
|
|
|
expected = ['test', 'test-']
|
|
|
|
results = [r for r in results if r in expected]
|
|
|
|
self.assertEqual(expected, results)
|
|
|
|
|
|
|
|
results = self.env.account.containers(parms={'delimiter': delimiter,
|
|
|
|
'reverse': 'yes'})
|
|
|
|
expected.reverse()
|
|
|
|
results = [r for r in results if r in expected]
|
|
|
|
self.assertEqual(expected, results)
|
|
|
|
|
|
|
|
def testListDelimiterAndPrefix(self):
|
|
|
|
delimiter = 'a'
|
|
|
|
containers = ['bar', 'bazar']
|
|
|
|
for c in containers:
|
|
|
|
cont = self.env.account.container(c)
|
|
|
|
self.assertTrue(cont.create())
|
|
|
|
|
|
|
|
results = self.env.account.containers(parms={'delimiter': delimiter,
|
|
|
|
'prefix': 'ba'})
|
|
|
|
expected = ['bar', 'baza']
|
|
|
|
results = [r for r in results if r in expected]
|
|
|
|
self.assertEqual(expected, results)
|
|
|
|
|
|
|
|
results = self.env.account.containers(parms={'delimiter': delimiter,
|
|
|
|
'prefix': 'ba',
|
|
|
|
'reverse': 'yes'})
|
|
|
|
expected.reverse()
|
|
|
|
results = [r for r in results if r in expected]
|
|
|
|
self.assertEqual(expected, results)
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
def testInvalidAuthToken(self):
|
|
|
|
hdrs = {'X-Auth-Token': 'bogus_auth_token'}
|
|
|
|
self.assertRaises(ResponseError, self.env.account.info, hdrs=hdrs)
|
|
|
|
self.assert_status(401)
|
|
|
|
|
|
|
|
def testLastContainerMarker(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in [None, 'json', 'xml']:
|
|
|
|
containers = self.env.account.containers({'format': format_type})
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(len(containers), len(self.env.containers))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(200)
|
|
|
|
|
|
|
|
containers = self.env.account.containers(
|
2013-08-04 11:15:53 +08:00
|
|
|
parms={'format': format_type, 'marker': containers[-1]})
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(len(containers), 0)
|
2013-08-04 11:15:53 +08:00
|
|
|
if format_type is None:
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(204)
|
|
|
|
else:
|
|
|
|
self.assert_status(200)
|
|
|
|
|
|
|
|
def testMarkerLimitContainerList(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in [None, 'json', 'xml']:
|
2012-09-03 23:30:52 +08:00
|
|
|
for marker in ['0', 'A', 'I', 'R', 'Z', 'a', 'i', 'r', 'z',
|
|
|
|
'abc123', 'mnop', 'xyz']:
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
limit = random.randint(2, 9)
|
|
|
|
containers = self.env.account.containers(
|
2013-08-04 11:15:53 +08:00
|
|
|
parms={'format': format_type,
|
|
|
|
'marker': marker,
|
|
|
|
'limit': limit})
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(len(containers) <= limit)
|
2010-07-12 17:03:45 -05:00
|
|
|
if containers:
|
2012-09-03 23:30:52 +08:00
|
|
|
if isinstance(containers[0], dict):
|
|
|
|
containers = [x['name'] for x in containers]
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(locale.strcoll(containers[0], marker) > 0)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testContainersOrderedByName(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in [None, 'json', 'xml']:
|
2010-07-12 17:03:45 -05:00
|
|
|
containers = self.env.account.containers(
|
2013-08-04 11:15:53 +08:00
|
|
|
parms={'format': format_type})
|
2010-07-12 17:03:45 -05:00
|
|
|
if isinstance(containers[0], dict):
|
|
|
|
containers = [x['name'] for x in containers]
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(sorted(containers, cmp=locale.strcoll),
|
|
|
|
containers)
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2014-06-06 11:46:41 -07:00
|
|
|
def testQuotedWWWAuthenticateHeader(self):
|
2014-09-12 10:20:19 +01:00
|
|
|
# check that the www-authenticate header value with the swift realm
|
|
|
|
# is correctly quoted.
|
2014-06-06 11:46:41 -07:00
|
|
|
conn = Connection(tf.config)
|
|
|
|
conn.authenticate()
|
|
|
|
inserted_html = '<b>Hello World'
|
|
|
|
hax = 'AUTH_haxx"\nContent-Length: %d\n\n%s' % (len(inserted_html),
|
|
|
|
inserted_html)
|
2015-10-08 15:03:52 +02:00
|
|
|
quoted_hax = urllib.parse.quote(hax)
|
2014-06-06 11:46:41 -07:00
|
|
|
conn.connection.request('GET', '/v1/' + quoted_hax, None, {})
|
|
|
|
resp = conn.connection.getresponse()
|
2014-09-11 10:23:32 +01:00
|
|
|
resp_headers = dict(resp.getheaders())
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn('www-authenticate', resp_headers)
|
2014-09-11 10:23:32 +01:00
|
|
|
actual = resp_headers['www-authenticate']
|
|
|
|
expected = 'Swift realm="%s"' % quoted_hax
|
2014-09-12 10:20:19 +01:00
|
|
|
# other middleware e.g. auth_token may also set www-authenticate
|
|
|
|
# headers in which case actual values will be a comma separated list.
|
|
|
|
# check that expected value is among the actual values
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(expected, actual)
|
2014-06-06 11:46:41 -07:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
class TestAccountUTF8(Base2, TestAccount):
|
|
|
|
set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
Privileged acct ACL header, new ACL syntax, TempAuth impl.
* Introduce a new privileged account header: X-Account-Access-Control
* Introduce JSON-based version 2 ACL syntax -- see below for discussion
* Implement account ACL authorization in TempAuth
X-Account-Access-Control Header
-------------------------------
Accounts now have a new privileged header to represent ACLs or any other
form of account-level access control. The value of the header is an opaque
string to be interpreted by the auth system, but it must be a JSON-encoded
dictionary. A reference implementation is given in TempAuth, with the
knowledge that historically other auth systems often use TempAuth as a
starting point.
The reference implementation describes three levels of account access:
"admin", "read-write", and "read-only". Adding new access control
features in a future patch (e.g. "write-only" account access) will
automatically be forward- and backward-compatible, due to the JSON
dictionary header format.
The privileged X-Account-Access-Control header may only be read or written
by a user with "swift_owner" status, traditionally the account owner but
now also any user on the "admin" ACL.
Access Levels:
Read-only access is intended to indicate to the auth system that this
list of identities can read everything (except privileged headers) in
the account. Specifically, a user with read-only account access can get
a list of containers in the account, list the contents of any container,
retrieve any object, and see the (non-privileged) headers of the
account, any container, or any object.
Read-write access is intended to indicate to the auth system that this
list of identities can read or write (or create) any container. A user
with read-write account access can create new containers, set any
unprivileged container headers, overwrite objects, delete containers,
etc. A read-write user can NOT set account headers (or perform any
PUT/POST/DELETE requests on the account).
Admin access is intended to indicate to the auth system that this list of
identities has "swift_owner" privileges. A user with admin account access
can do anything the account owner can, including setting account headers
and any privileged headers -- and thus changing the value of
X-Account-Access-Control and thereby granting read-only, read-write, or
admin access to other users.
The auth system is responsible for making decisions based on this header,
if it chooses to support its use. Therefore the above access level
descriptions are necessarily advisory only for other auth systems.
When setting the value of the header, callers are urged to use the new
format_acl() method, described below.
New ACL Format
--------------
The account ACLs introduce a new format for ACLs, rather than reusing the
existing format from X-Container-Read/X-Container-Write. There are several
reasons for this:
* Container ACL format does not support Unicode
* Container ACLs have a different structure than account ACLs
+ account ACLs have no concept of referrers or rlistings
+ accounts have additional "admin" access level
+ account access levels are structured as admin > rw > ro, which seems more
appropriate for how people access accounts, rather than reusing
container ACLs' orthogonal read and write access
In addition, the container ACL syntax is a bit arbitrary and highly custom,
so instead of parsing additional custom syntax, I'd rather propose a next
version and introduce a means for migration. The V2 ACL syntax has the
following benefits:
* JSON is a well-known standard syntax with parsers in all languages
* no artificial value restrictions (you can grant access to a user named
".rlistings" if you want)
* forward and backward compatibility: you may have extraneous keys, but
your attempt to parse the header won't raise an exception
I've introduced hooks in parse_acl and format_acl which currently default
to the old V1 syntax but tolerate the V2 syntax and can easily be flipped
to default to V2. I'm not changing the default or adding code to rewrite
V1 ACLs to V2, because this patch has suffered a lot of scope creep already,
but this seems like a sensible milestone in the migration.
TempAuth Account ACL Implementation
-----------------------------------
As stated above, core Swift is responsible for privileging the
X-Account-Access-Control header (making it only accessible to swift_owners),
for translating it to -sysmeta-* headers to trigger persistence by the
account server, and for including the header in the responses to requests
by privileged users. Core Swift puts no expectation on the *content* of
this header. Auth systems (including TempAuth) are responsible for
defining the content of the header and taking action based on it.
In addition to the changes described above, this patch defines a format
to be used by TempAuth for these headers in the common.middleware.acl
module, in the methods format_v2_acl() and parse_v2_acl(). This patch
also teaches TempAuth to take action based on the header contents. TempAuth
now sets swift_owner=True if the user is on the Admin ACL, authorizes
GET/HEAD/OPTIONS requests if the user is on any ACL, authorizes
PUT/POST/DELETE requests if the user is on the admin or read-write ACL, etc.
Note that the action of setting swift_owner=True triggers core Swift to
add or strip the privileged headers from the responses. Core Swift (not
the auth system) is responsible for that.
DocImpact: Documentation for the new ACL usage and format appears in
summary form in doc/source/overview_auth.rst, and in more detail in
swift/common/middleware/tempauth.py in the TempAuth class docstring.
I leave it to the Swift doc team to determine whether more is needed.
Change-Id: I836a99eaaa6bb0e92dc03e1ca46a474522e6e826
2013-11-13 20:55:14 +00:00
|
|
|
class TestAccountNoContainersEnv(object):
|
2010-07-12 17:03:45 -05:00
|
|
|
@classmethod
|
|
|
|
def setUp(cls):
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.conn = Connection(tf.config)
|
2010-07-12 17:03:45 -05:00
|
|
|
cls.conn.authenticate()
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.account = Account(cls.conn, tf.config.get('account',
|
|
|
|
tf.config['username']))
|
2010-07-12 17:03:45 -05:00
|
|
|
cls.account.delete_containers()
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestAccountNoContainers(Base):
|
|
|
|
env = TestAccountNoContainersEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
def testGetRequest(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in [None, 'json', 'xml']:
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(self.env.account.containers(
|
2013-08-04 11:15:53 +08:00
|
|
|
parms={'format': format_type}))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
if format_type is None:
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(204)
|
|
|
|
else:
|
|
|
|
self.assert_status(200)
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestAccountNoContainersUTF8(Base2, TestAccountNoContainers):
|
|
|
|
set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2014-09-11 16:51:51 +10:00
|
|
|
class TestAccountSortingEnv(object):
|
|
|
|
@classmethod
|
|
|
|
def setUp(cls):
|
|
|
|
cls.conn = Connection(tf.config)
|
|
|
|
cls.conn.authenticate()
|
|
|
|
cls.account = Account(cls.conn, tf.config.get('account',
|
|
|
|
tf.config['username']))
|
|
|
|
cls.account.delete_containers()
|
|
|
|
|
|
|
|
postfix = Utils.create_name()
|
|
|
|
cls.cont_items = ('a1', 'a2', 'A3', 'b1', 'B2', 'a10', 'b10', 'zz')
|
|
|
|
cls.cont_items = ['%s%s' % (x, postfix) for x in cls.cont_items]
|
|
|
|
|
|
|
|
for container in cls.cont_items:
|
|
|
|
c = cls.account.container(container)
|
|
|
|
if not c.create():
|
|
|
|
raise ResponseError(cls.conn.response)
|
|
|
|
|
|
|
|
|
|
|
|
class TestAccountSorting(Base):
|
|
|
|
env = TestAccountSortingEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
def testAccountContainerListSorting(self):
|
2015-11-24 14:47:30 +00:00
|
|
|
# name (byte order) sorting.
|
|
|
|
cont_list = sorted(self.env.cont_items)
|
|
|
|
for reverse in ('false', 'no', 'off', '', 'garbage'):
|
|
|
|
cont_listing = self.env.account.containers(
|
|
|
|
parms={'reverse': reverse})
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(cont_list, cont_listing,
|
|
|
|
'Expected %s but got %s with reverse param %r'
|
|
|
|
% (cont_list, cont_listing, reverse))
|
|
|
|
|
|
|
|
def testAccountContainerListSortingReverse(self):
|
2014-09-11 16:51:51 +10:00
|
|
|
# name (byte order) sorting.
|
|
|
|
cont_list = sorted(self.env.cont_items)
|
|
|
|
cont_list.reverse()
|
2015-11-24 14:47:30 +00:00
|
|
|
for reverse in ('true', '1', 'yes', 'on', 't', 'y'):
|
|
|
|
cont_listing = self.env.account.containers(
|
|
|
|
parms={'reverse': reverse})
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(cont_list, cont_listing,
|
|
|
|
'Expected %s but got %s with reverse param %r'
|
|
|
|
% (cont_list, cont_listing, reverse))
|
2014-09-11 16:51:51 +10:00
|
|
|
|
|
|
|
def testAccountContainerListSortingByPrefix(self):
|
|
|
|
cont_list = sorted(c for c in self.env.cont_items if c.startswith('a'))
|
|
|
|
cont_list.reverse()
|
|
|
|
cont_listing = self.env.account.containers(parms={
|
|
|
|
'reverse': 'on', 'prefix': 'a'})
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(cont_list, cont_listing)
|
|
|
|
|
|
|
|
def testAccountContainerListSortingByMarkersExclusive(self):
|
|
|
|
first_item = self.env.cont_items[3] # 'b1' + postfix
|
|
|
|
last_item = self.env.cont_items[4] # 'B2' + postfix
|
|
|
|
|
|
|
|
cont_list = sorted(c for c in self.env.cont_items
|
|
|
|
if last_item < c < first_item)
|
|
|
|
cont_list.reverse()
|
|
|
|
cont_listing = self.env.account.containers(parms={
|
|
|
|
'reverse': 'on', 'marker': first_item, 'end_marker': last_item})
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(cont_list, cont_listing)
|
|
|
|
|
|
|
|
def testAccountContainerListSortingByMarkersInclusive(self):
|
|
|
|
first_item = self.env.cont_items[3] # 'b1' + postfix
|
|
|
|
last_item = self.env.cont_items[4] # 'B2' + postfix
|
|
|
|
|
|
|
|
cont_list = sorted(c for c in self.env.cont_items
|
|
|
|
if last_item <= c <= first_item)
|
|
|
|
cont_list.reverse()
|
|
|
|
cont_listing = self.env.account.containers(parms={
|
|
|
|
'reverse': 'on', 'marker': first_item + '\x00',
|
|
|
|
'end_marker': last_item[:-1] + chr(ord(last_item[-1]) - 1)})
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(cont_list, cont_listing)
|
|
|
|
|
|
|
|
def testAccountContainerListSortingByReversedMarkers(self):
|
|
|
|
cont_listing = self.env.account.containers(parms={
|
|
|
|
'reverse': 'on', 'marker': 'B', 'end_marker': 'b1'})
|
|
|
|
self.assert_status(204)
|
|
|
|
self.assertEqual([], cont_listing)
|
|
|
|
|
|
|
|
|
Privileged acct ACL header, new ACL syntax, TempAuth impl.
* Introduce a new privileged account header: X-Account-Access-Control
* Introduce JSON-based version 2 ACL syntax -- see below for discussion
* Implement account ACL authorization in TempAuth
X-Account-Access-Control Header
-------------------------------
Accounts now have a new privileged header to represent ACLs or any other
form of account-level access control. The value of the header is an opaque
string to be interpreted by the auth system, but it must be a JSON-encoded
dictionary. A reference implementation is given in TempAuth, with the
knowledge that historically other auth systems often use TempAuth as a
starting point.
The reference implementation describes three levels of account access:
"admin", "read-write", and "read-only". Adding new access control
features in a future patch (e.g. "write-only" account access) will
automatically be forward- and backward-compatible, due to the JSON
dictionary header format.
The privileged X-Account-Access-Control header may only be read or written
by a user with "swift_owner" status, traditionally the account owner but
now also any user on the "admin" ACL.
Access Levels:
Read-only access is intended to indicate to the auth system that this
list of identities can read everything (except privileged headers) in
the account. Specifically, a user with read-only account access can get
a list of containers in the account, list the contents of any container,
retrieve any object, and see the (non-privileged) headers of the
account, any container, or any object.
Read-write access is intended to indicate to the auth system that this
list of identities can read or write (or create) any container. A user
with read-write account access can create new containers, set any
unprivileged container headers, overwrite objects, delete containers,
etc. A read-write user can NOT set account headers (or perform any
PUT/POST/DELETE requests on the account).
Admin access is intended to indicate to the auth system that this list of
identities has "swift_owner" privileges. A user with admin account access
can do anything the account owner can, including setting account headers
and any privileged headers -- and thus changing the value of
X-Account-Access-Control and thereby granting read-only, read-write, or
admin access to other users.
The auth system is responsible for making decisions based on this header,
if it chooses to support its use. Therefore the above access level
descriptions are necessarily advisory only for other auth systems.
When setting the value of the header, callers are urged to use the new
format_acl() method, described below.
New ACL Format
--------------
The account ACLs introduce a new format for ACLs, rather than reusing the
existing format from X-Container-Read/X-Container-Write. There are several
reasons for this:
* Container ACL format does not support Unicode
* Container ACLs have a different structure than account ACLs
+ account ACLs have no concept of referrers or rlistings
+ accounts have additional "admin" access level
+ account access levels are structured as admin > rw > ro, which seems more
appropriate for how people access accounts, rather than reusing
container ACLs' orthogonal read and write access
In addition, the container ACL syntax is a bit arbitrary and highly custom,
so instead of parsing additional custom syntax, I'd rather propose a next
version and introduce a means for migration. The V2 ACL syntax has the
following benefits:
* JSON is a well-known standard syntax with parsers in all languages
* no artificial value restrictions (you can grant access to a user named
".rlistings" if you want)
* forward and backward compatibility: you may have extraneous keys, but
your attempt to parse the header won't raise an exception
I've introduced hooks in parse_acl and format_acl which currently default
to the old V1 syntax but tolerate the V2 syntax and can easily be flipped
to default to V2. I'm not changing the default or adding code to rewrite
V1 ACLs to V2, because this patch has suffered a lot of scope creep already,
but this seems like a sensible milestone in the migration.
TempAuth Account ACL Implementation
-----------------------------------
As stated above, core Swift is responsible for privileging the
X-Account-Access-Control header (making it only accessible to swift_owners),
for translating it to -sysmeta-* headers to trigger persistence by the
account server, and for including the header in the responses to requests
by privileged users. Core Swift puts no expectation on the *content* of
this header. Auth systems (including TempAuth) are responsible for
defining the content of the header and taking action based on it.
In addition to the changes described above, this patch defines a format
to be used by TempAuth for these headers in the common.middleware.acl
module, in the methods format_v2_acl() and parse_v2_acl(). This patch
also teaches TempAuth to take action based on the header contents. TempAuth
now sets swift_owner=True if the user is on the Admin ACL, authorizes
GET/HEAD/OPTIONS requests if the user is on any ACL, authorizes
PUT/POST/DELETE requests if the user is on the admin or read-write ACL, etc.
Note that the action of setting swift_owner=True triggers core Swift to
add or strip the privileged headers from the responses. Core Swift (not
the auth system) is responsible for that.
DocImpact: Documentation for the new ACL usage and format appears in
summary form in doc/source/overview_auth.rst, and in more detail in
swift/common/middleware/tempauth.py in the TempAuth class docstring.
I leave it to the Swift doc team to determine whether more is needed.
Change-Id: I836a99eaaa6bb0e92dc03e1ca46a474522e6e826
2013-11-13 20:55:14 +00:00
|
|
|
class TestContainerEnv(object):
|
2010-07-12 17:03:45 -05:00
|
|
|
@classmethod
|
|
|
|
def setUp(cls):
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.conn = Connection(tf.config)
|
2010-07-12 17:03:45 -05:00
|
|
|
cls.conn.authenticate()
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.account = Account(cls.conn, tf.config.get('account',
|
|
|
|
tf.config['username']))
|
2010-07-12 17:03:45 -05:00
|
|
|
cls.account.delete_containers()
|
|
|
|
|
|
|
|
cls.container = cls.account.container(Utils.create_name())
|
|
|
|
if not cls.container.create():
|
|
|
|
raise ResponseError(cls.conn.response)
|
|
|
|
|
|
|
|
cls.file_count = 10
|
|
|
|
cls.file_size = 128
|
|
|
|
cls.files = list()
|
|
|
|
for x in range(cls.file_count):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = cls.container.file(Utils.create_name())
|
|
|
|
file_item.write_random(cls.file_size)
|
|
|
|
cls.files.append(file_item.name)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestContainerDev(Base):
|
|
|
|
env = TestContainerEnv
|
|
|
|
set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestContainerDevUTF8(Base2, TestContainerDev):
|
|
|
|
set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestContainer(Base):
|
|
|
|
env = TestContainerEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
def testContainerNameLimit(self):
|
2012-09-05 20:49:50 -07:00
|
|
|
limit = load_constraint('max_container_name_length')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
for l in (limit - 100, limit - 10, limit - 1, limit,
|
|
|
|
limit + 1, limit + 10, limit + 100):
|
|
|
|
cont = self.env.account.container('a' * l)
|
2010-07-12 17:03:45 -05:00
|
|
|
if l <= limit:
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(cont.create())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(201)
|
|
|
|
else:
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(cont.create())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(400)
|
|
|
|
|
|
|
|
def testFileThenContainerDelete(self):
|
|
|
|
cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(cont.create())
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = cont.file(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.write_random())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.delete())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(204)
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertNotIn(file_item.name, cont.files())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(cont.delete())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(204)
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertNotIn(cont.name, self.env.account.containers())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testFileListingLimitMarkerPrefix(self):
|
|
|
|
cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(cont.create())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2015-05-25 18:28:02 +02:00
|
|
|
files = sorted([Utils.create_name() for x in range(10)])
|
2010-07-12 17:03:45 -05:00
|
|
|
for f in files:
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = cont.file(f)
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.write_random())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2015-05-25 18:28:02 +02:00
|
|
|
for i in range(len(files)):
|
2010-07-12 17:03:45 -05:00
|
|
|
f = files[i]
|
2015-05-25 18:28:02 +02:00
|
|
|
for j in range(1, len(files) - i):
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(
|
|
|
|
cont.files(parms={'limit': j, 'marker': f}) ==
|
|
|
|
files[i + 1: i + j + 1])
|
|
|
|
self.assertTrue(cont.files(parms={'marker': f}) == files[i + 1:])
|
|
|
|
self.assertTrue(cont.files(parms={'marker': f, 'prefix': f}) == [])
|
|
|
|
self.assertTrue(cont.files(parms={'prefix': f}) == [f])
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testPrefixAndLimit(self):
|
2012-09-05 20:49:50 -07:00
|
|
|
load_constraint('container_listing_limit')
|
2010-07-12 17:03:45 -05:00
|
|
|
cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(cont.create())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
prefix_file_count = 10
|
|
|
|
limit_count = 2
|
|
|
|
prefixs = ['alpha/', 'beta/', 'kappa/']
|
|
|
|
prefix_files = {}
|
|
|
|
|
|
|
|
for prefix in prefixs:
|
|
|
|
prefix_files[prefix] = []
|
|
|
|
|
|
|
|
for i in range(prefix_file_count):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = cont.file(prefix + Utils.create_name())
|
|
|
|
file_item.write()
|
|
|
|
prefix_files[prefix].append(file_item.name)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in [None, 'json', 'xml']:
|
2010-07-12 17:03:45 -05:00
|
|
|
for prefix in prefixs:
|
2012-09-03 23:30:52 +08:00
|
|
|
files = cont.files(parms={'prefix': prefix})
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(files, sorted(prefix_files[prefix]))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in [None, 'json', 'xml']:
|
2010-07-12 17:03:45 -05:00
|
|
|
for prefix in prefixs:
|
2012-09-03 23:30:52 +08:00
|
|
|
files = cont.files(parms={'limit': limit_count,
|
|
|
|
'prefix': prefix})
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(len(files), limit_count)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
for file_item in files:
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.startswith(prefix))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2015-07-14 16:56:44 -07:00
|
|
|
def testListDelimiter(self):
|
|
|
|
cont = self.env.account.container(Utils.create_name())
|
2015-08-16 10:34:26 +02:00
|
|
|
self.assertTrue(cont.create())
|
2015-07-14 16:56:44 -07:00
|
|
|
|
|
|
|
delimiter = '-'
|
|
|
|
files = ['test', delimiter.join(['test', 'bar']),
|
|
|
|
delimiter.join(['test', 'foo'])]
|
|
|
|
for f in files:
|
|
|
|
file_item = cont.file(f)
|
2015-08-16 10:34:26 +02:00
|
|
|
self.assertTrue(file_item.write_random())
|
2015-07-14 16:56:44 -07:00
|
|
|
|
|
|
|
results = cont.files()
|
|
|
|
results = cont.files(parms={'delimiter': delimiter})
|
|
|
|
self.assertEqual(results, ['test', 'test-'])
|
|
|
|
|
2015-11-24 14:47:30 +00:00
|
|
|
results = cont.files(parms={'delimiter': delimiter, 'reverse': 'yes'})
|
|
|
|
self.assertEqual(results, ['test-', 'test'])
|
|
|
|
|
2015-07-14 16:56:44 -07:00
|
|
|
def testListDelimiterAndPrefix(self):
|
|
|
|
cont = self.env.account.container(Utils.create_name())
|
2015-08-16 10:34:26 +02:00
|
|
|
self.assertTrue(cont.create())
|
2015-07-14 16:56:44 -07:00
|
|
|
|
|
|
|
delimiter = 'a'
|
|
|
|
files = ['bar', 'bazar']
|
|
|
|
for f in files:
|
|
|
|
file_item = cont.file(f)
|
2015-08-16 10:34:26 +02:00
|
|
|
self.assertTrue(file_item.write_random())
|
2015-07-14 16:56:44 -07:00
|
|
|
|
|
|
|
results = cont.files(parms={'delimiter': delimiter, 'prefix': 'ba'})
|
|
|
|
self.assertEqual(results, ['bar', 'baza'])
|
|
|
|
|
2015-11-24 14:47:30 +00:00
|
|
|
results = cont.files(parms={'delimiter': delimiter,
|
|
|
|
'prefix': 'ba',
|
|
|
|
'reverse': 'yes'})
|
|
|
|
self.assertEqual(results, ['baza', 'bar'])
|
|
|
|
|
2015-07-15 14:22:45 -07:00
|
|
|
def testLeadingDelimiter(self):
|
|
|
|
cont = self.env.account.container(Utils.create_name())
|
|
|
|
self.assertTrue(cont.create())
|
|
|
|
|
|
|
|
delimiter = '/'
|
|
|
|
files = ['test', delimiter.join(['', 'test', 'bar']),
|
|
|
|
delimiter.join(['', 'test', 'bar', 'foo'])]
|
|
|
|
for f in files:
|
|
|
|
file_item = cont.file(f)
|
|
|
|
self.assertTrue(file_item.write_random())
|
|
|
|
|
|
|
|
results = cont.files(parms={'delimiter': delimiter})
|
|
|
|
self.assertEqual(results, [delimiter, 'test'])
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
def testCreate(self):
|
|
|
|
cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(cont.create())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(201)
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(cont.name, self.env.account.containers())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testContainerFileListOnContainerThatDoesNotExist(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in [None, 'json', 'xml']:
|
2010-07-12 17:03:45 -05:00
|
|
|
container = self.env.account.container(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, container.files,
|
2013-08-04 11:15:53 +08:00
|
|
|
parms={'format': format_type})
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
def testUtf8Container(self):
|
|
|
|
valid_utf8 = Utils.create_utf8_name()
|
|
|
|
invalid_utf8 = valid_utf8[::-1]
|
|
|
|
container = self.env.account.container(valid_utf8)
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(container.create(cfg={'no_path_quote': True}))
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(container.name, self.env.account.containers())
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(container.files(), [])
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(container.delete())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
container = self.env.account.container(invalid_utf8)
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(container.create(cfg={'no_path_quote': True}))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(412)
|
|
|
|
self.assertRaises(ResponseError, container.files,
|
2012-09-03 23:30:52 +08:00
|
|
|
cfg={'no_path_quote': True})
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(412)
|
|
|
|
|
|
|
|
def testCreateOnExisting(self):
|
|
|
|
cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(cont.create())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(201)
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(cont.create())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(202)
|
|
|
|
|
|
|
|
def testSlashInName(self):
|
|
|
|
if Utils.create_name == Utils.create_utf8_name:
|
2015-10-08 13:08:45 +02:00
|
|
|
cont_name = list(six.text_type(Utils.create_name(), 'utf-8'))
|
2010-07-12 17:03:45 -05:00
|
|
|
else:
|
|
|
|
cont_name = list(Utils.create_name())
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
cont_name[random.randint(2, len(cont_name) - 2)] = '/'
|
2010-07-12 17:03:45 -05:00
|
|
|
cont_name = ''.join(cont_name)
|
|
|
|
|
|
|
|
if Utils.create_name == Utils.create_utf8_name:
|
|
|
|
cont_name = cont_name.encode('utf-8')
|
|
|
|
|
|
|
|
cont = self.env.account.container(cont_name)
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(cont.create(cfg={'no_path_quote': True}),
|
|
|
|
'created container with name %s' % (cont_name))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertNotIn(cont.name, self.env.account.containers())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testDelete(self):
|
|
|
|
cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(cont.create())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(201)
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(cont.delete())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(204)
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertNotIn(cont.name, self.env.account.containers())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testDeleteOnContainerThatDoesNotExist(self):
|
|
|
|
cont = self.env.account.container(Utils.create_name())
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(cont.delete())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
def testDeleteOnContainerWithFiles(self):
|
|
|
|
cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(cont.create())
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = cont.file(Utils.create_name())
|
|
|
|
file_item.write_random(self.env.file_size)
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(file_item.name, cont.files())
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(cont.delete())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(409)
|
|
|
|
|
|
|
|
def testFileCreateInContainerThatDoesNotExist(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = File(self.env.conn, self.env.account, Utils.create_name(),
|
2013-08-31 20:25:25 -04:00
|
|
|
Utils.create_name())
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.write)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
def testLastFileMarker(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in [None, 'json', 'xml']:
|
|
|
|
files = self.env.container.files({'format': format_type})
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(len(files), len(self.env.files))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(200)
|
|
|
|
|
|
|
|
files = self.env.container.files(
|
2013-08-04 11:15:53 +08:00
|
|
|
parms={'format': format_type, 'marker': files[-1]})
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(len(files), 0)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
if format_type is None:
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(204)
|
|
|
|
else:
|
|
|
|
self.assert_status(200)
|
|
|
|
|
|
|
|
def testContainerFileList(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in [None, 'json', 'xml']:
|
|
|
|
files = self.env.container.files(parms={'format': format_type})
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(200)
|
|
|
|
if isinstance(files[0], dict):
|
|
|
|
files = [x['name'] for x in files]
|
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
for file_item in self.env.files:
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(file_item, files)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
for file_item in files:
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(file_item, self.env.files)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testMarkerLimitFileList(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in [None, 'json', 'xml']:
|
2012-09-03 23:30:52 +08:00
|
|
|
for marker in ['0', 'A', 'I', 'R', 'Z', 'a', 'i', 'r', 'z',
|
|
|
|
'abc123', 'mnop', 'xyz']:
|
|
|
|
limit = random.randint(2, self.env.file_count - 1)
|
2013-08-04 11:15:53 +08:00
|
|
|
files = self.env.container.files(parms={'format': format_type,
|
2012-09-03 23:30:52 +08:00
|
|
|
'marker': marker,
|
|
|
|
'limit': limit})
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
if not files:
|
|
|
|
continue
|
|
|
|
|
|
|
|
if isinstance(files[0], dict):
|
|
|
|
files = [x['name'] for x in files]
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(len(files) <= limit)
|
2010-07-12 17:03:45 -05:00
|
|
|
if files:
|
|
|
|
if isinstance(files[0], dict):
|
|
|
|
files = [x['name'] for x in files]
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(locale.strcoll(files[0], marker) > 0)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testFileOrder(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in [None, 'json', 'xml']:
|
|
|
|
files = self.env.container.files(parms={'format': format_type})
|
2010-07-12 17:03:45 -05:00
|
|
|
if isinstance(files[0], dict):
|
|
|
|
files = [x['name'] for x in files]
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(sorted(files, cmp=locale.strcoll), files)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testContainerInfo(self):
|
|
|
|
info = self.env.container.info()
|
|
|
|
self.assert_status(204)
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(info['object_count'], self.env.file_count)
|
|
|
|
self.assertEqual(info['bytes_used'],
|
|
|
|
self.env.file_count * self.env.file_size)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testContainerInfoOnContainerThatDoesNotExist(self):
|
|
|
|
container = self.env.account.container(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, container.info)
|
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
def testContainerFileListWithLimit(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in [None, 'json', 'xml']:
|
|
|
|
files = self.env.container.files(parms={'format': format_type,
|
2012-09-03 23:30:52 +08:00
|
|
|
'limit': 2})
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(len(files), 2)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testTooLongName(self):
|
2012-09-03 23:30:52 +08:00
|
|
|
cont = self.env.account.container('x' * 257)
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(cont.create(),
|
|
|
|
'created container with name %s' % (cont.name))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(400)
|
|
|
|
|
|
|
|
def testContainerExistenceCachingProblem(self):
|
|
|
|
cont = self.env.account.container(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, cont.files)
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(cont.create())
|
2010-07-12 17:03:45 -05:00
|
|
|
cont.files()
|
|
|
|
|
|
|
|
cont = self.env.account.container(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, cont.files)
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(cont.create())
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = cont.file(Utils.create_name())
|
|
|
|
file_item.write_random()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestContainerUTF8(Base2, TestContainer):
|
|
|
|
set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2014-09-11 16:51:51 +10:00
|
|
|
class TestContainerSortingEnv(object):
|
|
|
|
@classmethod
|
|
|
|
def setUp(cls):
|
|
|
|
cls.conn = Connection(tf.config)
|
|
|
|
cls.conn.authenticate()
|
|
|
|
cls.account = Account(cls.conn, tf.config.get('account',
|
|
|
|
tf.config['username']))
|
|
|
|
cls.account.delete_containers()
|
|
|
|
|
|
|
|
cls.container = cls.account.container(Utils.create_name())
|
|
|
|
if not cls.container.create():
|
|
|
|
raise ResponseError(cls.conn.response)
|
|
|
|
|
|
|
|
cls.file_items = ('a1', 'a2', 'A3', 'b1', 'B2', 'a10', 'b10', 'zz')
|
|
|
|
cls.files = list()
|
|
|
|
cls.file_size = 128
|
|
|
|
for name in cls.file_items:
|
|
|
|
file_item = cls.container.file(name)
|
|
|
|
file_item.write_random(cls.file_size)
|
|
|
|
cls.files.append(file_item.name)
|
|
|
|
|
|
|
|
|
|
|
|
class TestContainerSorting(Base):
|
|
|
|
env = TestContainerSortingEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
def testContainerFileListSortingReversed(self):
|
|
|
|
file_list = list(sorted(self.env.file_items))
|
|
|
|
file_list.reverse()
|
2015-11-24 14:47:30 +00:00
|
|
|
for reverse in ('true', '1', 'yes', 'on', 't', 'y'):
|
|
|
|
cont_files = self.env.container.files(parms={'reverse': reverse})
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(file_list, cont_files,
|
|
|
|
'Expected %s but got %s with reverse param %r'
|
|
|
|
% (file_list, cont_files, reverse))
|
2014-09-11 16:51:51 +10:00
|
|
|
|
|
|
|
def testContainerFileSortingByPrefixReversed(self):
|
|
|
|
cont_list = sorted(c for c in self.env.file_items if c.startswith('a'))
|
|
|
|
cont_list.reverse()
|
|
|
|
cont_listing = self.env.container.files(parms={
|
|
|
|
'reverse': 'on', 'prefix': 'a'})
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(cont_list, cont_listing)
|
|
|
|
|
|
|
|
def testContainerFileSortingByMarkersExclusiveReversed(self):
|
|
|
|
first_item = self.env.file_items[3] # 'b1' + postfix
|
|
|
|
last_item = self.env.file_items[4] # 'B2' + postfix
|
|
|
|
|
|
|
|
cont_list = sorted(c for c in self.env.file_items
|
|
|
|
if last_item < c < first_item)
|
|
|
|
cont_list.reverse()
|
|
|
|
cont_listing = self.env.container.files(parms={
|
|
|
|
'reverse': 'on', 'marker': first_item, 'end_marker': last_item})
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(cont_list, cont_listing)
|
|
|
|
|
|
|
|
def testContainerFileSortingByMarkersInclusiveReversed(self):
|
|
|
|
first_item = self.env.file_items[3] # 'b1' + postfix
|
|
|
|
last_item = self.env.file_items[4] # 'B2' + postfix
|
|
|
|
|
|
|
|
cont_list = sorted(c for c in self.env.file_items
|
|
|
|
if last_item <= c <= first_item)
|
|
|
|
cont_list.reverse()
|
|
|
|
cont_listing = self.env.container.files(parms={
|
|
|
|
'reverse': 'on', 'marker': first_item + '\x00',
|
|
|
|
'end_marker': last_item[:-1] + chr(ord(last_item[-1]) - 1)})
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(cont_list, cont_listing)
|
|
|
|
|
|
|
|
def testContainerFileSortingByReversedMarkersReversed(self):
|
|
|
|
cont_listing = self.env.container.files(parms={
|
|
|
|
'reverse': 'on', 'marker': 'B', 'end_marker': 'b1'})
|
|
|
|
self.assert_status(204)
|
|
|
|
self.assertEqual([], cont_listing)
|
|
|
|
|
|
|
|
def testContainerFileListSorting(self):
|
|
|
|
file_list = list(sorted(self.env.file_items))
|
|
|
|
cont_files = self.env.container.files()
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(file_list, cont_files)
|
|
|
|
|
|
|
|
# Lets try again but with reverse is specifically turned off
|
|
|
|
cont_files = self.env.container.files(parms={'reverse': 'off'})
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(file_list, cont_files)
|
|
|
|
|
|
|
|
cont_files = self.env.container.files(parms={'reverse': 'false'})
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(file_list, cont_files)
|
|
|
|
|
|
|
|
cont_files = self.env.container.files(parms={'reverse': 'no'})
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(file_list, cont_files)
|
|
|
|
|
|
|
|
cont_files = self.env.container.files(parms={'reverse': ''})
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(file_list, cont_files)
|
|
|
|
|
|
|
|
# Lets try again but with a incorrect reverse values
|
|
|
|
cont_files = self.env.container.files(parms={'reverse': 'foo'})
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(file_list, cont_files)
|
|
|
|
|
|
|
|
cont_files = self.env.container.files(parms={'reverse': 'hai'})
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(file_list, cont_files)
|
|
|
|
|
|
|
|
cont_files = self.env.container.files(parms={'reverse': 'o=[]::::>'})
|
|
|
|
self.assert_status(200)
|
|
|
|
self.assertEqual(file_list, cont_files)
|
|
|
|
|
|
|
|
|
Privileged acct ACL header, new ACL syntax, TempAuth impl.
* Introduce a new privileged account header: X-Account-Access-Control
* Introduce JSON-based version 2 ACL syntax -- see below for discussion
* Implement account ACL authorization in TempAuth
X-Account-Access-Control Header
-------------------------------
Accounts now have a new privileged header to represent ACLs or any other
form of account-level access control. The value of the header is an opaque
string to be interpreted by the auth system, but it must be a JSON-encoded
dictionary. A reference implementation is given in TempAuth, with the
knowledge that historically other auth systems often use TempAuth as a
starting point.
The reference implementation describes three levels of account access:
"admin", "read-write", and "read-only". Adding new access control
features in a future patch (e.g. "write-only" account access) will
automatically be forward- and backward-compatible, due to the JSON
dictionary header format.
The privileged X-Account-Access-Control header may only be read or written
by a user with "swift_owner" status, traditionally the account owner but
now also any user on the "admin" ACL.
Access Levels:
Read-only access is intended to indicate to the auth system that this
list of identities can read everything (except privileged headers) in
the account. Specifically, a user with read-only account access can get
a list of containers in the account, list the contents of any container,
retrieve any object, and see the (non-privileged) headers of the
account, any container, or any object.
Read-write access is intended to indicate to the auth system that this
list of identities can read or write (or create) any container. A user
with read-write account access can create new containers, set any
unprivileged container headers, overwrite objects, delete containers,
etc. A read-write user can NOT set account headers (or perform any
PUT/POST/DELETE requests on the account).
Admin access is intended to indicate to the auth system that this list of
identities has "swift_owner" privileges. A user with admin account access
can do anything the account owner can, including setting account headers
and any privileged headers -- and thus changing the value of
X-Account-Access-Control and thereby granting read-only, read-write, or
admin access to other users.
The auth system is responsible for making decisions based on this header,
if it chooses to support its use. Therefore the above access level
descriptions are necessarily advisory only for other auth systems.
When setting the value of the header, callers are urged to use the new
format_acl() method, described below.
New ACL Format
--------------
The account ACLs introduce a new format for ACLs, rather than reusing the
existing format from X-Container-Read/X-Container-Write. There are several
reasons for this:
* Container ACL format does not support Unicode
* Container ACLs have a different structure than account ACLs
+ account ACLs have no concept of referrers or rlistings
+ accounts have additional "admin" access level
+ account access levels are structured as admin > rw > ro, which seems more
appropriate for how people access accounts, rather than reusing
container ACLs' orthogonal read and write access
In addition, the container ACL syntax is a bit arbitrary and highly custom,
so instead of parsing additional custom syntax, I'd rather propose a next
version and introduce a means for migration. The V2 ACL syntax has the
following benefits:
* JSON is a well-known standard syntax with parsers in all languages
* no artificial value restrictions (you can grant access to a user named
".rlistings" if you want)
* forward and backward compatibility: you may have extraneous keys, but
your attempt to parse the header won't raise an exception
I've introduced hooks in parse_acl and format_acl which currently default
to the old V1 syntax but tolerate the V2 syntax and can easily be flipped
to default to V2. I'm not changing the default or adding code to rewrite
V1 ACLs to V2, because this patch has suffered a lot of scope creep already,
but this seems like a sensible milestone in the migration.
TempAuth Account ACL Implementation
-----------------------------------
As stated above, core Swift is responsible for privileging the
X-Account-Access-Control header (making it only accessible to swift_owners),
for translating it to -sysmeta-* headers to trigger persistence by the
account server, and for including the header in the responses to requests
by privileged users. Core Swift puts no expectation on the *content* of
this header. Auth systems (including TempAuth) are responsible for
defining the content of the header and taking action based on it.
In addition to the changes described above, this patch defines a format
to be used by TempAuth for these headers in the common.middleware.acl
module, in the methods format_v2_acl() and parse_v2_acl(). This patch
also teaches TempAuth to take action based on the header contents. TempAuth
now sets swift_owner=True if the user is on the Admin ACL, authorizes
GET/HEAD/OPTIONS requests if the user is on any ACL, authorizes
PUT/POST/DELETE requests if the user is on the admin or read-write ACL, etc.
Note that the action of setting swift_owner=True triggers core Swift to
add or strip the privileged headers from the responses. Core Swift (not
the auth system) is responsible for that.
DocImpact: Documentation for the new ACL usage and format appears in
summary form in doc/source/overview_auth.rst, and in more detail in
swift/common/middleware/tempauth.py in the TempAuth class docstring.
I leave it to the Swift doc team to determine whether more is needed.
Change-Id: I836a99eaaa6bb0e92dc03e1ca46a474522e6e826
2013-11-13 20:55:14 +00:00
|
|
|
class TestContainerPathsEnv(object):
|
2010-07-12 17:03:45 -05:00
|
|
|
@classmethod
|
|
|
|
def setUp(cls):
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.conn = Connection(tf.config)
|
2010-07-12 17:03:45 -05:00
|
|
|
cls.conn.authenticate()
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.account = Account(cls.conn, tf.config.get('account',
|
|
|
|
tf.config['username']))
|
2010-07-12 17:03:45 -05:00
|
|
|
cls.account.delete_containers()
|
|
|
|
|
|
|
|
cls.file_size = 8
|
|
|
|
|
|
|
|
cls.container = cls.account.container(Utils.create_name())
|
|
|
|
if not cls.container.create():
|
|
|
|
raise ResponseError(cls.conn.response)
|
|
|
|
|
|
|
|
cls.files = [
|
|
|
|
'/file1',
|
|
|
|
'/file A',
|
|
|
|
'/dir1/',
|
|
|
|
'/dir2/',
|
|
|
|
'/dir1/file2',
|
|
|
|
'/dir1/subdir1/',
|
|
|
|
'/dir1/subdir2/',
|
|
|
|
'/dir1/subdir1/file2',
|
|
|
|
'/dir1/subdir1/file3',
|
|
|
|
'/dir1/subdir1/file4',
|
|
|
|
'/dir1/subdir1/subsubdir1/',
|
|
|
|
'/dir1/subdir1/subsubdir1/file5',
|
|
|
|
'/dir1/subdir1/subsubdir1/file6',
|
|
|
|
'/dir1/subdir1/subsubdir1/file7',
|
|
|
|
'/dir1/subdir1/subsubdir1/file8',
|
|
|
|
'/dir1/subdir1/subsubdir2/',
|
|
|
|
'/dir1/subdir1/subsubdir2/file9',
|
|
|
|
'/dir1/subdir1/subsubdir2/file0',
|
|
|
|
'file1',
|
|
|
|
'dir1/',
|
|
|
|
'dir2/',
|
|
|
|
'dir1/file2',
|
|
|
|
'dir1/subdir1/',
|
|
|
|
'dir1/subdir2/',
|
|
|
|
'dir1/subdir1/file2',
|
|
|
|
'dir1/subdir1/file3',
|
|
|
|
'dir1/subdir1/file4',
|
|
|
|
'dir1/subdir1/subsubdir1/',
|
|
|
|
'dir1/subdir1/subsubdir1/file5',
|
|
|
|
'dir1/subdir1/subsubdir1/file6',
|
|
|
|
'dir1/subdir1/subsubdir1/file7',
|
|
|
|
'dir1/subdir1/subsubdir1/file8',
|
|
|
|
'dir1/subdir1/subsubdir2/',
|
|
|
|
'dir1/subdir1/subsubdir2/file9',
|
|
|
|
'dir1/subdir1/subsubdir2/file0',
|
|
|
|
'dir1/subdir with spaces/',
|
|
|
|
'dir1/subdir with spaces/file B',
|
|
|
|
'dir1/subdir+with{whatever/',
|
|
|
|
'dir1/subdir+with{whatever/file D',
|
|
|
|
]
|
|
|
|
|
2013-03-04 23:38:48 +02:00
|
|
|
stored_files = set()
|
2010-07-12 17:03:45 -05:00
|
|
|
for f in cls.files:
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = cls.container.file(f)
|
2010-07-12 17:03:45 -05:00
|
|
|
if f.endswith('/'):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item.write(hdrs={'Content-Type': 'application/directory'})
|
2010-07-12 17:03:45 -05:00
|
|
|
else:
|
2013-08-31 20:25:25 -04:00
|
|
|
file_item.write_random(cls.file_size,
|
|
|
|
hdrs={'Content-Type':
|
|
|
|
'application/directory'})
|
2013-03-04 23:38:48 +02:00
|
|
|
if (normalized_urls):
|
|
|
|
nfile = '/'.join(filter(None, f.split('/')))
|
|
|
|
if (f[-1] == '/'):
|
|
|
|
nfile += '/'
|
|
|
|
stored_files.add(nfile)
|
|
|
|
else:
|
|
|
|
stored_files.add(f)
|
|
|
|
cls.stored_files = sorted(stored_files)
|
|
|
|
|
2013-03-26 20:42:26 +00:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestContainerPaths(Base):
|
|
|
|
env = TestContainerPathsEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
def testTraverseContainer(self):
|
|
|
|
found_files = []
|
|
|
|
found_dirs = []
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
def recurse_path(path, count=0):
|
|
|
|
if count > 10:
|
|
|
|
raise ValueError('too deep recursion')
|
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
for file_item in self.env.container.files(parms={'path': path}):
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.startswith(path))
|
2013-08-04 11:15:53 +08:00
|
|
|
if file_item.endswith('/'):
|
|
|
|
recurse_path(file_item, count + 1)
|
|
|
|
found_dirs.append(file_item)
|
2010-07-12 17:03:45 -05:00
|
|
|
else:
|
2013-08-04 11:15:53 +08:00
|
|
|
found_files.append(file_item)
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
recurse_path('')
|
2013-08-04 11:15:53 +08:00
|
|
|
for file_item in self.env.stored_files:
|
|
|
|
if file_item.startswith('/'):
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertNotIn(file_item, found_dirs)
|
|
|
|
self.assertNotIn(file_item, found_files)
|
2013-08-04 11:15:53 +08:00
|
|
|
elif file_item.endswith('/'):
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(file_item, found_dirs)
|
|
|
|
self.assertNotIn(file_item, found_files)
|
2010-07-12 17:03:45 -05:00
|
|
|
else:
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(file_item, found_files)
|
|
|
|
self.assertNotIn(file_item, found_dirs)
|
2013-03-04 23:38:48 +02:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
found_files = []
|
|
|
|
found_dirs = []
|
|
|
|
recurse_path('/')
|
2013-08-04 11:15:53 +08:00
|
|
|
for file_item in self.env.stored_files:
|
|
|
|
if not file_item.startswith('/'):
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertNotIn(file_item, found_dirs)
|
|
|
|
self.assertNotIn(file_item, found_files)
|
2013-08-04 11:15:53 +08:00
|
|
|
elif file_item.endswith('/'):
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(file_item, found_dirs)
|
|
|
|
self.assertNotIn(file_item, found_files)
|
2010-07-12 17:03:45 -05:00
|
|
|
else:
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(file_item, found_files)
|
|
|
|
self.assertNotIn(file_item, found_dirs)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testContainerListing(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in (None, 'json', 'xml'):
|
|
|
|
files = self.env.container.files(parms={'format': format_type})
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
if isinstance(files[0], dict):
|
|
|
|
files = [str(x['name']) for x in files]
|
|
|
|
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(files, self.env.stored_files)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in ('json', 'xml'):
|
|
|
|
for file_item in self.env.container.files(parms={'format':
|
|
|
|
format_type}):
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(int(file_item['bytes']) >= 0)
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn('last_modified', file_item)
|
2013-08-04 11:15:53 +08:00
|
|
|
if file_item['name'].endswith('/'):
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(file_item['content_type'],
|
|
|
|
'application/directory')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testStructure(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
def assert_listing(path, file_list):
|
2012-09-03 23:30:52 +08:00
|
|
|
files = self.env.container.files(parms={'path': path})
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(sorted(file_list, cmp=locale.strcoll), files)
|
2013-03-04 23:38:48 +02:00
|
|
|
if not normalized_urls:
|
|
|
|
assert_listing('/', ['/dir1/', '/dir2/', '/file1', '/file A'])
|
|
|
|
assert_listing('/dir1',
|
|
|
|
['/dir1/file2', '/dir1/subdir1/', '/dir1/subdir2/'])
|
|
|
|
assert_listing('/dir1/',
|
|
|
|
['/dir1/file2', '/dir1/subdir1/', '/dir1/subdir2/'])
|
|
|
|
assert_listing('/dir1/subdir1',
|
|
|
|
['/dir1/subdir1/subsubdir2/', '/dir1/subdir1/file2',
|
|
|
|
'/dir1/subdir1/file3', '/dir1/subdir1/file4',
|
|
|
|
'/dir1/subdir1/subsubdir1/'])
|
|
|
|
assert_listing('/dir1/subdir2', [])
|
|
|
|
assert_listing('', ['file1', 'dir1/', 'dir2/'])
|
|
|
|
else:
|
|
|
|
assert_listing('', ['file1', 'dir1/', 'dir2/', 'file A'])
|
2010-07-12 17:03:45 -05:00
|
|
|
assert_listing('dir1', ['dir1/file2', 'dir1/subdir1/',
|
2012-09-03 23:30:52 +08:00
|
|
|
'dir1/subdir2/', 'dir1/subdir with spaces/',
|
|
|
|
'dir1/subdir+with{whatever/'])
|
2010-07-12 17:03:45 -05:00
|
|
|
assert_listing('dir1/subdir1',
|
2012-09-03 23:30:52 +08:00
|
|
|
['dir1/subdir1/file4', 'dir1/subdir1/subsubdir2/',
|
|
|
|
'dir1/subdir1/file2', 'dir1/subdir1/file3',
|
|
|
|
'dir1/subdir1/subsubdir1/'])
|
2010-07-12 17:03:45 -05:00
|
|
|
assert_listing('dir1/subdir1/subsubdir1',
|
2012-09-03 23:30:52 +08:00
|
|
|
['dir1/subdir1/subsubdir1/file7',
|
|
|
|
'dir1/subdir1/subsubdir1/file5',
|
|
|
|
'dir1/subdir1/subsubdir1/file8',
|
|
|
|
'dir1/subdir1/subsubdir1/file6'])
|
2010-07-12 17:03:45 -05:00
|
|
|
assert_listing('dir1/subdir1/subsubdir1/',
|
2012-09-03 23:30:52 +08:00
|
|
|
['dir1/subdir1/subsubdir1/file7',
|
|
|
|
'dir1/subdir1/subsubdir1/file5',
|
|
|
|
'dir1/subdir1/subsubdir1/file8',
|
|
|
|
'dir1/subdir1/subsubdir1/file6'])
|
2010-07-12 17:03:45 -05:00
|
|
|
assert_listing('dir1/subdir with spaces/',
|
2012-09-03 23:30:52 +08:00
|
|
|
['dir1/subdir with spaces/file B'])
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
|
Privileged acct ACL header, new ACL syntax, TempAuth impl.
* Introduce a new privileged account header: X-Account-Access-Control
* Introduce JSON-based version 2 ACL syntax -- see below for discussion
* Implement account ACL authorization in TempAuth
X-Account-Access-Control Header
-------------------------------
Accounts now have a new privileged header to represent ACLs or any other
form of account-level access control. The value of the header is an opaque
string to be interpreted by the auth system, but it must be a JSON-encoded
dictionary. A reference implementation is given in TempAuth, with the
knowledge that historically other auth systems often use TempAuth as a
starting point.
The reference implementation describes three levels of account access:
"admin", "read-write", and "read-only". Adding new access control
features in a future patch (e.g. "write-only" account access) will
automatically be forward- and backward-compatible, due to the JSON
dictionary header format.
The privileged X-Account-Access-Control header may only be read or written
by a user with "swift_owner" status, traditionally the account owner but
now also any user on the "admin" ACL.
Access Levels:
Read-only access is intended to indicate to the auth system that this
list of identities can read everything (except privileged headers) in
the account. Specifically, a user with read-only account access can get
a list of containers in the account, list the contents of any container,
retrieve any object, and see the (non-privileged) headers of the
account, any container, or any object.
Read-write access is intended to indicate to the auth system that this
list of identities can read or write (or create) any container. A user
with read-write account access can create new containers, set any
unprivileged container headers, overwrite objects, delete containers,
etc. A read-write user can NOT set account headers (or perform any
PUT/POST/DELETE requests on the account).
Admin access is intended to indicate to the auth system that this list of
identities has "swift_owner" privileges. A user with admin account access
can do anything the account owner can, including setting account headers
and any privileged headers -- and thus changing the value of
X-Account-Access-Control and thereby granting read-only, read-write, or
admin access to other users.
The auth system is responsible for making decisions based on this header,
if it chooses to support its use. Therefore the above access level
descriptions are necessarily advisory only for other auth systems.
When setting the value of the header, callers are urged to use the new
format_acl() method, described below.
New ACL Format
--------------
The account ACLs introduce a new format for ACLs, rather than reusing the
existing format from X-Container-Read/X-Container-Write. There are several
reasons for this:
* Container ACL format does not support Unicode
* Container ACLs have a different structure than account ACLs
+ account ACLs have no concept of referrers or rlistings
+ accounts have additional "admin" access level
+ account access levels are structured as admin > rw > ro, which seems more
appropriate for how people access accounts, rather than reusing
container ACLs' orthogonal read and write access
In addition, the container ACL syntax is a bit arbitrary and highly custom,
so instead of parsing additional custom syntax, I'd rather propose a next
version and introduce a means for migration. The V2 ACL syntax has the
following benefits:
* JSON is a well-known standard syntax with parsers in all languages
* no artificial value restrictions (you can grant access to a user named
".rlistings" if you want)
* forward and backward compatibility: you may have extraneous keys, but
your attempt to parse the header won't raise an exception
I've introduced hooks in parse_acl and format_acl which currently default
to the old V1 syntax but tolerate the V2 syntax and can easily be flipped
to default to V2. I'm not changing the default or adding code to rewrite
V1 ACLs to V2, because this patch has suffered a lot of scope creep already,
but this seems like a sensible milestone in the migration.
TempAuth Account ACL Implementation
-----------------------------------
As stated above, core Swift is responsible for privileging the
X-Account-Access-Control header (making it only accessible to swift_owners),
for translating it to -sysmeta-* headers to trigger persistence by the
account server, and for including the header in the responses to requests
by privileged users. Core Swift puts no expectation on the *content* of
this header. Auth systems (including TempAuth) are responsible for
defining the content of the header and taking action based on it.
In addition to the changes described above, this patch defines a format
to be used by TempAuth for these headers in the common.middleware.acl
module, in the methods format_v2_acl() and parse_v2_acl(). This patch
also teaches TempAuth to take action based on the header contents. TempAuth
now sets swift_owner=True if the user is on the Admin ACL, authorizes
GET/HEAD/OPTIONS requests if the user is on any ACL, authorizes
PUT/POST/DELETE requests if the user is on the admin or read-write ACL, etc.
Note that the action of setting swift_owner=True triggers core Swift to
add or strip the privileged headers from the responses. Core Swift (not
the auth system) is responsible for that.
DocImpact: Documentation for the new ACL usage and format appears in
summary form in doc/source/overview_auth.rst, and in more detail in
swift/common/middleware/tempauth.py in the TempAuth class docstring.
I leave it to the Swift doc team to determine whether more is needed.
Change-Id: I836a99eaaa6bb0e92dc03e1ca46a474522e6e826
2013-11-13 20:55:14 +00:00
|
|
|
class TestFileEnv(object):
|
2010-07-12 17:03:45 -05:00
|
|
|
@classmethod
|
|
|
|
def setUp(cls):
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.conn = Connection(tf.config)
|
2010-07-12 17:03:45 -05:00
|
|
|
cls.conn.authenticate()
|
2014-04-30 15:00:49 +03:00
|
|
|
cls.account = Account(cls.conn, tf.config.get('account',
|
|
|
|
tf.config['username']))
|
|
|
|
# creating another account and connection
|
|
|
|
# for account to account copy tests
|
|
|
|
config2 = deepcopy(tf.config)
|
|
|
|
config2['account'] = tf.config['account2']
|
|
|
|
config2['username'] = tf.config['username2']
|
|
|
|
config2['password'] = tf.config['password2']
|
|
|
|
cls.conn2 = Connection(config2)
|
|
|
|
cls.conn2.authenticate()
|
|
|
|
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.account = Account(cls.conn, tf.config.get('account',
|
|
|
|
tf.config['username']))
|
2010-07-12 17:03:45 -05:00
|
|
|
cls.account.delete_containers()
|
2014-04-30 15:00:49 +03:00
|
|
|
cls.account2 = cls.conn2.get_account()
|
|
|
|
cls.account2.delete_containers()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
cls.container = cls.account.container(Utils.create_name())
|
|
|
|
if not cls.container.create():
|
|
|
|
raise ResponseError(cls.conn.response)
|
|
|
|
|
|
|
|
cls.file_size = 128
|
|
|
|
|
2015-07-16 09:35:37 +09:00
|
|
|
# With keystoneauth we need the accounts to have had the project
|
|
|
|
# domain id persisted as sysmeta prior to testing ACLs. This may
|
|
|
|
# not be the case if, for example, the account was created using
|
|
|
|
# a request with reseller_admin role, when project domain id may
|
|
|
|
# not have been known. So we ensure that the project domain id is
|
|
|
|
# in sysmeta by making a POST to the accounts using an admin role.
|
|
|
|
cls.account.update_metadata()
|
|
|
|
cls.account2.update_metadata()
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestFileDev(Base):
|
|
|
|
env = TestFileEnv
|
|
|
|
set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestFileDevUTF8(Base2, TestFileDev):
|
|
|
|
set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestFile(Base):
|
|
|
|
env = TestFileEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
def testCopy(self):
|
2013-11-18 13:17:48 -08:00
|
|
|
# makes sure to test encoded characters
|
2011-10-19 09:21:14 -05:00
|
|
|
source_filename = 'dealde%2Fl04 011e%204c8df/flash.png'
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(source_filename)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
metadata = {}
|
|
|
|
for i in range(1):
|
2012-10-23 09:48:24 +02:00
|
|
|
metadata[Utils.create_ascii_name()] = Utils.create_name()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
data = file_item.write_random()
|
|
|
|
file_item.sync_metadata(metadata)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
dest_cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(dest_cont.create())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
# copy both from within and across containers
|
|
|
|
for cont in (self.env.container, dest_cont):
|
|
|
|
# copy both with and without initial slash
|
|
|
|
for prefix in ('', '/'):
|
|
|
|
dest_filename = Utils.create_name()
|
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(source_filename)
|
|
|
|
file_item.copy('%s%s' % (prefix, cont), dest_filename)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(dest_filename, cont.files())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = cont.file(dest_filename)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(data == file_item.read())
|
|
|
|
self.assertTrue(file_item.initialize())
|
|
|
|
self.assertTrue(metadata == file_item.metadata)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2014-04-30 15:00:49 +03:00
|
|
|
def testCopyAccount(self):
|
|
|
|
# makes sure to test encoded characters
|
|
|
|
source_filename = 'dealde%2Fl04 011e%204c8df/flash.png'
|
|
|
|
file_item = self.env.container.file(source_filename)
|
|
|
|
|
|
|
|
metadata = {Utils.create_ascii_name(): Utils.create_name()}
|
|
|
|
|
|
|
|
data = file_item.write_random()
|
|
|
|
file_item.sync_metadata(metadata)
|
|
|
|
|
|
|
|
dest_cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(dest_cont.create())
|
2014-04-30 15:00:49 +03:00
|
|
|
|
|
|
|
acct = self.env.conn.account_name
|
|
|
|
# copy both from within and across containers
|
|
|
|
for cont in (self.env.container, dest_cont):
|
|
|
|
# copy both with and without initial slash
|
|
|
|
for prefix in ('', '/'):
|
|
|
|
dest_filename = Utils.create_name()
|
|
|
|
|
|
|
|
file_item = self.env.container.file(source_filename)
|
|
|
|
file_item.copy_account(acct,
|
|
|
|
'%s%s' % (prefix, cont),
|
|
|
|
dest_filename)
|
|
|
|
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(dest_filename, cont.files())
|
2014-04-30 15:00:49 +03:00
|
|
|
|
|
|
|
file_item = cont.file(dest_filename)
|
|
|
|
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(data == file_item.read())
|
|
|
|
self.assertTrue(file_item.initialize())
|
|
|
|
self.assertTrue(metadata == file_item.metadata)
|
2014-04-30 15:00:49 +03:00
|
|
|
|
|
|
|
dest_cont = self.env.account2.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(dest_cont.create(hdrs={
|
2014-04-30 15:00:49 +03:00
|
|
|
'X-Container-Write': self.env.conn.user_acl
|
|
|
|
}))
|
|
|
|
|
|
|
|
acct = self.env.conn2.account_name
|
|
|
|
# copy both with and without initial slash
|
|
|
|
for prefix in ('', '/'):
|
|
|
|
dest_filename = Utils.create_name()
|
|
|
|
|
|
|
|
file_item = self.env.container.file(source_filename)
|
|
|
|
file_item.copy_account(acct,
|
|
|
|
'%s%s' % (prefix, dest_cont),
|
|
|
|
dest_filename)
|
|
|
|
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(dest_filename, dest_cont.files())
|
2014-04-30 15:00:49 +03:00
|
|
|
|
|
|
|
file_item = dest_cont.file(dest_filename)
|
|
|
|
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(data == file_item.read())
|
|
|
|
self.assertTrue(file_item.initialize())
|
|
|
|
self.assertTrue(metadata == file_item.metadata)
|
2014-04-30 15:00:49 +03:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
def testCopy404s(self):
|
|
|
|
source_filename = Utils.create_name()
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(source_filename)
|
|
|
|
file_item.write_random()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
dest_cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(dest_cont.create())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
for prefix in ('', '/'):
|
|
|
|
# invalid source container
|
|
|
|
source_cont = self.env.account.container(Utils.create_name())
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = source_cont.file(source_filename)
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(file_item.copy(
|
2013-08-31 20:25:25 -04:00
|
|
|
'%s%s' % (prefix, self.env.container),
|
|
|
|
Utils.create_name()))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(file_item.copy('%s%s' % (prefix, dest_cont),
|
|
|
|
Utils.create_name()))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
# invalid source object
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(file_item.copy(
|
2013-08-31 20:25:25 -04:00
|
|
|
'%s%s' % (prefix, self.env.container),
|
|
|
|
Utils.create_name()))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(file_item.copy('%s%s' % (prefix, dest_cont),
|
|
|
|
Utils.create_name()))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
# invalid destination container
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(source_filename)
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(
|
|
|
|
not file_item.copy(
|
|
|
|
'%s%s' % (prefix, Utils.create_name()),
|
|
|
|
Utils.create_name()))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2014-04-30 15:00:49 +03:00
|
|
|
def testCopyAccount404s(self):
|
|
|
|
acct = self.env.conn.account_name
|
|
|
|
acct2 = self.env.conn2.account_name
|
|
|
|
source_filename = Utils.create_name()
|
|
|
|
file_item = self.env.container.file(source_filename)
|
|
|
|
file_item.write_random()
|
|
|
|
|
|
|
|
dest_cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(dest_cont.create(hdrs={
|
2014-04-30 15:00:49 +03:00
|
|
|
'X-Container-Read': self.env.conn2.user_acl
|
|
|
|
}))
|
|
|
|
dest_cont2 = self.env.account2.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(dest_cont2.create(hdrs={
|
2014-04-30 15:00:49 +03:00
|
|
|
'X-Container-Write': self.env.conn.user_acl,
|
|
|
|
'X-Container-Read': self.env.conn.user_acl
|
|
|
|
}))
|
|
|
|
|
|
|
|
for acct, cont in ((acct, dest_cont), (acct2, dest_cont2)):
|
|
|
|
for prefix in ('', '/'):
|
|
|
|
# invalid source container
|
|
|
|
source_cont = self.env.account.container(Utils.create_name())
|
|
|
|
file_item = source_cont.file(source_filename)
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(file_item.copy_account(
|
2014-04-30 15:00:49 +03:00
|
|
|
acct,
|
|
|
|
'%s%s' % (prefix, self.env.container),
|
|
|
|
Utils.create_name()))
|
2015-02-18 11:59:31 +05:30
|
|
|
# there is no such source container but user has
|
|
|
|
# permissions to do a GET (done internally via COPY) for
|
|
|
|
# objects in his own account.
|
|
|
|
self.assert_status(404)
|
2014-04-30 15:00:49 +03:00
|
|
|
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(file_item.copy_account(
|
2014-04-30 15:00:49 +03:00
|
|
|
acct,
|
|
|
|
'%s%s' % (prefix, cont),
|
|
|
|
Utils.create_name()))
|
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
# invalid source object
|
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(file_item.copy_account(
|
2014-04-30 15:00:49 +03:00
|
|
|
acct,
|
|
|
|
'%s%s' % (prefix, self.env.container),
|
|
|
|
Utils.create_name()))
|
2015-02-18 11:59:31 +05:30
|
|
|
# there is no such source container but user has
|
|
|
|
# permissions to do a GET (done internally via COPY) for
|
|
|
|
# objects in his own account.
|
|
|
|
self.assert_status(404)
|
2014-04-30 15:00:49 +03:00
|
|
|
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(file_item.copy_account(
|
2014-04-30 15:00:49 +03:00
|
|
|
acct,
|
|
|
|
'%s%s' % (prefix, cont),
|
|
|
|
Utils.create_name()))
|
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
# invalid destination container
|
|
|
|
file_item = self.env.container.file(source_filename)
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(file_item.copy_account(
|
2014-04-30 15:00:49 +03:00
|
|
|
acct,
|
|
|
|
'%s%s' % (prefix, Utils.create_name()),
|
|
|
|
Utils.create_name()))
|
|
|
|
if acct == acct2:
|
|
|
|
# there is no such destination container
|
|
|
|
# and foreign user can have no permission to write there
|
|
|
|
self.assert_status(403)
|
|
|
|
else:
|
|
|
|
self.assert_status(404)
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
def testCopyNoDestinationHeader(self):
|
|
|
|
source_filename = Utils.create_name()
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(source_filename)
|
|
|
|
file_item.write_random()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(source_filename)
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(file_item.copy(Utils.create_name(),
|
|
|
|
Utils.create_name(),
|
|
|
|
cfg={'no_destination': True}))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(412)
|
|
|
|
|
|
|
|
def testCopyDestinationSlashProblems(self):
|
|
|
|
source_filename = Utils.create_name()
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(source_filename)
|
|
|
|
file_item.write_random()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
# no slash
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(file_item.copy(Utils.create_name(),
|
|
|
|
Utils.create_name(),
|
|
|
|
cfg={'destination': Utils.create_name()}))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(412)
|
|
|
|
|
|
|
|
def testCopyFromHeader(self):
|
|
|
|
source_filename = Utils.create_name()
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(source_filename)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
metadata = {}
|
|
|
|
for i in range(1):
|
2012-10-23 09:48:24 +02:00
|
|
|
metadata[Utils.create_ascii_name()] = Utils.create_name()
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item.metadata = metadata
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
data = file_item.write_random()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
dest_cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(dest_cont.create())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
# copy both from within and across containers
|
|
|
|
for cont in (self.env.container, dest_cont):
|
|
|
|
# copy both with and without initial slash
|
|
|
|
for prefix in ('', '/'):
|
|
|
|
dest_filename = Utils.create_name()
|
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = cont.file(dest_filename)
|
2013-08-31 20:25:25 -04:00
|
|
|
file_item.write(hdrs={'X-Copy-From': '%s%s/%s' % (
|
|
|
|
prefix, self.env.container.name, source_filename)})
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(dest_filename, cont.files())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = cont.file(dest_filename)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(data == file_item.read())
|
|
|
|
self.assertTrue(file_item.initialize())
|
|
|
|
self.assertTrue(metadata == file_item.metadata)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2014-04-30 15:00:49 +03:00
|
|
|
def testCopyFromAccountHeader(self):
|
|
|
|
acct = self.env.conn.account_name
|
|
|
|
src_cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(src_cont.create(hdrs={
|
2014-04-30 15:00:49 +03:00
|
|
|
'X-Container-Read': self.env.conn2.user_acl
|
|
|
|
}))
|
|
|
|
source_filename = Utils.create_name()
|
|
|
|
file_item = src_cont.file(source_filename)
|
|
|
|
|
|
|
|
metadata = {}
|
|
|
|
for i in range(1):
|
|
|
|
metadata[Utils.create_ascii_name()] = Utils.create_name()
|
|
|
|
file_item.metadata = metadata
|
|
|
|
|
|
|
|
data = file_item.write_random()
|
|
|
|
|
|
|
|
dest_cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(dest_cont.create())
|
2014-04-30 15:00:49 +03:00
|
|
|
dest_cont2 = self.env.account2.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(dest_cont2.create(hdrs={
|
2014-04-30 15:00:49 +03:00
|
|
|
'X-Container-Write': self.env.conn.user_acl
|
|
|
|
}))
|
|
|
|
|
|
|
|
for cont in (src_cont, dest_cont, dest_cont2):
|
|
|
|
# copy both with and without initial slash
|
|
|
|
for prefix in ('', '/'):
|
|
|
|
dest_filename = Utils.create_name()
|
|
|
|
|
|
|
|
file_item = cont.file(dest_filename)
|
|
|
|
file_item.write(hdrs={'X-Copy-From-Account': acct,
|
|
|
|
'X-Copy-From': '%s%s/%s' % (
|
|
|
|
prefix,
|
|
|
|
src_cont.name,
|
|
|
|
source_filename)})
|
|
|
|
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(dest_filename, cont.files())
|
2014-04-30 15:00:49 +03:00
|
|
|
|
|
|
|
file_item = cont.file(dest_filename)
|
|
|
|
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(data == file_item.read())
|
|
|
|
self.assertTrue(file_item.initialize())
|
|
|
|
self.assertTrue(metadata == file_item.metadata)
|
2014-04-30 15:00:49 +03:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
def testCopyFromHeader404s(self):
|
|
|
|
source_filename = Utils.create_name()
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(source_filename)
|
|
|
|
file_item.write_random()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
for prefix in ('', '/'):
|
|
|
|
# invalid source container
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
2015-07-30 00:28:44 +02:00
|
|
|
copy_from = ('%s%s/%s'
|
|
|
|
% (prefix, Utils.create_name(), source_filename))
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.write,
|
2015-07-30 00:28:44 +02:00
|
|
|
hdrs={'X-Copy-From': copy_from})
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
# invalid source object
|
2015-07-30 00:28:44 +02:00
|
|
|
copy_from = ('%s%s/%s'
|
|
|
|
% (prefix, self.env.container.name,
|
|
|
|
Utils.create_name()))
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.write,
|
2015-07-30 00:28:44 +02:00
|
|
|
hdrs={'X-Copy-From': copy_from})
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
# invalid destination container
|
|
|
|
dest_cont = self.env.account.container(Utils.create_name())
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = dest_cont.file(Utils.create_name())
|
2015-07-30 00:28:44 +02:00
|
|
|
copy_from = ('%s%s/%s'
|
|
|
|
% (prefix, self.env.container.name, source_filename))
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.write,
|
2015-07-30 00:28:44 +02:00
|
|
|
hdrs={'X-Copy-From': copy_from})
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
2014-04-30 15:00:49 +03:00
|
|
|
def testCopyFromAccountHeader404s(self):
|
|
|
|
acct = self.env.conn2.account_name
|
|
|
|
src_cont = self.env.account2.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(src_cont.create(hdrs={
|
2014-04-30 15:00:49 +03:00
|
|
|
'X-Container-Read': self.env.conn.user_acl
|
|
|
|
}))
|
|
|
|
source_filename = Utils.create_name()
|
|
|
|
file_item = src_cont.file(source_filename)
|
|
|
|
file_item.write_random()
|
|
|
|
dest_cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(dest_cont.create())
|
2014-04-30 15:00:49 +03:00
|
|
|
|
|
|
|
for prefix in ('', '/'):
|
|
|
|
# invalid source container
|
|
|
|
file_item = dest_cont.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.write,
|
|
|
|
hdrs={'X-Copy-From-Account': acct,
|
|
|
|
'X-Copy-From': '%s%s/%s' %
|
|
|
|
(prefix,
|
|
|
|
Utils.create_name(),
|
|
|
|
source_filename)})
|
|
|
|
# looks like cached responses leak "not found"
|
|
|
|
# to un-authorized users, not going to fix it now, but...
|
|
|
|
self.assert_status([403, 404])
|
|
|
|
|
|
|
|
# invalid source object
|
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.write,
|
|
|
|
hdrs={'X-Copy-From-Account': acct,
|
|
|
|
'X-Copy-From': '%s%s/%s' %
|
|
|
|
(prefix,
|
|
|
|
src_cont,
|
|
|
|
Utils.create_name())})
|
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
# invalid destination container
|
|
|
|
dest_cont = self.env.account.container(Utils.create_name())
|
|
|
|
file_item = dest_cont.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.write,
|
|
|
|
hdrs={'X-Copy-From-Account': acct,
|
|
|
|
'X-Copy-From': '%s%s/%s' %
|
|
|
|
(prefix,
|
|
|
|
src_cont,
|
|
|
|
source_filename)})
|
|
|
|
self.assert_status(404)
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
def testNameLimit(self):
|
2012-09-05 20:49:50 -07:00
|
|
|
limit = load_constraint('max_object_name_length')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
for l in (1, 10, limit / 2, limit - 1, limit, limit + 1, limit * 2):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file('a' * l)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
if l <= limit:
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.write())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(201)
|
|
|
|
else:
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.write)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(400)
|
|
|
|
|
|
|
|
def testQuestionMarkInName(self):
|
|
|
|
if Utils.create_name == Utils.create_ascii_name:
|
|
|
|
file_name = list(Utils.create_name())
|
2012-09-03 23:30:52 +08:00
|
|
|
file_name[random.randint(2, len(file_name) - 2)] = '?'
|
2010-07-12 17:03:45 -05:00
|
|
|
file_name = "".join(file_name)
|
|
|
|
else:
|
|
|
|
file_name = Utils.create_name(6) + '?' + Utils.create_name(6)
|
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(file_name)
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.write(cfg={'no_path_quote': True}))
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertNotIn(file_name, self.env.container.files())
|
|
|
|
self.assertIn(file_name.split('?')[0], self.env.container.files())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testDeleteThen404s(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.write_random())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(201)
|
|
|
|
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.delete())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(204)
|
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item.metadata = {Utils.create_ascii_name(): Utils.create_name()}
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
for method in (file_item.info,
|
|
|
|
file_item.read,
|
|
|
|
file_item.sync_metadata,
|
|
|
|
file_item.delete):
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assertRaises(ResponseError, method)
|
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
def testBlankMetadataName(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
file_item.metadata = {'': Utils.create_name()}
|
|
|
|
self.assertRaises(ResponseError, file_item.write_random)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(400)
|
|
|
|
|
|
|
|
def testMetadataNumberLimit(self):
|
2012-09-05 20:49:50 -07:00
|
|
|
number_limit = load_constraint('max_meta_count')
|
|
|
|
size_limit = load_constraint('max_meta_overall_size')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
for i in (number_limit - 10, number_limit - 1, number_limit,
|
|
|
|
number_limit + 1, number_limit + 10, number_limit + 100):
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
j = size_limit / (i * 2)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
size = 0
|
|
|
|
metadata = {}
|
|
|
|
while len(metadata.keys()) < i:
|
2012-10-23 09:48:24 +02:00
|
|
|
key = Utils.create_ascii_name()
|
2010-07-12 17:03:45 -05:00
|
|
|
val = Utils.create_name()
|
|
|
|
|
|
|
|
if len(key) > j:
|
|
|
|
key = key[:j]
|
|
|
|
val = val[:j]
|
|
|
|
|
|
|
|
size += len(key) + len(val)
|
|
|
|
metadata[key] = val
|
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
file_item.metadata = metadata
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
if i <= number_limit:
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.write())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(201)
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.sync_metadata())
|
2011-06-08 04:19:34 +00:00
|
|
|
self.assert_status((201, 202))
|
2010-07-12 17:03:45 -05:00
|
|
|
else:
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.write)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(400)
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item.metadata = {}
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.write())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(201)
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item.metadata = metadata
|
|
|
|
self.assertRaises(ResponseError, file_item.sync_metadata)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(400)
|
|
|
|
|
|
|
|
def testContentTypeGuessing(self):
|
|
|
|
file_types = {'wav': 'audio/x-wav', 'txt': 'text/plain',
|
|
|
|
'zip': 'application/zip'}
|
|
|
|
|
|
|
|
container = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(container.create())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
for i in file_types.keys():
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = container.file(Utils.create_name() + '.' + i)
|
|
|
|
file_item.write('', cfg={'no_content_type': True})
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
file_types_read = {}
|
|
|
|
for i in container.files(parms={'format': 'json'}):
|
|
|
|
file_types_read[i['name'].split('.')[1]] = i['content_type']
|
|
|
|
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(file_types, file_types_read)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testRangedGets(self):
|
2015-03-31 22:35:37 -07:00
|
|
|
# We set the file_length to a strange multiple here. This is to check
|
|
|
|
# that ranges still work in the EC case when the requested range
|
|
|
|
# spans EC segment boundaries. The 1 MiB base value is chosen because
|
|
|
|
# that's a common EC segment size. The 1.33 multiple is to ensure we
|
|
|
|
# aren't aligned on segment boundaries
|
|
|
|
file_length = int(1048576 * 1.33)
|
2012-09-03 23:30:52 +08:00
|
|
|
range_size = file_length / 10
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
data = file_item.write_random(file_length)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
for i in range(0, file_length, range_size):
|
2012-09-03 23:30:52 +08:00
|
|
|
range_string = 'bytes=%d-%d' % (i, i + range_size - 1)
|
2010-07-12 17:03:45 -05:00
|
|
|
hdrs = {'Range': range_string}
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(
|
|
|
|
data[i: i + range_size] == file_item.read(hdrs=hdrs),
|
|
|
|
range_string)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
range_string = 'bytes=-%d' % (i)
|
|
|
|
hdrs = {'Range': range_string}
|
2012-10-03 14:20:52 -07:00
|
|
|
if i == 0:
|
|
|
|
# RFC 2616 14.35.1
|
|
|
|
# "If a syntactically valid byte-range-set includes ... at
|
|
|
|
# least one suffix-byte-range-spec with a NON-ZERO
|
|
|
|
# suffix-length, then the byte-range-set is satisfiable.
|
|
|
|
# Otherwise, the byte-range-set is unsatisfiable.
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
|
2012-10-03 14:20:52 -07:00
|
|
|
self.assert_status(416)
|
|
|
|
else:
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(file_item.read(hdrs=hdrs), data[-i:])
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assert_header('etag', file_item.md5)
|
|
|
|
self.assert_header('accept-ranges', 'bytes')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
range_string = 'bytes=%d-' % (i)
|
|
|
|
hdrs = {'Range': range_string}
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assertEqual(
|
|
|
|
file_item.read(hdrs=hdrs), data[i - file_length:],
|
2015-07-21 18:06:32 +05:30
|
|
|
range_string)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
range_string = 'bytes=%d-%d' % (file_length + 1000, file_length + 2000)
|
2010-07-12 17:03:45 -05:00
|
|
|
hdrs = {'Range': range_string}
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(416)
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assert_header('etag', file_item.md5)
|
|
|
|
self.assert_header('accept-ranges', 'bytes')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
range_string = 'bytes=%d-%d' % (file_length - 1000, file_length + 2000)
|
2010-07-12 17:03:45 -05:00
|
|
|
hdrs = {'Range': range_string}
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assertEqual(file_item.read(hdrs=hdrs), data[-1000:], range_string)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
hdrs = {'Range': '0-4'}
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assertEqual(file_item.read(hdrs=hdrs), data, '0-4')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-10-03 14:20:52 -07:00
|
|
|
# RFC 2616 14.35.1
|
|
|
|
# "If the entity is shorter than the specified suffix-length, the
|
|
|
|
# entire entity-body is used."
|
|
|
|
range_string = 'bytes=-%d' % (file_length + 10)
|
|
|
|
hdrs = {'Range': range_string}
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assertEqual(file_item.read(hdrs=hdrs), data, range_string)
|
2012-10-03 14:20:52 -07:00
|
|
|
|
2015-03-25 14:59:43 -07:00
|
|
|
def testMultiRangeGets(self):
|
|
|
|
file_length = 10000
|
|
|
|
range_size = file_length / 10
|
|
|
|
subrange_size = range_size / 10
|
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
data = file_item.write_random(
|
2015-11-26 14:46:01 +00:00
|
|
|
file_length, hdrs={"Content-Type":
|
|
|
|
"lovecraft/rugose; squamous=true"})
|
2015-03-25 14:59:43 -07:00
|
|
|
|
|
|
|
for i in range(0, file_length, range_size):
|
|
|
|
range_string = 'bytes=%d-%d,%d-%d,%d-%d' % (
|
|
|
|
i, i + subrange_size - 1,
|
|
|
|
i + 2 * subrange_size, i + 3 * subrange_size - 1,
|
|
|
|
i + 4 * subrange_size, i + 5 * subrange_size - 1)
|
|
|
|
hdrs = {'Range': range_string}
|
|
|
|
|
|
|
|
fetched = file_item.read(hdrs=hdrs)
|
2015-11-26 14:46:01 +00:00
|
|
|
self.assert_status(206)
|
2015-03-25 14:59:43 -07:00
|
|
|
content_type = file_item.content_type
|
2015-11-26 14:46:01 +00:00
|
|
|
self.assertTrue(content_type.startswith("multipart/byteranges"))
|
|
|
|
self.assertIsNone(file_item.content_range)
|
2015-03-25 14:59:43 -07:00
|
|
|
|
|
|
|
# email.parser.FeedParser wants a message with headers on the
|
|
|
|
# front, then two CRLFs, and then a body (like emails have but
|
|
|
|
# HTTP response bodies don't). We fake it out by constructing a
|
|
|
|
# one-header preamble containing just the Content-Type, then
|
|
|
|
# feeding in the response body.
|
|
|
|
parser = email.parser.FeedParser()
|
|
|
|
parser.feed("Content-Type: %s\r\n\r\n" % content_type)
|
|
|
|
parser.feed(fetched)
|
|
|
|
root_message = parser.close()
|
|
|
|
self.assertTrue(root_message.is_multipart())
|
|
|
|
|
|
|
|
byteranges = root_message.get_payload()
|
|
|
|
self.assertEqual(len(byteranges), 3)
|
|
|
|
|
2015-11-26 14:46:01 +00:00
|
|
|
self.assertEqual(byteranges[0]['Content-Type'],
|
|
|
|
"lovecraft/rugose; squamous=true")
|
2015-03-25 14:59:43 -07:00
|
|
|
self.assertEqual(
|
|
|
|
byteranges[0]['Content-Range'],
|
|
|
|
"bytes %d-%d/%d" % (i, i + subrange_size - 1, file_length))
|
|
|
|
self.assertEqual(
|
|
|
|
byteranges[0].get_payload(),
|
|
|
|
data[i:(i + subrange_size)])
|
|
|
|
|
2015-11-26 14:46:01 +00:00
|
|
|
self.assertEqual(byteranges[1]['Content-Type'],
|
|
|
|
"lovecraft/rugose; squamous=true")
|
2015-03-25 14:59:43 -07:00
|
|
|
self.assertEqual(
|
|
|
|
byteranges[1]['Content-Range'],
|
|
|
|
"bytes %d-%d/%d" % (i + 2 * subrange_size,
|
|
|
|
i + 3 * subrange_size - 1, file_length))
|
|
|
|
self.assertEqual(
|
|
|
|
byteranges[1].get_payload(),
|
|
|
|
data[(i + 2 * subrange_size):(i + 3 * subrange_size)])
|
|
|
|
|
2015-11-26 14:46:01 +00:00
|
|
|
self.assertEqual(byteranges[2]['Content-Type'],
|
|
|
|
"lovecraft/rugose; squamous=true")
|
2015-03-25 14:59:43 -07:00
|
|
|
self.assertEqual(
|
|
|
|
byteranges[2]['Content-Range'],
|
|
|
|
"bytes %d-%d/%d" % (i + 4 * subrange_size,
|
|
|
|
i + 5 * subrange_size - 1, file_length))
|
|
|
|
self.assertEqual(
|
|
|
|
byteranges[2].get_payload(),
|
|
|
|
data[(i + 4 * subrange_size):(i + 5 * subrange_size)])
|
|
|
|
|
|
|
|
# The first two ranges are satisfiable but the third is not; the
|
|
|
|
# result is a multipart/byteranges response containing only the two
|
|
|
|
# satisfiable byteranges.
|
|
|
|
range_string = 'bytes=%d-%d,%d-%d,%d-%d' % (
|
|
|
|
0, subrange_size - 1,
|
|
|
|
2 * subrange_size, 3 * subrange_size - 1,
|
|
|
|
file_length, file_length + subrange_size - 1)
|
|
|
|
hdrs = {'Range': range_string}
|
|
|
|
fetched = file_item.read(hdrs=hdrs)
|
2015-11-26 14:46:01 +00:00
|
|
|
self.assert_status(206)
|
2015-03-25 14:59:43 -07:00
|
|
|
content_type = file_item.content_type
|
2015-11-26 14:46:01 +00:00
|
|
|
self.assertTrue(content_type.startswith("multipart/byteranges"))
|
|
|
|
self.assertIsNone(file_item.content_range)
|
2015-03-25 14:59:43 -07:00
|
|
|
|
|
|
|
parser = email.parser.FeedParser()
|
|
|
|
parser.feed("Content-Type: %s\r\n\r\n" % content_type)
|
|
|
|
parser.feed(fetched)
|
|
|
|
root_message = parser.close()
|
|
|
|
|
|
|
|
self.assertTrue(root_message.is_multipart())
|
|
|
|
byteranges = root_message.get_payload()
|
|
|
|
self.assertEqual(len(byteranges), 2)
|
|
|
|
|
2015-11-26 14:46:01 +00:00
|
|
|
self.assertEqual(byteranges[0]['Content-Type'],
|
|
|
|
"lovecraft/rugose; squamous=true")
|
2015-03-25 14:59:43 -07:00
|
|
|
self.assertEqual(
|
|
|
|
byteranges[0]['Content-Range'],
|
|
|
|
"bytes %d-%d/%d" % (0, subrange_size - 1, file_length))
|
|
|
|
self.assertEqual(byteranges[0].get_payload(), data[:subrange_size])
|
|
|
|
|
2015-11-26 14:46:01 +00:00
|
|
|
self.assertEqual(byteranges[1]['Content-Type'],
|
|
|
|
"lovecraft/rugose; squamous=true")
|
2015-03-25 14:59:43 -07:00
|
|
|
self.assertEqual(
|
|
|
|
byteranges[1]['Content-Range'],
|
|
|
|
"bytes %d-%d/%d" % (2 * subrange_size, 3 * subrange_size - 1,
|
|
|
|
file_length))
|
|
|
|
self.assertEqual(
|
|
|
|
byteranges[1].get_payload(),
|
|
|
|
data[(2 * subrange_size):(3 * subrange_size)])
|
|
|
|
|
|
|
|
# The first range is satisfiable but the second is not; the
|
|
|
|
# result is either a multipart/byteranges response containing one
|
|
|
|
# byterange or a normal, non-MIME 206 response.
|
|
|
|
range_string = 'bytes=%d-%d,%d-%d' % (
|
|
|
|
0, subrange_size - 1,
|
|
|
|
file_length, file_length + subrange_size - 1)
|
|
|
|
hdrs = {'Range': range_string}
|
|
|
|
fetched = file_item.read(hdrs=hdrs)
|
2015-11-26 14:46:01 +00:00
|
|
|
self.assert_status(206)
|
2015-03-25 14:59:43 -07:00
|
|
|
content_type = file_item.content_type
|
|
|
|
if content_type.startswith("multipart/byteranges"):
|
2015-11-26 14:46:01 +00:00
|
|
|
self.assertIsNone(file_item.content_range)
|
2015-03-25 14:59:43 -07:00
|
|
|
parser = email.parser.FeedParser()
|
|
|
|
parser.feed("Content-Type: %s\r\n\r\n" % content_type)
|
|
|
|
parser.feed(fetched)
|
|
|
|
root_message = parser.close()
|
|
|
|
|
|
|
|
self.assertTrue(root_message.is_multipart())
|
|
|
|
byteranges = root_message.get_payload()
|
|
|
|
self.assertEqual(len(byteranges), 1)
|
|
|
|
|
2015-11-26 14:46:01 +00:00
|
|
|
self.assertEqual(byteranges[0]['Content-Type'],
|
|
|
|
"lovecraft/rugose; squamous=true")
|
2015-03-25 14:59:43 -07:00
|
|
|
self.assertEqual(
|
|
|
|
byteranges[0]['Content-Range'],
|
|
|
|
"bytes %d-%d/%d" % (0, subrange_size - 1, file_length))
|
|
|
|
self.assertEqual(byteranges[0].get_payload(), data[:subrange_size])
|
|
|
|
else:
|
|
|
|
self.assertEqual(
|
2015-11-26 14:46:01 +00:00
|
|
|
file_item.content_range,
|
2015-03-25 14:59:43 -07:00
|
|
|
"bytes %d-%d/%d" % (0, subrange_size - 1, file_length))
|
2015-11-26 14:46:01 +00:00
|
|
|
self.assertEqual(content_type, "lovecraft/rugose; squamous=true")
|
2015-03-25 14:59:43 -07:00
|
|
|
self.assertEqual(fetched, data[:subrange_size])
|
|
|
|
|
|
|
|
# No byterange is satisfiable, so we get a 416 response.
|
|
|
|
range_string = 'bytes=%d-%d,%d-%d' % (
|
|
|
|
file_length, file_length + 2,
|
|
|
|
file_length + 100, file_length + 102)
|
|
|
|
hdrs = {'Range': range_string}
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
|
|
|
|
self.assert_status(416)
|
|
|
|
|
2012-06-06 03:39:53 +09:00
|
|
|
def testRangedGetsWithLWSinHeader(self):
|
|
|
|
file_length = 10000
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
data = file_item.write_random(file_length)
|
2012-06-06 03:39:53 +09:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
for r in ('BYTES=0-999', 'bytes = 0-999', 'BYTES = 0 - 999',
|
2012-09-03 23:30:52 +08:00
|
|
|
'bytes = 0 - 999', 'bytes=0 - 999', 'bytes=0-999 '):
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.read(hdrs={'Range': r}) == data[0:1000])
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testFileSizeLimit(self):
|
2012-09-05 20:49:50 -07:00
|
|
|
limit = load_constraint('max_file_size')
|
2010-07-12 17:03:45 -05:00
|
|
|
tsecs = 3
|
|
|
|
|
2014-04-07 13:01:44 -04:00
|
|
|
def timeout(seconds, method, *args, **kwargs):
|
|
|
|
try:
|
|
|
|
with eventlet.Timeout(seconds):
|
|
|
|
method(*args, **kwargs)
|
|
|
|
except eventlet.Timeout:
|
|
|
|
return True
|
|
|
|
else:
|
|
|
|
return False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
for i in (limit - 100, limit - 10, limit - 1, limit, limit + 1,
|
|
|
|
limit + 10, limit + 100):
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
if i <= limit:
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(timeout(tsecs, file_item.write,
|
|
|
|
cfg={'set_content_length': i}))
|
2010-07-12 17:03:45 -05:00
|
|
|
else:
|
|
|
|
self.assertRaises(ResponseError, timeout, tsecs,
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item.write,
|
|
|
|
cfg={'set_content_length': i})
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testNoContentLengthForPut(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.write, 'testing',
|
2012-09-03 23:30:52 +08:00
|
|
|
cfg={'no_content_length': True})
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(411)
|
|
|
|
|
|
|
|
def testDelete(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
file_item.write_random(self.env.file_size)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(file_item.name, self.env.container.files())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.delete())
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertNotIn(file_item.name, self.env.container.files())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testBadHeaders(self):
|
|
|
|
file_length = 100
|
|
|
|
|
|
|
|
# no content type on puts should be ok
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
file_item.write_random(file_length, cfg={'no_content_type': True})
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(201)
|
|
|
|
|
|
|
|
# content length x
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.write_random, file_length,
|
2012-09-03 23:30:52 +08:00
|
|
|
hdrs={'Content-Length': 'X'},
|
|
|
|
cfg={'no_content_length': True})
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(400)
|
|
|
|
|
2014-08-21 10:33:30 -04:00
|
|
|
# no content-length
|
|
|
|
self.assertRaises(ResponseError, file_item.write_random, file_length,
|
|
|
|
cfg={'no_content_length': True})
|
|
|
|
self.assert_status(411)
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, file_item.write_random, file_length,
|
|
|
|
hdrs={'transfer-encoding': 'gzip,chunked'},
|
|
|
|
cfg={'no_content_length': True})
|
|
|
|
self.assert_status(501)
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
# bad request types
|
2015-07-30 00:16:06 +02:00
|
|
|
# for req in ('LICK', 'GETorHEAD_base', 'container_info',
|
|
|
|
# 'best_response'):
|
2010-09-02 21:50:16 -07:00
|
|
|
for req in ('LICK', 'GETorHEAD_base'):
|
2010-07-12 17:03:45 -05:00
|
|
|
self.env.account.conn.make_request(req)
|
|
|
|
self.assert_status(405)
|
|
|
|
|
|
|
|
# bad range headers
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(
|
|
|
|
len(file_item.read(hdrs={'Range': 'parsecs=8-12'})) ==
|
|
|
|
file_length)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(200)
|
|
|
|
|
|
|
|
def testMetadataLengthLimits(self):
|
2012-09-05 20:49:50 -07:00
|
|
|
key_limit = load_constraint('max_meta_name_length')
|
|
|
|
value_limit = load_constraint('max_meta_value_length')
|
2012-09-03 23:30:52 +08:00
|
|
|
lengths = [[key_limit, value_limit], [key_limit, value_limit + 1],
|
|
|
|
[key_limit + 1, value_limit], [key_limit, 0],
|
|
|
|
[key_limit, value_limit * 10],
|
|
|
|
[key_limit * 10, value_limit]]
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
for l in lengths:
|
2012-09-03 23:30:52 +08:00
|
|
|
metadata = {'a' * l[0]: 'b' * l[1]}
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
file_item.metadata = metadata
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
if l[0] <= key_limit and l[1] <= value_limit:
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.write())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(201)
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.sync_metadata())
|
2010-07-12 17:03:45 -05:00
|
|
|
else:
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.write)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(400)
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item.metadata = {}
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.write())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(201)
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item.metadata = metadata
|
|
|
|
self.assertRaises(ResponseError, file_item.sync_metadata)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(400)
|
|
|
|
|
|
|
|
def testEtagWayoff(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
2010-07-12 17:03:45 -05:00
|
|
|
hdrs = {'etag': 'reallylonganddefinitelynotavalidetagvalue'}
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.write_random, hdrs=hdrs)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(422)
|
|
|
|
|
|
|
|
def testFileCreate(self):
|
|
|
|
for i in range(10):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
data = file_item.write_random()
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(201)
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(data == file_item.read())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(200)
|
|
|
|
|
|
|
|
def testHead(self):
|
|
|
|
file_name = Utils.create_name()
|
|
|
|
content_type = Utils.create_name()
|
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(file_name)
|
|
|
|
file_item.content_type = content_type
|
|
|
|
file_item.write_random(self.env.file_size)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
md5 = file_item.md5
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(file_name)
|
|
|
|
info = file_item.info()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
self.assert_status(200)
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(info['content_length'], self.env.file_size)
|
|
|
|
self.assertEqual(info['etag'], md5)
|
|
|
|
self.assertEqual(info['content_type'], content_type)
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn('last_modified', info)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testDeleteOfFileThatDoesNotExist(self):
|
|
|
|
# in container that exists
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.delete)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
# in container that does not exist
|
|
|
|
container = self.env.account.container(Utils.create_name())
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = container.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.delete)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
def testHeadOnFileThatDoesNotExist(self):
|
|
|
|
# in container that exists
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.info)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
# in container that does not exist
|
|
|
|
container = self.env.account.container(Utils.create_name())
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = container.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.info)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
def testMetadataOnPost(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
file_item.write_random(self.env.file_size)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
for i in range(10):
|
|
|
|
metadata = {}
|
2013-08-13 21:57:51 +08:00
|
|
|
for j in range(10):
|
2012-10-23 09:48:24 +02:00
|
|
|
metadata[Utils.create_ascii_name()] = Utils.create_name()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item.metadata = metadata
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.sync_metadata())
|
2011-06-08 04:19:34 +00:00
|
|
|
self.assert_status((201, 202))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(file_item.name)
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.initialize())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(200)
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(file_item.metadata, metadata)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testGetContentType(self):
|
|
|
|
file_name = Utils.create_name()
|
|
|
|
content_type = Utils.create_name()
|
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(file_name)
|
|
|
|
file_item.content_type = content_type
|
|
|
|
file_item.write_random()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(file_name)
|
|
|
|
file_item.read()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(content_type, file_item.content_type)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testGetOnFileThatDoesNotExist(self):
|
|
|
|
# in container that exists
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.read)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
# in container that does not exist
|
|
|
|
container = self.env.account.container(Utils.create_name())
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = container.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.read)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
def testPostOnFileThatDoesNotExist(self):
|
|
|
|
# in container that exists
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
file_item.metadata['Field'] = 'Value'
|
|
|
|
self.assertRaises(ResponseError, file_item.sync_metadata)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
# in container that does not exist
|
|
|
|
container = self.env.account.container(Utils.create_name())
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = container.file(Utils.create_name())
|
|
|
|
file_item.metadata['Field'] = 'Value'
|
|
|
|
self.assertRaises(ResponseError, file_item.sync_metadata)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
def testMetadataOnPut(self):
|
|
|
|
for i in range(10):
|
|
|
|
metadata = {}
|
|
|
|
for j in range(10):
|
2012-10-23 09:48:24 +02:00
|
|
|
metadata[Utils.create_ascii_name()] = Utils.create_name()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
file_item.metadata = metadata
|
|
|
|
file_item.write_random(self.env.file_size)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(file_item.name)
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.initialize())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(200)
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(file_item.metadata, metadata)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testSerialization(self):
|
|
|
|
container = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(container.create())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
files = []
|
|
|
|
for i in (0, 1, 10, 100, 1000, 10000):
|
2012-09-03 23:30:52 +08:00
|
|
|
files.append({'name': Utils.create_name(),
|
|
|
|
'content_type': Utils.create_name(), 'bytes': i})
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
write_time = time.time()
|
|
|
|
for f in files:
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = container.file(f['name'])
|
|
|
|
file_item.content_type = f['content_type']
|
|
|
|
file_item.write_random(f['bytes'])
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
f['hash'] = file_item.md5
|
2010-07-12 17:03:45 -05:00
|
|
|
f['json'] = False
|
|
|
|
f['xml'] = False
|
|
|
|
write_time = time.time() - write_time
|
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in ['json', 'xml']:
|
|
|
|
for file_item in container.files(parms={'format': format_type}):
|
2010-07-12 17:03:45 -05:00
|
|
|
found = False
|
|
|
|
for f in files:
|
2013-08-04 11:15:53 +08:00
|
|
|
if f['name'] != file_item['name']:
|
2010-07-12 17:03:45 -05:00
|
|
|
continue
|
|
|
|
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(file_item['content_type'],
|
|
|
|
f['content_type'])
|
|
|
|
self.assertEqual(int(file_item['bytes']), f['bytes'])
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
d = datetime.strptime(
|
|
|
|
file_item['last_modified'].split('.')[0],
|
|
|
|
"%Y-%m-%dT%H:%M:%S")
|
2010-07-12 17:03:45 -05:00
|
|
|
lm = time.mktime(d.timetuple())
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
if 'last_modified' in f:
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(f['last_modified'], lm)
|
2010-07-12 17:03:45 -05:00
|
|
|
else:
|
2012-09-03 23:30:52 +08:00
|
|
|
f['last_modified'] = lm
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
f[format_type] = True
|
2010-07-12 17:03:45 -05:00
|
|
|
found = True
|
|
|
|
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(
|
|
|
|
found, 'Unexpected file %s found in '
|
|
|
|
'%s listing' % (file_item['name'], format_type))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
headers = dict(self.env.conn.response.getheaders())
|
2013-08-04 11:15:53 +08:00
|
|
|
if format_type == 'json':
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(headers['content-type'],
|
|
|
|
'application/json; charset=utf-8')
|
2013-08-04 11:15:53 +08:00
|
|
|
elif format_type == 'xml':
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(headers['content-type'],
|
|
|
|
'application/xml; charset=utf-8')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
lm_diff = max([f['last_modified'] for f in files]) -\
|
2010-07-12 17:03:45 -05:00
|
|
|
min([f['last_modified'] for f in files])
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(
|
|
|
|
lm_diff < write_time + 1, 'Diff in last '
|
|
|
|
'modified times should be less than time to write files')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
for f in files:
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in ['json', 'xml']:
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(
|
|
|
|
f[format_type], 'File %s not found in %s listing'
|
|
|
|
% (f['name'], format_type))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testStackedOverwrite(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
for i in range(1, 11):
|
2013-08-04 11:15:53 +08:00
|
|
|
data = file_item.write_random(512)
|
|
|
|
file_item.write(data)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.read() == data)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testTooLongName(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file('x' * 1025)
|
|
|
|
self.assertRaises(ResponseError, file_item.write)
|
2012-09-03 23:30:52 +08:00
|
|
|
self.assert_status(400)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testZeroByteFile(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.write(''))
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(file_item.name, self.env.container.files())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.read() == '')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testEtagResponse(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2015-05-27 17:27:47 +02:00
|
|
|
data = six.StringIO(file_item.write_random(512))
|
2010-07-12 17:03:45 -05:00
|
|
|
etag = File.compute_md5sum(data)
|
|
|
|
|
|
|
|
headers = dict(self.env.conn.response.getheaders())
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn('etag', headers.keys())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
header_etag = headers['etag'].strip('"')
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(etag, header_etag)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testChunkedPut(self):
|
2014-03-31 23:22:49 -04:00
|
|
|
if (tf.web_front_end == 'apache2'):
|
|
|
|
raise SkipTest("Chunked PUT can only be tested with apache2 web"
|
|
|
|
" front end")
|
2014-04-07 13:01:44 -04:00
|
|
|
|
|
|
|
def chunks(s, length=3):
|
|
|
|
i, j = 0, length
|
|
|
|
while i < len(s):
|
|
|
|
yield s[i:j]
|
|
|
|
i, j = j, j + length
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
data = File.random_data(10000)
|
|
|
|
etag = File.compute_md5sum(data)
|
|
|
|
|
|
|
|
for i in (1, 10, 100, 1000):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
for j in chunks(data, i):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item.chunked_write(j)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.chunked_write())
|
|
|
|
self.assertTrue(data == file_item.read())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
info = file_item.info()
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(etag, info['etag'])
|
2010-07-12 17:03:45 -05:00
|
|
|
|
Update container on fast-POST
This patch makes a number of changes to enable content-type
metadata to be updated when using the fast-POST mode of
operation, as proposed in the associated spec [1].
* the object server and diskfile are modified to allow
content-type to be updated by a POST and the updated value
to be stored in .meta files.
* the object server accepts PUTs and DELETEs with older
timestamps than existing .meta files. This is to be
consistent with replication that will leave a later .meta
file in place when replicating a .data file.
* the diskfile interface is modified to provide accessor
methods for the content-type and its timestamp.
* the naming of .meta files is modified to encode two
timestamps when the .meta file contains a content-type value
that was set prior to the latest metadata update; this
enables consistency to be achieved when rsync is used for
replication.
* ssync is modified to sync meta files when content-type
differs between local and remote copies of objects.
* the object server issues container updates when handling
POST requests, notifying the container server of the current
immutable metadata (etag, size, hash, swift_bytes),
content-type with their respective timestamps, and the
mutable metadata timestamp.
* the container server maintains the most recently reported
values for immutable metadata, content-type and mutable
metadata, each with their respective timestamps, in a single
db row.
* new probe tests verify that replication achieves eventual
consistency of containers and objects after discrete updates
to content-type and mutable metadata, and that container-sync
sync's objects after fast-post updates.
[1] spec change-id: I60688efc3df692d3a39557114dca8c5490f7837e
Change-Id: Ia597cd460bb5fd40aa92e886e3e18a7542603d01
2015-08-10 10:30:10 -05:00
|
|
|
def test_POST(self):
|
|
|
|
# verify consistency between object and container listing metadata
|
|
|
|
file_name = Utils.create_name()
|
|
|
|
file_item = self.env.container.file(file_name)
|
|
|
|
file_item.content_type = 'text/foobar'
|
|
|
|
file_item.write_random(1024)
|
|
|
|
|
|
|
|
# sanity check
|
|
|
|
file_item = self.env.container.file(file_name)
|
|
|
|
file_item.initialize()
|
|
|
|
self.assertEqual('text/foobar', file_item.content_type)
|
|
|
|
self.assertEqual(1024, file_item.size)
|
|
|
|
etag = file_item.etag
|
|
|
|
|
|
|
|
# check container listing is consistent
|
|
|
|
listing = self.env.container.files(parms={'format': 'json'})
|
|
|
|
for f_dict in listing:
|
|
|
|
if f_dict['name'] == file_name:
|
|
|
|
break
|
|
|
|
else:
|
|
|
|
self.fail('Failed to find file %r in listing' % file_name)
|
|
|
|
self.assertEqual(1024, f_dict['bytes'])
|
|
|
|
self.assertEqual('text/foobar', f_dict['content_type'])
|
|
|
|
self.assertEqual(etag, f_dict['hash'])
|
|
|
|
|
|
|
|
# now POST updated content-type to each file
|
|
|
|
file_item = self.env.container.file(file_name)
|
|
|
|
file_item.content_type = 'image/foobarbaz'
|
|
|
|
file_item.sync_metadata({'Test': 'blah'})
|
|
|
|
|
|
|
|
# sanity check object metadata
|
|
|
|
file_item = self.env.container.file(file_name)
|
|
|
|
file_item.initialize()
|
|
|
|
|
|
|
|
self.assertEqual(1024, file_item.size)
|
|
|
|
self.assertEqual('image/foobarbaz', file_item.content_type)
|
|
|
|
self.assertEqual(etag, file_item.etag)
|
|
|
|
self.assertIn('test', file_item.metadata)
|
|
|
|
|
|
|
|
# check for consistency between object and container listing
|
|
|
|
listing = self.env.container.files(parms={'format': 'json'})
|
|
|
|
for f_dict in listing:
|
|
|
|
if f_dict['name'] == file_name:
|
|
|
|
break
|
|
|
|
else:
|
|
|
|
self.fail('Failed to find file %r in listing' % file_name)
|
|
|
|
self.assertEqual(1024, f_dict['bytes'])
|
|
|
|
self.assertEqual('image/foobarbaz', f_dict['content_type'])
|
|
|
|
self.assertEqual(etag, f_dict['hash'])
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestFileUTF8(Base2, TestFile):
|
|
|
|
set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2013-11-21 11:03:46 -08:00
|
|
|
class TestDloEnv(object):
|
|
|
|
@classmethod
|
|
|
|
def setUp(cls):
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.conn = Connection(tf.config)
|
2013-11-21 11:03:46 -08:00
|
|
|
cls.conn.authenticate()
|
2015-12-16 17:19:24 +11:00
|
|
|
|
|
|
|
config2 = tf.config.copy()
|
|
|
|
config2['username'] = tf.config['username3']
|
|
|
|
config2['password'] = tf.config['password3']
|
|
|
|
cls.conn2 = Connection(config2)
|
|
|
|
cls.conn2.authenticate()
|
|
|
|
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.account = Account(cls.conn, tf.config.get('account',
|
|
|
|
tf.config['username']))
|
2013-11-21 11:03:46 -08:00
|
|
|
cls.account.delete_containers()
|
|
|
|
|
|
|
|
cls.container = cls.account.container(Utils.create_name())
|
2015-12-16 17:19:24 +11:00
|
|
|
cls.container2 = cls.account.container(Utils.create_name())
|
2013-11-21 11:03:46 -08:00
|
|
|
|
2015-12-16 17:19:24 +11:00
|
|
|
for cont in (cls.container, cls.container2):
|
|
|
|
if not cont.create():
|
|
|
|
raise ResponseError(cls.conn.response)
|
2013-11-21 11:03:46 -08:00
|
|
|
|
|
|
|
# avoid getting a prefix that stops halfway through an encoded
|
|
|
|
# character
|
|
|
|
prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")
|
|
|
|
cls.segment_prefix = prefix
|
|
|
|
|
|
|
|
for letter in ('a', 'b', 'c', 'd', 'e'):
|
|
|
|
file_item = cls.container.file("%s/seg_lower%s" % (prefix, letter))
|
|
|
|
file_item.write(letter * 10)
|
|
|
|
|
|
|
|
file_item = cls.container.file("%s/seg_upper%s" % (prefix, letter))
|
|
|
|
file_item.write(letter.upper() * 10)
|
|
|
|
|
2015-12-16 17:19:24 +11:00
|
|
|
for letter in ('f', 'g', 'h', 'i', 'j'):
|
|
|
|
file_item = cls.container2.file("%s/seg_lower%s" %
|
|
|
|
(prefix, letter))
|
|
|
|
file_item.write(letter * 10)
|
|
|
|
|
2013-11-21 11:03:46 -08:00
|
|
|
man1 = cls.container.file("man1")
|
|
|
|
man1.write('man1-contents',
|
|
|
|
hdrs={"X-Object-Manifest": "%s/%s/seg_lower" %
|
|
|
|
(cls.container.name, prefix)})
|
|
|
|
|
2015-12-16 17:19:24 +11:00
|
|
|
man2 = cls.container.file("man2")
|
|
|
|
man2.write('man2-contents',
|
2013-11-21 11:03:46 -08:00
|
|
|
hdrs={"X-Object-Manifest": "%s/%s/seg_upper" %
|
|
|
|
(cls.container.name, prefix)})
|
|
|
|
|
|
|
|
manall = cls.container.file("manall")
|
|
|
|
manall.write('manall-contents',
|
|
|
|
hdrs={"X-Object-Manifest": "%s/%s/seg" %
|
|
|
|
(cls.container.name, prefix)})
|
|
|
|
|
2015-12-16 17:19:24 +11:00
|
|
|
mancont2 = cls.container.file("mancont2")
|
|
|
|
mancont2.write(
|
|
|
|
'mancont2-contents',
|
|
|
|
hdrs={"X-Object-Manifest": "%s/%s/seg_lower" %
|
|
|
|
(cls.container2.name, prefix)})
|
|
|
|
|
2013-11-21 11:03:46 -08:00
|
|
|
|
|
|
|
class TestDlo(Base):
|
|
|
|
env = TestDloEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
def test_get_manifest(self):
|
|
|
|
file_item = self.env.container.file('man1')
|
|
|
|
file_contents = file_item.read()
|
|
|
|
self.assertEqual(
|
|
|
|
file_contents,
|
|
|
|
"aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeee")
|
|
|
|
|
|
|
|
file_item = self.env.container.file('man2')
|
|
|
|
file_contents = file_item.read()
|
|
|
|
self.assertEqual(
|
|
|
|
file_contents,
|
|
|
|
"AAAAAAAAAABBBBBBBBBBCCCCCCCCCCDDDDDDDDDDEEEEEEEEEE")
|
|
|
|
|
|
|
|
file_item = self.env.container.file('manall')
|
|
|
|
file_contents = file_item.read()
|
|
|
|
self.assertEqual(
|
|
|
|
file_contents,
|
|
|
|
("aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeee" +
|
|
|
|
"AAAAAAAAAABBBBBBBBBBCCCCCCCCCCDDDDDDDDDDEEEEEEEEEE"))
|
|
|
|
|
|
|
|
def test_get_manifest_document_itself(self):
|
|
|
|
file_item = self.env.container.file('man1')
|
|
|
|
file_contents = file_item.read(parms={'multipart-manifest': 'get'})
|
|
|
|
self.assertEqual(file_contents, "man1-contents")
|
2015-01-12 12:51:46 +05:30
|
|
|
self.assertEqual(file_item.info()['x_object_manifest'],
|
|
|
|
"%s/%s/seg_lower" %
|
|
|
|
(self.env.container.name, self.env.segment_prefix))
|
2013-11-21 11:03:46 -08:00
|
|
|
|
|
|
|
def test_get_range(self):
|
|
|
|
file_item = self.env.container.file('man1')
|
|
|
|
file_contents = file_item.read(size=25, offset=8)
|
|
|
|
self.assertEqual(file_contents, "aabbbbbbbbbbccccccccccddd")
|
|
|
|
|
|
|
|
file_contents = file_item.read(size=1, offset=47)
|
|
|
|
self.assertEqual(file_contents, "e")
|
|
|
|
|
|
|
|
def test_get_range_out_of_range(self):
|
|
|
|
file_item = self.env.container.file('man1')
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, file_item.read, size=7, offset=50)
|
|
|
|
self.assert_status(416)
|
|
|
|
|
|
|
|
def test_copy(self):
|
|
|
|
# Adding a new segment, copying the manifest, and then deleting the
|
|
|
|
# segment proves that the new object is really the concatenated
|
|
|
|
# segments and not just a manifest.
|
|
|
|
f_segment = self.env.container.file("%s/seg_lowerf" %
|
|
|
|
(self.env.segment_prefix))
|
|
|
|
f_segment.write('ffffffffff')
|
|
|
|
try:
|
|
|
|
man1_item = self.env.container.file('man1')
|
|
|
|
man1_item.copy(self.env.container.name, "copied-man1")
|
|
|
|
finally:
|
|
|
|
# try not to leave this around for other tests to stumble over
|
|
|
|
f_segment.delete()
|
|
|
|
|
|
|
|
file_item = self.env.container.file('copied-man1')
|
|
|
|
file_contents = file_item.read()
|
|
|
|
self.assertEqual(
|
|
|
|
file_contents,
|
|
|
|
"aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffff")
|
2015-01-12 12:51:46 +05:30
|
|
|
# The copied object must not have X-Object-Manifest
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertNotIn("x_object_manifest", file_item.info())
|
2013-11-21 11:03:46 -08:00
|
|
|
|
2014-04-30 15:00:49 +03:00
|
|
|
def test_copy_account(self):
|
|
|
|
# dlo use same account and same container only
|
|
|
|
acct = self.env.conn.account_name
|
|
|
|
# Adding a new segment, copying the manifest, and then deleting the
|
|
|
|
# segment proves that the new object is really the concatenated
|
|
|
|
# segments and not just a manifest.
|
|
|
|
f_segment = self.env.container.file("%s/seg_lowerf" %
|
|
|
|
(self.env.segment_prefix))
|
|
|
|
f_segment.write('ffffffffff')
|
|
|
|
try:
|
|
|
|
man1_item = self.env.container.file('man1')
|
|
|
|
man1_item.copy_account(acct,
|
|
|
|
self.env.container.name,
|
|
|
|
"copied-man1")
|
|
|
|
finally:
|
|
|
|
# try not to leave this around for other tests to stumble over
|
|
|
|
f_segment.delete()
|
|
|
|
|
|
|
|
file_item = self.env.container.file('copied-man1')
|
|
|
|
file_contents = file_item.read()
|
|
|
|
self.assertEqual(
|
|
|
|
file_contents,
|
|
|
|
"aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffff")
|
2015-01-12 12:51:46 +05:30
|
|
|
# The copied object must not have X-Object-Manifest
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertNotIn("x_object_manifest", file_item.info())
|
2014-04-30 15:00:49 +03:00
|
|
|
|
2014-02-27 22:38:53 -08:00
|
|
|
def test_copy_manifest(self):
|
2015-01-12 12:51:46 +05:30
|
|
|
# Copying the manifest with multipart-manifest=get query string
|
|
|
|
# should result in another manifest
|
2014-02-27 22:38:53 -08:00
|
|
|
try:
|
|
|
|
man1_item = self.env.container.file('man1')
|
|
|
|
man1_item.copy(self.env.container.name, "copied-man1",
|
|
|
|
parms={'multipart-manifest': 'get'})
|
|
|
|
|
|
|
|
copied = self.env.container.file("copied-man1")
|
|
|
|
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
|
|
|
|
self.assertEqual(copied_contents, "man1-contents")
|
|
|
|
|
|
|
|
copied_contents = copied.read()
|
|
|
|
self.assertEqual(
|
|
|
|
copied_contents,
|
|
|
|
"aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeee")
|
2015-01-12 12:51:46 +05:30
|
|
|
self.assertEqual(man1_item.info()['x_object_manifest'],
|
|
|
|
copied.info()['x_object_manifest'])
|
2014-02-27 22:38:53 -08:00
|
|
|
finally:
|
|
|
|
# try not to leave this around for other tests to stumble over
|
|
|
|
self.env.container.file("copied-man1").delete()
|
|
|
|
|
2014-02-20 23:01:00 -08:00
|
|
|
def test_dlo_if_match_get(self):
|
|
|
|
manifest = self.env.container.file("man1")
|
|
|
|
etag = manifest.info()['etag']
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, manifest.read,
|
|
|
|
hdrs={'If-Match': 'not-%s' % etag})
|
|
|
|
self.assert_status(412)
|
|
|
|
|
|
|
|
manifest.read(hdrs={'If-Match': etag})
|
|
|
|
self.assert_status(200)
|
|
|
|
|
|
|
|
def test_dlo_if_none_match_get(self):
|
|
|
|
manifest = self.env.container.file("man1")
|
|
|
|
etag = manifest.info()['etag']
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, manifest.read,
|
|
|
|
hdrs={'If-None-Match': etag})
|
|
|
|
self.assert_status(304)
|
|
|
|
|
|
|
|
manifest.read(hdrs={'If-None-Match': "not-%s" % etag})
|
|
|
|
self.assert_status(200)
|
|
|
|
|
|
|
|
def test_dlo_if_match_head(self):
|
|
|
|
manifest = self.env.container.file("man1")
|
|
|
|
etag = manifest.info()['etag']
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, manifest.info,
|
|
|
|
hdrs={'If-Match': 'not-%s' % etag})
|
|
|
|
self.assert_status(412)
|
|
|
|
|
|
|
|
manifest.info(hdrs={'If-Match': etag})
|
|
|
|
self.assert_status(200)
|
|
|
|
|
|
|
|
def test_dlo_if_none_match_head(self):
|
|
|
|
manifest = self.env.container.file("man1")
|
|
|
|
etag = manifest.info()['etag']
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, manifest.info,
|
|
|
|
hdrs={'If-None-Match': etag})
|
|
|
|
self.assert_status(304)
|
|
|
|
|
|
|
|
manifest.info(hdrs={'If-None-Match': "not-%s" % etag})
|
|
|
|
self.assert_status(200)
|
|
|
|
|
2015-12-16 17:19:24 +11:00
|
|
|
def test_dlo_referer_on_segment_container(self):
|
|
|
|
# First the account2 (test3) should fail
|
|
|
|
headers = {'X-Auth-Token': self.env.conn2.storage_token,
|
|
|
|
'Referer': 'http://blah.example.com'}
|
|
|
|
dlo_file = self.env.container.file("mancont2")
|
|
|
|
self.assertRaises(ResponseError, dlo_file.read,
|
|
|
|
hdrs=headers)
|
|
|
|
self.assert_status(403)
|
|
|
|
|
|
|
|
# Now set the referer on the dlo container only
|
|
|
|
referer_metadata = {'X-Container-Read': '.r:*.example.com,.rlistings'}
|
|
|
|
self.env.container.update_metadata(referer_metadata)
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, dlo_file.read,
|
|
|
|
hdrs=headers)
|
|
|
|
self.assert_status(403)
|
|
|
|
|
|
|
|
# Finally set the referer on the segment container
|
|
|
|
self.env.container2.update_metadata(referer_metadata)
|
|
|
|
|
|
|
|
contents = dlo_file.read(hdrs=headers)
|
|
|
|
self.assertEqual(
|
|
|
|
contents,
|
|
|
|
"ffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj")
|
|
|
|
|
2013-11-21 11:03:46 -08:00
|
|
|
|
|
|
|
class TestDloUTF8(Base2, TestDlo):
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
|
Privileged acct ACL header, new ACL syntax, TempAuth impl.
* Introduce a new privileged account header: X-Account-Access-Control
* Introduce JSON-based version 2 ACL syntax -- see below for discussion
* Implement account ACL authorization in TempAuth
X-Account-Access-Control Header
-------------------------------
Accounts now have a new privileged header to represent ACLs or any other
form of account-level access control. The value of the header is an opaque
string to be interpreted by the auth system, but it must be a JSON-encoded
dictionary. A reference implementation is given in TempAuth, with the
knowledge that historically other auth systems often use TempAuth as a
starting point.
The reference implementation describes three levels of account access:
"admin", "read-write", and "read-only". Adding new access control
features in a future patch (e.g. "write-only" account access) will
automatically be forward- and backward-compatible, due to the JSON
dictionary header format.
The privileged X-Account-Access-Control header may only be read or written
by a user with "swift_owner" status, traditionally the account owner but
now also any user on the "admin" ACL.
Access Levels:
Read-only access is intended to indicate to the auth system that this
list of identities can read everything (except privileged headers) in
the account. Specifically, a user with read-only account access can get
a list of containers in the account, list the contents of any container,
retrieve any object, and see the (non-privileged) headers of the
account, any container, or any object.
Read-write access is intended to indicate to the auth system that this
list of identities can read or write (or create) any container. A user
with read-write account access can create new containers, set any
unprivileged container headers, overwrite objects, delete containers,
etc. A read-write user can NOT set account headers (or perform any
PUT/POST/DELETE requests on the account).
Admin access is intended to indicate to the auth system that this list of
identities has "swift_owner" privileges. A user with admin account access
can do anything the account owner can, including setting account headers
and any privileged headers -- and thus changing the value of
X-Account-Access-Control and thereby granting read-only, read-write, or
admin access to other users.
The auth system is responsible for making decisions based on this header,
if it chooses to support its use. Therefore the above access level
descriptions are necessarily advisory only for other auth systems.
When setting the value of the header, callers are urged to use the new
format_acl() method, described below.
New ACL Format
--------------
The account ACLs introduce a new format for ACLs, rather than reusing the
existing format from X-Container-Read/X-Container-Write. There are several
reasons for this:
* Container ACL format does not support Unicode
* Container ACLs have a different structure than account ACLs
+ account ACLs have no concept of referrers or rlistings
+ accounts have additional "admin" access level
+ account access levels are structured as admin > rw > ro, which seems more
appropriate for how people access accounts, rather than reusing
container ACLs' orthogonal read and write access
In addition, the container ACL syntax is a bit arbitrary and highly custom,
so instead of parsing additional custom syntax, I'd rather propose a next
version and introduce a means for migration. The V2 ACL syntax has the
following benefits:
* JSON is a well-known standard syntax with parsers in all languages
* no artificial value restrictions (you can grant access to a user named
".rlistings" if you want)
* forward and backward compatibility: you may have extraneous keys, but
your attempt to parse the header won't raise an exception
I've introduced hooks in parse_acl and format_acl which currently default
to the old V1 syntax but tolerate the V2 syntax and can easily be flipped
to default to V2. I'm not changing the default or adding code to rewrite
V1 ACLs to V2, because this patch has suffered a lot of scope creep already,
but this seems like a sensible milestone in the migration.
TempAuth Account ACL Implementation
-----------------------------------
As stated above, core Swift is responsible for privileging the
X-Account-Access-Control header (making it only accessible to swift_owners),
for translating it to -sysmeta-* headers to trigger persistence by the
account server, and for including the header in the responses to requests
by privileged users. Core Swift puts no expectation on the *content* of
this header. Auth systems (including TempAuth) are responsible for
defining the content of the header and taking action based on it.
In addition to the changes described above, this patch defines a format
to be used by TempAuth for these headers in the common.middleware.acl
module, in the methods format_v2_acl() and parse_v2_acl(). This patch
also teaches TempAuth to take action based on the header contents. TempAuth
now sets swift_owner=True if the user is on the Admin ACL, authorizes
GET/HEAD/OPTIONS requests if the user is on any ACL, authorizes
PUT/POST/DELETE requests if the user is on the admin or read-write ACL, etc.
Note that the action of setting swift_owner=True triggers core Swift to
add or strip the privileged headers from the responses. Core Swift (not
the auth system) is responsible for that.
DocImpact: Documentation for the new ACL usage and format appears in
summary form in doc/source/overview_auth.rst, and in more detail in
swift/common/middleware/tempauth.py in the TempAuth class docstring.
I leave it to the Swift doc team to determine whether more is needed.
Change-Id: I836a99eaaa6bb0e92dc03e1ca46a474522e6e826
2013-11-13 20:55:14 +00:00
|
|
|
class TestFileComparisonEnv(object):
|
2010-07-12 17:03:45 -05:00
|
|
|
@classmethod
|
|
|
|
def setUp(cls):
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.conn = Connection(tf.config)
|
2010-07-12 17:03:45 -05:00
|
|
|
cls.conn.authenticate()
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.account = Account(cls.conn, tf.config.get('account',
|
|
|
|
tf.config['username']))
|
2010-07-12 17:03:45 -05:00
|
|
|
cls.account.delete_containers()
|
|
|
|
|
|
|
|
cls.container = cls.account.container(Utils.create_name())
|
|
|
|
|
|
|
|
if not cls.container.create():
|
|
|
|
raise ResponseError(cls.conn.response)
|
|
|
|
|
|
|
|
cls.file_count = 20
|
|
|
|
cls.file_size = 128
|
|
|
|
cls.files = list()
|
|
|
|
for x in range(cls.file_count):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = cls.container.file(Utils.create_name())
|
|
|
|
file_item.write_random(cls.file_size)
|
|
|
|
cls.files.append(file_item)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-11-21 11:14:34 +08:00
|
|
|
cls.time_old_f1 = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
|
|
|
|
time.gmtime(time.time() - 86400))
|
|
|
|
cls.time_old_f2 = time.strftime("%A, %d-%b-%y %H:%M:%S GMT",
|
|
|
|
time.gmtime(time.time() - 86400))
|
|
|
|
cls.time_old_f3 = time.strftime("%a %b %d %H:%M:%S %Y",
|
|
|
|
time.gmtime(time.time() - 86400))
|
|
|
|
cls.time_new = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
|
|
|
|
time.gmtime(time.time() + 86400))
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
class TestFileComparison(Base):
|
|
|
|
env = TestFileComparisonEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
def testIfMatch(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for file_item in self.env.files:
|
|
|
|
hdrs = {'If-Match': file_item.md5}
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.read(hdrs=hdrs))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
hdrs = {'If-Match': 'bogus'}
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(412)
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assert_header('etag', file_item.md5)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2016-01-12 11:46:21 -06:00
|
|
|
def testIfMatchMultipleEtags(self):
|
|
|
|
for file_item in self.env.files:
|
|
|
|
hdrs = {'If-Match': '"bogus1", "%s", "bogus2"' % file_item.md5}
|
|
|
|
self.assertTrue(file_item.read(hdrs=hdrs))
|
|
|
|
|
|
|
|
hdrs = {'If-Match': '"bogus1", "bogus2", "bogus3"'}
|
|
|
|
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
|
|
|
|
self.assert_status(412)
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assert_header('etag', file_item.md5)
|
2016-01-12 11:46:21 -06:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
def testIfNoneMatch(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for file_item in self.env.files:
|
2010-07-12 17:03:45 -05:00
|
|
|
hdrs = {'If-None-Match': 'bogus'}
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.read(hdrs=hdrs))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
hdrs = {'If-None-Match': file_item.md5}
|
|
|
|
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(304)
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assert_header('etag', file_item.md5)
|
|
|
|
self.assert_header('accept-ranges', 'bytes')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2016-01-12 11:46:21 -06:00
|
|
|
def testIfNoneMatchMultipleEtags(self):
|
|
|
|
for file_item in self.env.files:
|
|
|
|
hdrs = {'If-None-Match': '"bogus1", "bogus2", "bogus3"'}
|
|
|
|
self.assertTrue(file_item.read(hdrs=hdrs))
|
|
|
|
|
|
|
|
hdrs = {'If-None-Match':
|
|
|
|
'"bogus1", "bogus2", "%s"' % file_item.md5}
|
|
|
|
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
|
|
|
|
self.assert_status(304)
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assert_header('etag', file_item.md5)
|
|
|
|
self.assert_header('accept-ranges', 'bytes')
|
2016-01-12 11:46:21 -06:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
def testIfModifiedSince(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for file_item in self.env.files:
|
2013-11-21 11:14:34 +08:00
|
|
|
hdrs = {'If-Modified-Since': self.env.time_old_f1}
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.read(hdrs=hdrs))
|
|
|
|
self.assertTrue(file_item.info(hdrs=hdrs))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
hdrs = {'If-Modified-Since': self.env.time_new}
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(304)
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assert_header('etag', file_item.md5)
|
|
|
|
self.assert_header('accept-ranges', 'bytes')
|
2014-03-04 11:52:48 -08:00
|
|
|
self.assertRaises(ResponseError, file_item.info, hdrs=hdrs)
|
|
|
|
self.assert_status(304)
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assert_header('etag', file_item.md5)
|
|
|
|
self.assert_header('accept-ranges', 'bytes')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testIfUnmodifiedSince(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for file_item in self.env.files:
|
2010-07-12 17:03:45 -05:00
|
|
|
hdrs = {'If-Unmodified-Since': self.env.time_new}
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.read(hdrs=hdrs))
|
|
|
|
self.assertTrue(file_item.info(hdrs=hdrs))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-11-21 11:14:34 +08:00
|
|
|
hdrs = {'If-Unmodified-Since': self.env.time_old_f2}
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(412)
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assert_header('etag', file_item.md5)
|
2014-03-04 11:52:48 -08:00
|
|
|
self.assertRaises(ResponseError, file_item.info, hdrs=hdrs)
|
|
|
|
self.assert_status(412)
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assert_header('etag', file_item.md5)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testIfMatchAndUnmodified(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for file_item in self.env.files:
|
|
|
|
hdrs = {'If-Match': file_item.md5,
|
2012-09-03 23:30:52 +08:00
|
|
|
'If-Unmodified-Since': self.env.time_new}
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(file_item.read(hdrs=hdrs))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
hdrs = {'If-Match': 'bogus',
|
|
|
|
'If-Unmodified-Since': self.env.time_new}
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(412)
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assert_header('etag', file_item.md5)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
hdrs = {'If-Match': file_item.md5,
|
2013-11-21 11:14:34 +08:00
|
|
|
'If-Unmodified-Since': self.env.time_old_f3}
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(412)
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assert_header('etag', file_item.md5)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-11-07 04:45:27 +00:00
|
|
|
def testLastModified(self):
|
|
|
|
file_name = Utils.create_name()
|
|
|
|
content_type = Utils.create_name()
|
|
|
|
|
2016-03-16 17:41:30 +00:00
|
|
|
file_item = self.env.container.file(file_name)
|
|
|
|
file_item.content_type = content_type
|
|
|
|
resp = file_item.write_random_return_resp(self.env.file_size)
|
2013-11-07 04:45:27 +00:00
|
|
|
put_last_modified = resp.getheader('last-modified')
|
2016-03-16 17:41:30 +00:00
|
|
|
etag = file_item.md5
|
2013-11-07 04:45:27 +00:00
|
|
|
|
2016-03-16 17:41:30 +00:00
|
|
|
file_item = self.env.container.file(file_name)
|
|
|
|
info = file_item.info()
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn('last_modified', info)
|
2013-11-07 04:45:27 +00:00
|
|
|
last_modified = info['last_modified']
|
|
|
|
self.assertEqual(put_last_modified, info['last_modified'])
|
|
|
|
|
|
|
|
hdrs = {'If-Modified-Since': last_modified}
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
|
2013-11-07 04:45:27 +00:00
|
|
|
self.assert_status(304)
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assert_header('etag', etag)
|
|
|
|
self.assert_header('accept-ranges', 'bytes')
|
2013-11-07 04:45:27 +00:00
|
|
|
|
|
|
|
hdrs = {'If-Unmodified-Since': last_modified}
|
2016-03-16 17:41:30 +00:00
|
|
|
self.assertTrue(file_item.read(hdrs=hdrs))
|
2013-11-07 04:45:27 +00:00
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestFileComparisonUTF8(Base2, TestFileComparison):
|
|
|
|
set_up = False
|
|
|
|
|
2013-11-18 13:17:48 -08:00
|
|
|
|
|
|
|
class TestSloEnv(object):
|
|
|
|
slo_enabled = None # tri-state: None initially, then True/False
|
|
|
|
|
2015-02-18 11:59:31 +05:30
|
|
|
@classmethod
|
|
|
|
def create_segments(cls, container):
|
|
|
|
seg_info = {}
|
|
|
|
for letter, size in (('a', 1024 * 1024),
|
|
|
|
('b', 1024 * 1024),
|
|
|
|
('c', 1024 * 1024),
|
|
|
|
('d', 1024 * 1024),
|
|
|
|
('e', 1)):
|
|
|
|
seg_name = "seg_%s" % letter
|
|
|
|
file_item = container.file(seg_name)
|
|
|
|
file_item.write(letter * size)
|
|
|
|
seg_info[seg_name] = {
|
|
|
|
'size_bytes': size,
|
|
|
|
'etag': file_item.md5,
|
|
|
|
'path': '/%s/%s' % (container.name, seg_name)}
|
|
|
|
return seg_info
|
|
|
|
|
2013-11-18 13:17:48 -08:00
|
|
|
@classmethod
|
|
|
|
def setUp(cls):
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.conn = Connection(tf.config)
|
2013-11-18 13:17:48 -08:00
|
|
|
cls.conn.authenticate()
|
2014-04-30 15:00:49 +03:00
|
|
|
config2 = deepcopy(tf.config)
|
|
|
|
config2['account'] = tf.config['account2']
|
|
|
|
config2['username'] = tf.config['username2']
|
|
|
|
config2['password'] = tf.config['password2']
|
|
|
|
cls.conn2 = Connection(config2)
|
|
|
|
cls.conn2.authenticate()
|
|
|
|
cls.account2 = cls.conn2.get_account()
|
|
|
|
cls.account2.delete_containers()
|
2015-12-16 17:19:24 +11:00
|
|
|
config3 = tf.config.copy()
|
|
|
|
config3['username'] = tf.config['username3']
|
|
|
|
config3['password'] = tf.config['password3']
|
|
|
|
cls.conn3 = Connection(config3)
|
|
|
|
cls.conn3.authenticate()
|
2013-12-08 18:00:47 -08:00
|
|
|
|
|
|
|
if cls.slo_enabled is None:
|
2014-03-06 13:11:03 -08:00
|
|
|
cls.slo_enabled = 'slo' in cluster_info
|
2013-12-08 18:00:47 -08:00
|
|
|
if not cls.slo_enabled:
|
|
|
|
return
|
|
|
|
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.account = Account(cls.conn, tf.config.get('account',
|
|
|
|
tf.config['username']))
|
2013-11-18 13:17:48 -08:00
|
|
|
cls.account.delete_containers()
|
|
|
|
|
|
|
|
cls.container = cls.account.container(Utils.create_name())
|
2015-12-16 17:19:24 +11:00
|
|
|
cls.container2 = cls.account.container(Utils.create_name())
|
2013-11-18 13:17:48 -08:00
|
|
|
|
2015-12-16 17:19:24 +11:00
|
|
|
for cont in (cls.container, cls.container2):
|
|
|
|
if not cont.create():
|
|
|
|
raise ResponseError(cls.conn.response)
|
2013-11-18 13:17:48 -08:00
|
|
|
|
2015-02-18 11:59:31 +05:30
|
|
|
cls.seg_info = seg_info = cls.create_segments(cls.container)
|
2013-11-18 13:17:48 -08:00
|
|
|
|
|
|
|
file_item = cls.container.file("manifest-abcde")
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([seg_info['seg_a'], seg_info['seg_b'],
|
|
|
|
seg_info['seg_c'], seg_info['seg_d'],
|
|
|
|
seg_info['seg_e']]),
|
|
|
|
parms={'multipart-manifest': 'put'})
|
|
|
|
|
2015-12-16 17:19:24 +11:00
|
|
|
# Put the same manifest in the container2
|
|
|
|
file_item = cls.container2.file("manifest-abcde")
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([seg_info['seg_a'], seg_info['seg_b'],
|
|
|
|
seg_info['seg_c'], seg_info['seg_d'],
|
|
|
|
seg_info['seg_e']]),
|
|
|
|
parms={'multipart-manifest': 'put'})
|
|
|
|
|
2013-11-18 13:17:48 -08:00
|
|
|
file_item = cls.container.file('manifest-cd')
|
|
|
|
cd_json = json.dumps([seg_info['seg_c'], seg_info['seg_d']])
|
|
|
|
file_item.write(cd_json, parms={'multipart-manifest': 'put'})
|
|
|
|
cd_etag = hashlib.md5(seg_info['seg_c']['etag'] +
|
|
|
|
seg_info['seg_d']['etag']).hexdigest()
|
|
|
|
|
|
|
|
file_item = cls.container.file("manifest-bcd-submanifest")
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([seg_info['seg_b'],
|
|
|
|
{'etag': cd_etag,
|
|
|
|
'size_bytes': (seg_info['seg_c']['size_bytes'] +
|
|
|
|
seg_info['seg_d']['size_bytes']),
|
|
|
|
'path': '/%s/%s' % (cls.container.name,
|
|
|
|
'manifest-cd')}]),
|
|
|
|
parms={'multipart-manifest': 'put'})
|
|
|
|
bcd_submanifest_etag = hashlib.md5(
|
|
|
|
seg_info['seg_b']['etag'] + cd_etag).hexdigest()
|
|
|
|
|
|
|
|
file_item = cls.container.file("manifest-abcde-submanifest")
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([
|
|
|
|
seg_info['seg_a'],
|
|
|
|
{'etag': bcd_submanifest_etag,
|
|
|
|
'size_bytes': (seg_info['seg_b']['size_bytes'] +
|
|
|
|
seg_info['seg_c']['size_bytes'] +
|
|
|
|
seg_info['seg_d']['size_bytes']),
|
|
|
|
'path': '/%s/%s' % (cls.container.name,
|
|
|
|
'manifest-bcd-submanifest')},
|
|
|
|
seg_info['seg_e']]),
|
|
|
|
parms={'multipart-manifest': 'put'})
|
Add the ability to specify ranges for SLO segments
Users can now include an optional 'range' field in segment descriptions
to specify which bytes from the underlying object should be used for the
segment data. Only one range may be specified per segment. Note that the
'etag' and 'size_bytes' fields still describe the backing object as a
whole. So, if a user uploads a manifest like:
[{"path": "/con/obj_seg_1", "etag": null, "size_bytes": 1048576,
"range": "0-1023"},
{"path": "/con/obj_seg_2", "etag": null, "size_bytes": 1048576,
"range": "512-4095"},
{"path": "/con/obj_seg_1", "etag": null, "size_bytes": 1048576,
"range": "-2048"}]
then the segment will consist of the first 1024 bytes of /con/obj_seg_1,
followed by bytes 513 through 4096 (inclusive) of /con/obj_seg_2, and
finally bytes 1046528 through 1048576 (i.e., the last 2048 bytes) of
/con/obj_seg_1.
ETag generation for SLOs had been updated to prevent collisions when
using different ranges for the same set of objects.
Additionally, there are two performance enhancements:
* On download, multiple sequential requests for segments from the same
underlying object will be coalesced into a single ranged request,
provided it still does not meet Swift's "egregious range requests"
critieria.
* On upload, multiple sequential segments referencing the same object
will be validated against the response from a single HEAD request.
Change-Id: Ia21d51c2cef4e2ee5162161dd2c1d3069009b52c
DocImpact
2015-08-11 00:42:30 -05:00
|
|
|
abcde_submanifest_etag = hashlib.md5(
|
|
|
|
seg_info['seg_a']['etag'] + bcd_submanifest_etag +
|
|
|
|
seg_info['seg_e']['etag']).hexdigest()
|
|
|
|
abcde_submanifest_size = (seg_info['seg_a']['size_bytes'] +
|
|
|
|
seg_info['seg_b']['size_bytes'] +
|
|
|
|
seg_info['seg_c']['size_bytes'] +
|
|
|
|
seg_info['seg_d']['size_bytes'] +
|
|
|
|
seg_info['seg_e']['size_bytes'])
|
|
|
|
|
|
|
|
file_item = cls.container.file("ranged-manifest")
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([
|
|
|
|
{'etag': abcde_submanifest_etag,
|
|
|
|
'size_bytes': abcde_submanifest_size,
|
|
|
|
'path': '/%s/%s' % (cls.container.name,
|
|
|
|
'manifest-abcde-submanifest'),
|
|
|
|
'range': '-1048578'}, # 'c' + ('d' * 2**20) + 'e'
|
|
|
|
{'etag': abcde_submanifest_etag,
|
|
|
|
'size_bytes': abcde_submanifest_size,
|
|
|
|
'path': '/%s/%s' % (cls.container.name,
|
|
|
|
'manifest-abcde-submanifest'),
|
|
|
|
'range': '524288-1572863'}, # 'a' * 2**19 + 'b' * 2**19
|
|
|
|
{'etag': abcde_submanifest_etag,
|
|
|
|
'size_bytes': abcde_submanifest_size,
|
|
|
|
'path': '/%s/%s' % (cls.container.name,
|
|
|
|
'manifest-abcde-submanifest'),
|
|
|
|
'range': '3145727-3145728'}]), # 'cd'
|
|
|
|
parms={'multipart-manifest': 'put'})
|
|
|
|
ranged_manifest_etag = hashlib.md5(
|
|
|
|
abcde_submanifest_etag + ':3145727-4194304;' +
|
|
|
|
abcde_submanifest_etag + ':524288-1572863;' +
|
|
|
|
abcde_submanifest_etag + ':3145727-3145728;').hexdigest()
|
|
|
|
ranged_manifest_size = 2 * 1024 * 1024 + 4
|
|
|
|
|
|
|
|
file_item = cls.container.file("ranged-submanifest")
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([
|
|
|
|
seg_info['seg_c'],
|
|
|
|
{'etag': ranged_manifest_etag,
|
|
|
|
'size_bytes': ranged_manifest_size,
|
|
|
|
'path': '/%s/%s' % (cls.container.name,
|
|
|
|
'ranged-manifest')},
|
|
|
|
{'etag': ranged_manifest_etag,
|
|
|
|
'size_bytes': ranged_manifest_size,
|
|
|
|
'path': '/%s/%s' % (cls.container.name,
|
|
|
|
'ranged-manifest'),
|
|
|
|
'range': '524289-1572865'},
|
|
|
|
{'etag': ranged_manifest_etag,
|
|
|
|
'size_bytes': ranged_manifest_size,
|
|
|
|
'path': '/%s/%s' % (cls.container.name,
|
|
|
|
'ranged-manifest'),
|
|
|
|
'range': '-3'}]),
|
|
|
|
parms={'multipart-manifest': 'put'})
|
2013-11-18 13:17:48 -08:00
|
|
|
|
Allow SLO PUTs to forgo per-segment integrity checks
While manifests still require 'etag' and 'size_bytes' fields for each
segment (to catch user errors like 'etaf' or 'size_btyes'), an explicit
null for either will skip that particular integrity check and instead
use whatever value is retrieved when HEADing the segment. So, if a user
uploads a manifest like:
[{"path": "/con/obj_seg_1", "etag": null, "size_bytes": 1048576},
{"path": "/con/obj_seg_2", "etag": "etag2", "size_bytes": null},
{"path": "/con/obj_seg_3", "etag": null, "size_bytes": null}]
then the etag will only be verified for the /con/obj_seg_2 segment,
and the segment size will only be verified for the /con/obj_seg_1
segment. However, the manifest that's ultimately stored (and can be
retrieved with a ?multipart-manifest=get query-string) will still look
like:
[{"name": "/con/obj_seg_1", "hash": "etag1", "bytes": 1048576, ...},
{"name": "/con/obj_seg_2", "hash": "etag2", "bytes": 1048576, ...},
{"name": "/con/obj_seg_3", "hash": "etag3", "bytes": 1234, ...}]
This allows the middleware to continue performing integrity checks on
object GET.
Change-Id: I2c4e585221387dd02a8679a50398d6b614407b12
DocImpact
2015-05-20 00:39:41 -07:00
|
|
|
file_item = cls.container.file("manifest-db")
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([
|
|
|
|
{'path': seg_info['seg_d']['path'], 'etag': None,
|
|
|
|
'size_bytes': None},
|
|
|
|
{'path': seg_info['seg_b']['path'], 'etag': None,
|
|
|
|
'size_bytes': None},
|
|
|
|
]), parms={'multipart-manifest': 'put'})
|
|
|
|
|
2015-12-01 14:59:35 +00:00
|
|
|
file_item = cls.container.file("ranged-manifest-repeated-segment")
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([
|
|
|
|
{'path': seg_info['seg_a']['path'], 'etag': None,
|
|
|
|
'size_bytes': None, 'range': '-1048578'},
|
|
|
|
{'path': seg_info['seg_a']['path'], 'etag': None,
|
|
|
|
'size_bytes': None},
|
|
|
|
{'path': seg_info['seg_b']['path'], 'etag': None,
|
|
|
|
'size_bytes': None, 'range': '-1048578'},
|
|
|
|
]), parms={'multipart-manifest': 'put'})
|
|
|
|
|
2013-11-18 13:17:48 -08:00
|
|
|
|
|
|
|
class TestSlo(Base):
|
|
|
|
env = TestSloEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
def setUp(self):
|
|
|
|
super(TestSlo, self).setUp()
|
|
|
|
if self.env.slo_enabled is False:
|
|
|
|
raise SkipTest("SLO not enabled")
|
|
|
|
elif self.env.slo_enabled is not True:
|
|
|
|
# just some sanity checking
|
|
|
|
raise Exception(
|
|
|
|
"Expected slo_enabled to be True/False, got %r" %
|
|
|
|
(self.env.slo_enabled,))
|
|
|
|
|
|
|
|
def test_slo_get_simple_manifest(self):
|
|
|
|
file_item = self.env.container.file('manifest-abcde')
|
|
|
|
file_contents = file_item.read()
|
|
|
|
self.assertEqual(4 * 1024 * 1024 + 1, len(file_contents))
|
|
|
|
self.assertEqual('a', file_contents[0])
|
|
|
|
self.assertEqual('a', file_contents[1024 * 1024 - 1])
|
|
|
|
self.assertEqual('b', file_contents[1024 * 1024])
|
|
|
|
self.assertEqual('d', file_contents[-2])
|
|
|
|
self.assertEqual('e', file_contents[-1])
|
|
|
|
|
Update container on fast-POST
This patch makes a number of changes to enable content-type
metadata to be updated when using the fast-POST mode of
operation, as proposed in the associated spec [1].
* the object server and diskfile are modified to allow
content-type to be updated by a POST and the updated value
to be stored in .meta files.
* the object server accepts PUTs and DELETEs with older
timestamps than existing .meta files. This is to be
consistent with replication that will leave a later .meta
file in place when replicating a .data file.
* the diskfile interface is modified to provide accessor
methods for the content-type and its timestamp.
* the naming of .meta files is modified to encode two
timestamps when the .meta file contains a content-type value
that was set prior to the latest metadata update; this
enables consistency to be achieved when rsync is used for
replication.
* ssync is modified to sync meta files when content-type
differs between local and remote copies of objects.
* the object server issues container updates when handling
POST requests, notifying the container server of the current
immutable metadata (etag, size, hash, swift_bytes),
content-type with their respective timestamps, and the
mutable metadata timestamp.
* the container server maintains the most recently reported
values for immutable metadata, content-type and mutable
metadata, each with their respective timestamps, in a single
db row.
* new probe tests verify that replication achieves eventual
consistency of containers and objects after discrete updates
to content-type and mutable metadata, and that container-sync
sync's objects after fast-post updates.
[1] spec change-id: I60688efc3df692d3a39557114dca8c5490f7837e
Change-Id: Ia597cd460bb5fd40aa92e886e3e18a7542603d01
2015-08-10 10:30:10 -05:00
|
|
|
def test_slo_container_listing(self):
|
|
|
|
# the listing object size should equal the sum of the size of the
|
|
|
|
# segments, not the size of the manifest body
|
|
|
|
file_item = self.env.container.file(Utils.create_name)
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([self.env.seg_info['seg_a']]),
|
|
|
|
parms={'multipart-manifest': 'put'})
|
2016-05-19 19:58:56 +01:00
|
|
|
# The container listing has the etag of the actual manifest object
|
|
|
|
# contents which we get using multipart-manifest=get. Arguably this
|
|
|
|
# should be the etag that we get when NOT using multipart-manifest=get,
|
|
|
|
# to be consistent with size and content-type. But here we at least
|
|
|
|
# verify that it remains consistent when the object is updated with a
|
|
|
|
# POST.
|
|
|
|
file_item.initialize(parms={'multipart-manifest': 'get'})
|
|
|
|
expected_etag = file_item.etag
|
Update container on fast-POST
This patch makes a number of changes to enable content-type
metadata to be updated when using the fast-POST mode of
operation, as proposed in the associated spec [1].
* the object server and diskfile are modified to allow
content-type to be updated by a POST and the updated value
to be stored in .meta files.
* the object server accepts PUTs and DELETEs with older
timestamps than existing .meta files. This is to be
consistent with replication that will leave a later .meta
file in place when replicating a .data file.
* the diskfile interface is modified to provide accessor
methods for the content-type and its timestamp.
* the naming of .meta files is modified to encode two
timestamps when the .meta file contains a content-type value
that was set prior to the latest metadata update; this
enables consistency to be achieved when rsync is used for
replication.
* ssync is modified to sync meta files when content-type
differs between local and remote copies of objects.
* the object server issues container updates when handling
POST requests, notifying the container server of the current
immutable metadata (etag, size, hash, swift_bytes),
content-type with their respective timestamps, and the
mutable metadata timestamp.
* the container server maintains the most recently reported
values for immutable metadata, content-type and mutable
metadata, each with their respective timestamps, in a single
db row.
* new probe tests verify that replication achieves eventual
consistency of containers and objects after discrete updates
to content-type and mutable metadata, and that container-sync
sync's objects after fast-post updates.
[1] spec change-id: I60688efc3df692d3a39557114dca8c5490f7837e
Change-Id: Ia597cd460bb5fd40aa92e886e3e18a7542603d01
2015-08-10 10:30:10 -05:00
|
|
|
|
2016-05-19 19:58:56 +01:00
|
|
|
listing = self.env.container.files(parms={'format': 'json'})
|
|
|
|
for f_dict in listing:
|
Update container on fast-POST
This patch makes a number of changes to enable content-type
metadata to be updated when using the fast-POST mode of
operation, as proposed in the associated spec [1].
* the object server and diskfile are modified to allow
content-type to be updated by a POST and the updated value
to be stored in .meta files.
* the object server accepts PUTs and DELETEs with older
timestamps than existing .meta files. This is to be
consistent with replication that will leave a later .meta
file in place when replicating a .data file.
* the diskfile interface is modified to provide accessor
methods for the content-type and its timestamp.
* the naming of .meta files is modified to encode two
timestamps when the .meta file contains a content-type value
that was set prior to the latest metadata update; this
enables consistency to be achieved when rsync is used for
replication.
* ssync is modified to sync meta files when content-type
differs between local and remote copies of objects.
* the object server issues container updates when handling
POST requests, notifying the container server of the current
immutable metadata (etag, size, hash, swift_bytes),
content-type with their respective timestamps, and the
mutable metadata timestamp.
* the container server maintains the most recently reported
values for immutable metadata, content-type and mutable
metadata, each with their respective timestamps, in a single
db row.
* new probe tests verify that replication achieves eventual
consistency of containers and objects after discrete updates
to content-type and mutable metadata, and that container-sync
sync's objects after fast-post updates.
[1] spec change-id: I60688efc3df692d3a39557114dca8c5490f7837e
Change-Id: Ia597cd460bb5fd40aa92e886e3e18a7542603d01
2015-08-10 10:30:10 -05:00
|
|
|
if f_dict['name'] == file_item.name:
|
|
|
|
self.assertEqual(1024 * 1024, f_dict['bytes'])
|
|
|
|
self.assertEqual('application/octet-stream',
|
|
|
|
f_dict['content_type'])
|
2016-05-19 19:58:56 +01:00
|
|
|
self.assertEqual(expected_etag, f_dict['hash'])
|
Update container on fast-POST
This patch makes a number of changes to enable content-type
metadata to be updated when using the fast-POST mode of
operation, as proposed in the associated spec [1].
* the object server and diskfile are modified to allow
content-type to be updated by a POST and the updated value
to be stored in .meta files.
* the object server accepts PUTs and DELETEs with older
timestamps than existing .meta files. This is to be
consistent with replication that will leave a later .meta
file in place when replicating a .data file.
* the diskfile interface is modified to provide accessor
methods for the content-type and its timestamp.
* the naming of .meta files is modified to encode two
timestamps when the .meta file contains a content-type value
that was set prior to the latest metadata update; this
enables consistency to be achieved when rsync is used for
replication.
* ssync is modified to sync meta files when content-type
differs between local and remote copies of objects.
* the object server issues container updates when handling
POST requests, notifying the container server of the current
immutable metadata (etag, size, hash, swift_bytes),
content-type with their respective timestamps, and the
mutable metadata timestamp.
* the container server maintains the most recently reported
values for immutable metadata, content-type and mutable
metadata, each with their respective timestamps, in a single
db row.
* new probe tests verify that replication achieves eventual
consistency of containers and objects after discrete updates
to content-type and mutable metadata, and that container-sync
sync's objects after fast-post updates.
[1] spec change-id: I60688efc3df692d3a39557114dca8c5490f7837e
Change-Id: Ia597cd460bb5fd40aa92e886e3e18a7542603d01
2015-08-10 10:30:10 -05:00
|
|
|
break
|
|
|
|
else:
|
|
|
|
self.fail('Failed to find manifest file in container listing')
|
|
|
|
|
|
|
|
# now POST updated content-type file
|
|
|
|
file_item.content_type = 'image/jpeg'
|
|
|
|
file_item.sync_metadata({'X-Object-Meta-Test': 'blah'})
|
|
|
|
file_item.initialize()
|
|
|
|
self.assertEqual('image/jpeg', file_item.content_type) # sanity
|
|
|
|
|
|
|
|
# verify that the container listing is consistent with the file
|
2016-05-19 19:58:56 +01:00
|
|
|
listing = self.env.container.files(parms={'format': 'json'})
|
|
|
|
for f_dict in listing:
|
|
|
|
if f_dict['name'] == file_item.name:
|
|
|
|
self.assertEqual(1024 * 1024, f_dict['bytes'])
|
|
|
|
self.assertEqual(file_item.content_type,
|
|
|
|
f_dict['content_type'])
|
|
|
|
self.assertEqual(expected_etag, f_dict['hash'])
|
|
|
|
break
|
|
|
|
else:
|
|
|
|
self.fail('Failed to find manifest file in container listing')
|
|
|
|
|
|
|
|
# now POST with no change to content-type
|
|
|
|
file_item.sync_metadata({'X-Object-Meta-Test': 'blah'},
|
|
|
|
cfg={'no_content_type': True})
|
|
|
|
file_item.initialize()
|
|
|
|
self.assertEqual('image/jpeg', file_item.content_type) # sanity
|
|
|
|
|
|
|
|
# verify that the container listing is consistent with the file
|
|
|
|
listing = self.env.container.files(parms={'format': 'json'})
|
|
|
|
for f_dict in listing:
|
Update container on fast-POST
This patch makes a number of changes to enable content-type
metadata to be updated when using the fast-POST mode of
operation, as proposed in the associated spec [1].
* the object server and diskfile are modified to allow
content-type to be updated by a POST and the updated value
to be stored in .meta files.
* the object server accepts PUTs and DELETEs with older
timestamps than existing .meta files. This is to be
consistent with replication that will leave a later .meta
file in place when replicating a .data file.
* the diskfile interface is modified to provide accessor
methods for the content-type and its timestamp.
* the naming of .meta files is modified to encode two
timestamps when the .meta file contains a content-type value
that was set prior to the latest metadata update; this
enables consistency to be achieved when rsync is used for
replication.
* ssync is modified to sync meta files when content-type
differs between local and remote copies of objects.
* the object server issues container updates when handling
POST requests, notifying the container server of the current
immutable metadata (etag, size, hash, swift_bytes),
content-type with their respective timestamps, and the
mutable metadata timestamp.
* the container server maintains the most recently reported
values for immutable metadata, content-type and mutable
metadata, each with their respective timestamps, in a single
db row.
* new probe tests verify that replication achieves eventual
consistency of containers and objects after discrete updates
to content-type and mutable metadata, and that container-sync
sync's objects after fast-post updates.
[1] spec change-id: I60688efc3df692d3a39557114dca8c5490f7837e
Change-Id: Ia597cd460bb5fd40aa92e886e3e18a7542603d01
2015-08-10 10:30:10 -05:00
|
|
|
if f_dict['name'] == file_item.name:
|
|
|
|
self.assertEqual(1024 * 1024, f_dict['bytes'])
|
|
|
|
self.assertEqual(file_item.content_type,
|
|
|
|
f_dict['content_type'])
|
2016-05-19 19:58:56 +01:00
|
|
|
self.assertEqual(expected_etag, f_dict['hash'])
|
Update container on fast-POST
This patch makes a number of changes to enable content-type
metadata to be updated when using the fast-POST mode of
operation, as proposed in the associated spec [1].
* the object server and diskfile are modified to allow
content-type to be updated by a POST and the updated value
to be stored in .meta files.
* the object server accepts PUTs and DELETEs with older
timestamps than existing .meta files. This is to be
consistent with replication that will leave a later .meta
file in place when replicating a .data file.
* the diskfile interface is modified to provide accessor
methods for the content-type and its timestamp.
* the naming of .meta files is modified to encode two
timestamps when the .meta file contains a content-type value
that was set prior to the latest metadata update; this
enables consistency to be achieved when rsync is used for
replication.
* ssync is modified to sync meta files when content-type
differs between local and remote copies of objects.
* the object server issues container updates when handling
POST requests, notifying the container server of the current
immutable metadata (etag, size, hash, swift_bytes),
content-type with their respective timestamps, and the
mutable metadata timestamp.
* the container server maintains the most recently reported
values for immutable metadata, content-type and mutable
metadata, each with their respective timestamps, in a single
db row.
* new probe tests verify that replication achieves eventual
consistency of containers and objects after discrete updates
to content-type and mutable metadata, and that container-sync
sync's objects after fast-post updates.
[1] spec change-id: I60688efc3df692d3a39557114dca8c5490f7837e
Change-Id: Ia597cd460bb5fd40aa92e886e3e18a7542603d01
2015-08-10 10:30:10 -05:00
|
|
|
break
|
|
|
|
else:
|
|
|
|
self.fail('Failed to find manifest file in container listing')
|
|
|
|
|
2013-11-18 13:17:48 -08:00
|
|
|
def test_slo_get_nested_manifest(self):
|
|
|
|
file_item = self.env.container.file('manifest-abcde-submanifest')
|
|
|
|
file_contents = file_item.read()
|
|
|
|
self.assertEqual(4 * 1024 * 1024 + 1, len(file_contents))
|
|
|
|
self.assertEqual('a', file_contents[0])
|
|
|
|
self.assertEqual('a', file_contents[1024 * 1024 - 1])
|
|
|
|
self.assertEqual('b', file_contents[1024 * 1024])
|
|
|
|
self.assertEqual('d', file_contents[-2])
|
|
|
|
self.assertEqual('e', file_contents[-1])
|
|
|
|
|
Add the ability to specify ranges for SLO segments
Users can now include an optional 'range' field in segment descriptions
to specify which bytes from the underlying object should be used for the
segment data. Only one range may be specified per segment. Note that the
'etag' and 'size_bytes' fields still describe the backing object as a
whole. So, if a user uploads a manifest like:
[{"path": "/con/obj_seg_1", "etag": null, "size_bytes": 1048576,
"range": "0-1023"},
{"path": "/con/obj_seg_2", "etag": null, "size_bytes": 1048576,
"range": "512-4095"},
{"path": "/con/obj_seg_1", "etag": null, "size_bytes": 1048576,
"range": "-2048"}]
then the segment will consist of the first 1024 bytes of /con/obj_seg_1,
followed by bytes 513 through 4096 (inclusive) of /con/obj_seg_2, and
finally bytes 1046528 through 1048576 (i.e., the last 2048 bytes) of
/con/obj_seg_1.
ETag generation for SLOs had been updated to prevent collisions when
using different ranges for the same set of objects.
Additionally, there are two performance enhancements:
* On download, multiple sequential requests for segments from the same
underlying object will be coalesced into a single ranged request,
provided it still does not meet Swift's "egregious range requests"
critieria.
* On upload, multiple sequential segments referencing the same object
will be validated against the response from a single HEAD request.
Change-Id: Ia21d51c2cef4e2ee5162161dd2c1d3069009b52c
DocImpact
2015-08-11 00:42:30 -05:00
|
|
|
def test_slo_get_ranged_manifest(self):
|
|
|
|
file_item = self.env.container.file('ranged-manifest')
|
|
|
|
grouped_file_contents = [
|
|
|
|
(char, sum(1 for _char in grp))
|
|
|
|
for char, grp in itertools.groupby(file_item.read())]
|
|
|
|
self.assertEqual([
|
|
|
|
('c', 1),
|
|
|
|
('d', 1024 * 1024),
|
|
|
|
('e', 1),
|
|
|
|
('a', 512 * 1024),
|
|
|
|
('b', 512 * 1024),
|
|
|
|
('c', 1),
|
|
|
|
('d', 1)], grouped_file_contents)
|
|
|
|
|
2015-12-01 14:59:35 +00:00
|
|
|
def test_slo_get_ranged_manifest_repeated_segment(self):
|
|
|
|
file_item = self.env.container.file('ranged-manifest-repeated-segment')
|
|
|
|
grouped_file_contents = [
|
|
|
|
(char, sum(1 for _char in grp))
|
|
|
|
for char, grp in itertools.groupby(file_item.read())]
|
|
|
|
self.assertEqual(
|
|
|
|
[('a', 2097152), ('b', 1048576)],
|
|
|
|
grouped_file_contents)
|
|
|
|
|
Add the ability to specify ranges for SLO segments
Users can now include an optional 'range' field in segment descriptions
to specify which bytes from the underlying object should be used for the
segment data. Only one range may be specified per segment. Note that the
'etag' and 'size_bytes' fields still describe the backing object as a
whole. So, if a user uploads a manifest like:
[{"path": "/con/obj_seg_1", "etag": null, "size_bytes": 1048576,
"range": "0-1023"},
{"path": "/con/obj_seg_2", "etag": null, "size_bytes": 1048576,
"range": "512-4095"},
{"path": "/con/obj_seg_1", "etag": null, "size_bytes": 1048576,
"range": "-2048"}]
then the segment will consist of the first 1024 bytes of /con/obj_seg_1,
followed by bytes 513 through 4096 (inclusive) of /con/obj_seg_2, and
finally bytes 1046528 through 1048576 (i.e., the last 2048 bytes) of
/con/obj_seg_1.
ETag generation for SLOs had been updated to prevent collisions when
using different ranges for the same set of objects.
Additionally, there are two performance enhancements:
* On download, multiple sequential requests for segments from the same
underlying object will be coalesced into a single ranged request,
provided it still does not meet Swift's "egregious range requests"
critieria.
* On upload, multiple sequential segments referencing the same object
will be validated against the response from a single HEAD request.
Change-Id: Ia21d51c2cef4e2ee5162161dd2c1d3069009b52c
DocImpact
2015-08-11 00:42:30 -05:00
|
|
|
def test_slo_get_ranged_submanifest(self):
|
|
|
|
file_item = self.env.container.file('ranged-submanifest')
|
|
|
|
grouped_file_contents = [
|
|
|
|
(char, sum(1 for _char in grp))
|
|
|
|
for char, grp in itertools.groupby(file_item.read())]
|
|
|
|
self.assertEqual([
|
|
|
|
('c', 1024 * 1024 + 1),
|
|
|
|
('d', 1024 * 1024),
|
|
|
|
('e', 1),
|
|
|
|
('a', 512 * 1024),
|
|
|
|
('b', 512 * 1024),
|
|
|
|
('c', 1),
|
|
|
|
('d', 512 * 1024 + 1),
|
|
|
|
('e', 1),
|
|
|
|
('a', 512 * 1024),
|
|
|
|
('b', 1),
|
|
|
|
('c', 1),
|
|
|
|
('d', 1)], grouped_file_contents)
|
|
|
|
|
2013-11-18 13:17:48 -08:00
|
|
|
def test_slo_ranged_get(self):
|
|
|
|
file_item = self.env.container.file('manifest-abcde')
|
|
|
|
file_contents = file_item.read(size=1024 * 1024 + 2,
|
|
|
|
offset=1024 * 1024 - 1)
|
|
|
|
self.assertEqual('a', file_contents[0])
|
|
|
|
self.assertEqual('b', file_contents[1])
|
|
|
|
self.assertEqual('b', file_contents[-2])
|
|
|
|
self.assertEqual('c', file_contents[-1])
|
|
|
|
|
|
|
|
def test_slo_ranged_submanifest(self):
|
|
|
|
file_item = self.env.container.file('manifest-abcde-submanifest')
|
|
|
|
file_contents = file_item.read(size=1024 * 1024 + 2,
|
|
|
|
offset=1024 * 1024 * 2 - 1)
|
|
|
|
self.assertEqual('b', file_contents[0])
|
|
|
|
self.assertEqual('c', file_contents[1])
|
|
|
|
self.assertEqual('c', file_contents[-2])
|
|
|
|
self.assertEqual('d', file_contents[-1])
|
|
|
|
|
|
|
|
def test_slo_etag_is_hash_of_etags(self):
|
|
|
|
expected_hash = hashlib.md5()
|
|
|
|
expected_hash.update(hashlib.md5('a' * 1024 * 1024).hexdigest())
|
|
|
|
expected_hash.update(hashlib.md5('b' * 1024 * 1024).hexdigest())
|
|
|
|
expected_hash.update(hashlib.md5('c' * 1024 * 1024).hexdigest())
|
|
|
|
expected_hash.update(hashlib.md5('d' * 1024 * 1024).hexdigest())
|
|
|
|
expected_hash.update(hashlib.md5('e').hexdigest())
|
|
|
|
expected_etag = expected_hash.hexdigest()
|
|
|
|
|
|
|
|
file_item = self.env.container.file('manifest-abcde')
|
|
|
|
self.assertEqual(expected_etag, file_item.info()['etag'])
|
|
|
|
|
|
|
|
def test_slo_etag_is_hash_of_etags_submanifests(self):
|
|
|
|
|
|
|
|
def hd(x):
|
|
|
|
return hashlib.md5(x).hexdigest()
|
|
|
|
|
|
|
|
expected_etag = hd(hd('a' * 1024 * 1024) +
|
|
|
|
hd(hd('b' * 1024 * 1024) +
|
|
|
|
hd(hd('c' * 1024 * 1024) +
|
|
|
|
hd('d' * 1024 * 1024))) +
|
|
|
|
hd('e'))
|
|
|
|
|
|
|
|
file_item = self.env.container.file('manifest-abcde-submanifest')
|
|
|
|
self.assertEqual(expected_etag, file_item.info()['etag'])
|
|
|
|
|
|
|
|
def test_slo_etag_mismatch(self):
|
|
|
|
file_item = self.env.container.file("manifest-a-bad-etag")
|
|
|
|
try:
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([{
|
|
|
|
'size_bytes': 1024 * 1024,
|
|
|
|
'etag': 'not it',
|
|
|
|
'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]),
|
|
|
|
parms={'multipart-manifest': 'put'})
|
|
|
|
except ResponseError as err:
|
|
|
|
self.assertEqual(400, err.status)
|
|
|
|
else:
|
|
|
|
self.fail("Expected ResponseError but didn't get it")
|
|
|
|
|
|
|
|
def test_slo_size_mismatch(self):
|
|
|
|
file_item = self.env.container.file("manifest-a-bad-size")
|
|
|
|
try:
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([{
|
|
|
|
'size_bytes': 1024 * 1024 - 1,
|
|
|
|
'etag': hashlib.md5('a' * 1024 * 1024).hexdigest(),
|
|
|
|
'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]),
|
|
|
|
parms={'multipart-manifest': 'put'})
|
|
|
|
except ResponseError as err:
|
|
|
|
self.assertEqual(400, err.status)
|
|
|
|
else:
|
|
|
|
self.fail("Expected ResponseError but didn't get it")
|
|
|
|
|
Allow SLO PUTs to forgo per-segment integrity checks
While manifests still require 'etag' and 'size_bytes' fields for each
segment (to catch user errors like 'etaf' or 'size_btyes'), an explicit
null for either will skip that particular integrity check and instead
use whatever value is retrieved when HEADing the segment. So, if a user
uploads a manifest like:
[{"path": "/con/obj_seg_1", "etag": null, "size_bytes": 1048576},
{"path": "/con/obj_seg_2", "etag": "etag2", "size_bytes": null},
{"path": "/con/obj_seg_3", "etag": null, "size_bytes": null}]
then the etag will only be verified for the /con/obj_seg_2 segment,
and the segment size will only be verified for the /con/obj_seg_1
segment. However, the manifest that's ultimately stored (and can be
retrieved with a ?multipart-manifest=get query-string) will still look
like:
[{"name": "/con/obj_seg_1", "hash": "etag1", "bytes": 1048576, ...},
{"name": "/con/obj_seg_2", "hash": "etag2", "bytes": 1048576, ...},
{"name": "/con/obj_seg_3", "hash": "etag3", "bytes": 1234, ...}]
This allows the middleware to continue performing integrity checks on
object GET.
Change-Id: I2c4e585221387dd02a8679a50398d6b614407b12
DocImpact
2015-05-20 00:39:41 -07:00
|
|
|
def test_slo_unspecified_etag(self):
|
|
|
|
file_item = self.env.container.file("manifest-a-unspecified-etag")
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([{
|
|
|
|
'size_bytes': 1024 * 1024,
|
|
|
|
'etag': None,
|
|
|
|
'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]),
|
|
|
|
parms={'multipart-manifest': 'put'})
|
|
|
|
self.assert_status(201)
|
|
|
|
|
|
|
|
def test_slo_unspecified_size(self):
|
|
|
|
file_item = self.env.container.file("manifest-a-unspecified-size")
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([{
|
|
|
|
'size_bytes': None,
|
|
|
|
'etag': hashlib.md5('a' * 1024 * 1024).hexdigest(),
|
|
|
|
'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]),
|
|
|
|
parms={'multipart-manifest': 'put'})
|
|
|
|
self.assert_status(201)
|
|
|
|
|
|
|
|
def test_slo_missing_etag(self):
|
|
|
|
file_item = self.env.container.file("manifest-a-missing-etag")
|
|
|
|
try:
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([{
|
|
|
|
'size_bytes': 1024 * 1024,
|
|
|
|
'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]),
|
|
|
|
parms={'multipart-manifest': 'put'})
|
|
|
|
except ResponseError as err:
|
|
|
|
self.assertEqual(400, err.status)
|
|
|
|
else:
|
|
|
|
self.fail("Expected ResponseError but didn't get it")
|
|
|
|
|
|
|
|
def test_slo_missing_size(self):
|
|
|
|
file_item = self.env.container.file("manifest-a-missing-size")
|
|
|
|
try:
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([{
|
|
|
|
'etag': hashlib.md5('a' * 1024 * 1024).hexdigest(),
|
|
|
|
'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]),
|
|
|
|
parms={'multipart-manifest': 'put'})
|
|
|
|
except ResponseError as err:
|
|
|
|
self.assertEqual(400, err.status)
|
|
|
|
else:
|
|
|
|
self.fail("Expected ResponseError but didn't get it")
|
|
|
|
|
2015-04-16 11:42:12 -07:00
|
|
|
def test_slo_overwrite_segment_with_manifest(self):
|
|
|
|
file_item = self.env.container.file("seg_b")
|
Improve SLO PUT error checking
This commit tries to give the user a reason that their SLO manifest
was invalid instead of just saying "Invalid SLO Manifest File". It
doesn't get every error condition, but it's better than before.
Examples of things that now have real error messages include:
* bad keys in manifest (e.g. using "name" instead of "path")
* bogus range (e.g. "bytes=123-taco")
* multiple ranges (e.g. "bytes=10-20,30-40")
* bad JSON structure (i.e. not a list of objects)
* non-integer size_bytes
Also fixed an annoyance with unspecified-size segments that are too
small. Previously, if you uploaded a segment reference with
'{"size_bytes": null, ...}' in it and the referenced segment was less
than 1 MiB, you'd get a response that looked like this:
HTTP/1.1 400 Bad Request
Content-Length: 62
Content-Type: text/html; charset=UTF-8
X-Trans-Id: txd9ee3b25896642098e4d9-0055dd095a
Date: Wed, 26 Aug 2015 00:33:30 GMT
Each segment, except the last, must be at least 1048576 bytes.
This is true, but not particularly helpful, since it doesn't tell you
which of your segments violated the rule.
Now you get something more like this:
HTTP/1.1 400 Bad Request
Content-Length: 49
Content-Type: text/plain
X-Trans-Id: tx586e52580bac4956ad8e2-0055dd09c2
Date: Wed, 26 Aug 2015 00:35:14 GMT
Errors:
/segs/small, Too Small; each segment, except the last...
It's not exactly a tutorial on SLO manifests, but at least it names
the problematic segment.
This also changes the status code for a self-referential manifest from
409 to 400. The rest of the error machinery was using 400, and
special-casing self-reference would be really annoying. Besides, now
that we're showing more than one error message at a time, what would
the right status code be for a manifest with a self-referential
segment *and* a segment with a bad range? 400? 409? 404.5? It's much
more consistent to just say invalid manifest --> 400.
Change-Id: I2275683230b36bc273319254e37c16b9e9b9d69c
2015-09-25 17:26:37 -07:00
|
|
|
with self.assertRaises(ResponseError) as catcher:
|
2015-04-16 11:42:12 -07:00
|
|
|
file_item.write(
|
|
|
|
json.dumps([
|
|
|
|
{'size_bytes': 1024 * 1024,
|
|
|
|
'etag': hashlib.md5('a' * 1024 * 1024).hexdigest(),
|
|
|
|
'path': '/%s/%s' % (self.env.container.name, 'seg_a')},
|
|
|
|
{'size_bytes': 1024 * 1024,
|
|
|
|
'etag': hashlib.md5('b' * 1024 * 1024).hexdigest(),
|
|
|
|
'path': '/%s/%s' % (self.env.container.name, 'seg_b')},
|
|
|
|
{'size_bytes': 1024 * 1024,
|
|
|
|
'etag': hashlib.md5('c' * 1024 * 1024).hexdigest(),
|
|
|
|
'path': '/%s/%s' % (self.env.container.name, 'seg_c')}]),
|
|
|
|
parms={'multipart-manifest': 'put'})
|
Improve SLO PUT error checking
This commit tries to give the user a reason that their SLO manifest
was invalid instead of just saying "Invalid SLO Manifest File". It
doesn't get every error condition, but it's better than before.
Examples of things that now have real error messages include:
* bad keys in manifest (e.g. using "name" instead of "path")
* bogus range (e.g. "bytes=123-taco")
* multiple ranges (e.g. "bytes=10-20,30-40")
* bad JSON structure (i.e. not a list of objects)
* non-integer size_bytes
Also fixed an annoyance with unspecified-size segments that are too
small. Previously, if you uploaded a segment reference with
'{"size_bytes": null, ...}' in it and the referenced segment was less
than 1 MiB, you'd get a response that looked like this:
HTTP/1.1 400 Bad Request
Content-Length: 62
Content-Type: text/html; charset=UTF-8
X-Trans-Id: txd9ee3b25896642098e4d9-0055dd095a
Date: Wed, 26 Aug 2015 00:33:30 GMT
Each segment, except the last, must be at least 1048576 bytes.
This is true, but not particularly helpful, since it doesn't tell you
which of your segments violated the rule.
Now you get something more like this:
HTTP/1.1 400 Bad Request
Content-Length: 49
Content-Type: text/plain
X-Trans-Id: tx586e52580bac4956ad8e2-0055dd09c2
Date: Wed, 26 Aug 2015 00:35:14 GMT
Errors:
/segs/small, Too Small; each segment, except the last...
It's not exactly a tutorial on SLO manifests, but at least it names
the problematic segment.
This also changes the status code for a self-referential manifest from
409 to 400. The rest of the error machinery was using 400, and
special-casing self-reference would be really annoying. Besides, now
that we're showing more than one error message at a time, what would
the right status code be for a manifest with a self-referential
segment *and* a segment with a bad range? 400? 409? 404.5? It's much
more consistent to just say invalid manifest --> 400.
Change-Id: I2275683230b36bc273319254e37c16b9e9b9d69c
2015-09-25 17:26:37 -07:00
|
|
|
self.assertEqual(400, catcher.exception.status)
|
2015-04-16 11:42:12 -07:00
|
|
|
|
2013-11-18 13:17:48 -08:00
|
|
|
def test_slo_copy(self):
|
|
|
|
file_item = self.env.container.file("manifest-abcde")
|
|
|
|
file_item.copy(self.env.container.name, "copied-abcde")
|
|
|
|
|
|
|
|
copied = self.env.container.file("copied-abcde")
|
|
|
|
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
|
|
|
|
self.assertEqual(4 * 1024 * 1024 + 1, len(copied_contents))
|
|
|
|
|
2014-04-30 15:00:49 +03:00
|
|
|
def test_slo_copy_account(self):
|
|
|
|
acct = self.env.conn.account_name
|
|
|
|
# same account copy
|
|
|
|
file_item = self.env.container.file("manifest-abcde")
|
|
|
|
file_item.copy_account(acct, self.env.container.name, "copied-abcde")
|
|
|
|
|
|
|
|
copied = self.env.container.file("copied-abcde")
|
|
|
|
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
|
|
|
|
self.assertEqual(4 * 1024 * 1024 + 1, len(copied_contents))
|
|
|
|
|
|
|
|
# copy to different account
|
|
|
|
acct = self.env.conn2.account_name
|
|
|
|
dest_cont = self.env.account2.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(dest_cont.create(hdrs={
|
2014-04-30 15:00:49 +03:00
|
|
|
'X-Container-Write': self.env.conn.user_acl
|
|
|
|
}))
|
|
|
|
file_item = self.env.container.file("manifest-abcde")
|
|
|
|
file_item.copy_account(acct, dest_cont, "copied-abcde")
|
|
|
|
|
|
|
|
copied = dest_cont.file("copied-abcde")
|
|
|
|
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
|
|
|
|
self.assertEqual(4 * 1024 * 1024 + 1, len(copied_contents))
|
|
|
|
|
2013-11-18 13:17:48 -08:00
|
|
|
def test_slo_copy_the_manifest(self):
|
2016-05-19 19:58:56 +01:00
|
|
|
source = self.env.container.file("manifest-abcde")
|
|
|
|
source_contents = source.read(parms={'multipart-manifest': 'get'})
|
|
|
|
source_json = json.loads(source_contents)
|
|
|
|
source.initialize()
|
|
|
|
self.assertEqual('application/octet-stream', source.content_type)
|
|
|
|
source.initialize(parms={'multipart-manifest': 'get'})
|
|
|
|
source_hash = hashlib.md5()
|
|
|
|
source_hash.update(source_contents)
|
|
|
|
self.assertEqual(source_hash.hexdigest(), source.etag)
|
|
|
|
|
|
|
|
self.assertTrue(source.copy(self.env.container.name,
|
|
|
|
"copied-abcde-manifest-only",
|
|
|
|
parms={'multipart-manifest': 'get'}))
|
2013-11-18 13:17:48 -08:00
|
|
|
|
Move all SLO functionality to middleware
This way, with zero additional effort, SLO will support enhancements
to object storage and retrieval, such as:
* automatic resume of GETs on broken connection (today)
* storage policies (in the near future)
* erasure-coded object segments (in the far future)
This also lets SLOs work with other sorts of hypothetical third-party
middleware, for example object compression or encryption.
Getting COPY to work here is sort of a hack; the proxy's object
controller now checks for "swift.copy_response_hook" in the request's
environment and feeds the GET response (the source of the new object's
data) through it. This lets a COPY of a SLO manifest actually combine
the segments instead of merely copying the manifest document.
Updated ObjectController to expect a response's app_iter to be an
iterable, not just an iterator. (PEP 333 says "When called by the
server, the application object must return an iterable yielding zero
or more strings." ObjectController was just being too strict.) This
way, SLO can re-use the same response-generation logic for GET and
COPY requests.
Added a (sort of hokey) mechanism to allow middlewares to close
incompletely-consumed app iterators without triggering a warning. SLO
does this when it realizes it's performed a ranged GET on a manifest;
it closes the iterable, removes the range, and retries the
request. Without this change, the proxy logs would get 'Client
disconnected on read' in them.
DocImpact
blueprint multi-ring-large-objects
Change-Id: Ic11662eb5c7176fbf422a6fc87a569928d6f85a1
2013-11-13 12:06:55 -08:00
|
|
|
copied = self.env.container.file("copied-abcde-manifest-only")
|
2013-11-18 13:17:48 -08:00
|
|
|
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
|
|
|
|
try:
|
2016-05-19 19:58:56 +01:00
|
|
|
copied_json = json.loads(copied_contents)
|
2013-11-18 13:17:48 -08:00
|
|
|
except ValueError:
|
|
|
|
self.fail("COPY didn't copy the manifest (invalid json on GET)")
|
2016-05-19 19:58:56 +01:00
|
|
|
self.assertEqual(source_json, copied_json)
|
|
|
|
copied.initialize()
|
|
|
|
self.assertEqual('application/octet-stream', copied.content_type)
|
|
|
|
copied.initialize(parms={'multipart-manifest': 'get'})
|
|
|
|
copied_hash = hashlib.md5()
|
|
|
|
copied_hash.update(copied_contents)
|
|
|
|
self.assertEqual(copied_hash.hexdigest(), copied.etag)
|
|
|
|
|
|
|
|
# verify the listing metadata
|
|
|
|
listing = self.env.container.files(parms={'format': 'json'})
|
|
|
|
names = {}
|
|
|
|
for f_dict in listing:
|
|
|
|
if f_dict['name'] in ('manifest-abcde',
|
|
|
|
'copied-abcde-manifest-only'):
|
|
|
|
names[f_dict['name']] = f_dict
|
|
|
|
|
|
|
|
self.assertIn('manifest-abcde', names)
|
|
|
|
actual = names['manifest-abcde']
|
|
|
|
self.assertEqual(4 * 1024 * 1024 + 1, actual['bytes'])
|
|
|
|
self.assertEqual('application/octet-stream', actual['content_type'])
|
|
|
|
self.assertEqual(source.etag, actual['hash'])
|
|
|
|
|
|
|
|
self.assertIn('copied-abcde-manifest-only', names)
|
|
|
|
actual = names['copied-abcde-manifest-only']
|
|
|
|
self.assertEqual(4 * 1024 * 1024 + 1, actual['bytes'])
|
|
|
|
self.assertEqual('application/octet-stream', actual['content_type'])
|
|
|
|
self.assertEqual(copied.etag, actual['hash'])
|
|
|
|
|
|
|
|
def test_slo_copy_the_manifest_updating_metadata(self):
|
|
|
|
source = self.env.container.file("manifest-abcde")
|
|
|
|
source.content_type = 'application/octet-stream'
|
|
|
|
source.sync_metadata({'test': 'original'})
|
|
|
|
source_contents = source.read(parms={'multipart-manifest': 'get'})
|
|
|
|
source_json = json.loads(source_contents)
|
|
|
|
source.initialize()
|
|
|
|
self.assertEqual('application/octet-stream', source.content_type)
|
|
|
|
source.initialize(parms={'multipart-manifest': 'get'})
|
|
|
|
source_hash = hashlib.md5()
|
|
|
|
source_hash.update(source_contents)
|
|
|
|
self.assertEqual(source_hash.hexdigest(), source.etag)
|
|
|
|
self.assertEqual(source.metadata['test'], 'original')
|
|
|
|
|
|
|
|
self.assertTrue(
|
|
|
|
source.copy(self.env.container.name, "copied-abcde-manifest-only",
|
|
|
|
parms={'multipart-manifest': 'get'},
|
|
|
|
hdrs={'Content-Type': 'image/jpeg',
|
|
|
|
'X-Object-Meta-Test': 'updated'}))
|
|
|
|
|
|
|
|
copied = self.env.container.file("copied-abcde-manifest-only")
|
|
|
|
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
|
|
|
|
try:
|
|
|
|
copied_json = json.loads(copied_contents)
|
|
|
|
except ValueError:
|
|
|
|
self.fail("COPY didn't copy the manifest (invalid json on GET)")
|
|
|
|
self.assertEqual(source_json, copied_json)
|
|
|
|
copied.initialize()
|
|
|
|
self.assertEqual('image/jpeg', copied.content_type)
|
|
|
|
copied.initialize(parms={'multipart-manifest': 'get'})
|
|
|
|
copied_hash = hashlib.md5()
|
|
|
|
copied_hash.update(copied_contents)
|
|
|
|
self.assertEqual(copied_hash.hexdigest(), copied.etag)
|
|
|
|
self.assertEqual(copied.metadata['test'], 'updated')
|
|
|
|
|
|
|
|
# verify the listing metadata
|
|
|
|
listing = self.env.container.files(parms={'format': 'json'})
|
|
|
|
names = {}
|
|
|
|
for f_dict in listing:
|
|
|
|
if f_dict['name'] in ('manifest-abcde',
|
|
|
|
'copied-abcde-manifest-only'):
|
|
|
|
names[f_dict['name']] = f_dict
|
|
|
|
|
|
|
|
self.assertIn('manifest-abcde', names)
|
|
|
|
actual = names['manifest-abcde']
|
|
|
|
self.assertEqual(4 * 1024 * 1024 + 1, actual['bytes'])
|
|
|
|
self.assertEqual('application/octet-stream', actual['content_type'])
|
|
|
|
# the container listing should have the etag of the manifest contents
|
|
|
|
self.assertEqual(source.etag, actual['hash'])
|
|
|
|
|
|
|
|
self.assertIn('copied-abcde-manifest-only', names)
|
|
|
|
actual = names['copied-abcde-manifest-only']
|
|
|
|
self.assertEqual(4 * 1024 * 1024 + 1, actual['bytes'])
|
|
|
|
self.assertEqual('image/jpeg', actual['content_type'])
|
|
|
|
self.assertEqual(copied.etag, actual['hash'])
|
2013-11-18 13:17:48 -08:00
|
|
|
|
2014-04-30 15:00:49 +03:00
|
|
|
def test_slo_copy_the_manifest_account(self):
|
|
|
|
acct = self.env.conn.account_name
|
|
|
|
# same account
|
|
|
|
file_item = self.env.container.file("manifest-abcde")
|
|
|
|
file_item.copy_account(acct,
|
|
|
|
self.env.container.name,
|
|
|
|
"copied-abcde-manifest-only",
|
|
|
|
parms={'multipart-manifest': 'get'})
|
|
|
|
|
|
|
|
copied = self.env.container.file("copied-abcde-manifest-only")
|
|
|
|
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
|
|
|
|
try:
|
|
|
|
json.loads(copied_contents)
|
|
|
|
except ValueError:
|
|
|
|
self.fail("COPY didn't copy the manifest (invalid json on GET)")
|
|
|
|
|
|
|
|
# different account
|
|
|
|
acct = self.env.conn2.account_name
|
|
|
|
dest_cont = self.env.account2.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(dest_cont.create(hdrs={
|
2014-04-30 15:00:49 +03:00
|
|
|
'X-Container-Write': self.env.conn.user_acl
|
|
|
|
}))
|
2015-02-18 11:59:31 +05:30
|
|
|
|
|
|
|
# manifest copy will fail because there is no read access to segments
|
|
|
|
# in destination account
|
|
|
|
file_item.copy_account(
|
|
|
|
acct, dest_cont, "copied-abcde-manifest-only",
|
|
|
|
parms={'multipart-manifest': 'get'})
|
|
|
|
self.assertEqual(400, file_item.conn.response.status)
|
|
|
|
resp_body = file_item.conn.response.read()
|
|
|
|
self.assertEqual(5, resp_body.count('403 Forbidden'),
|
|
|
|
'Unexpected response body %r' % resp_body)
|
|
|
|
|
|
|
|
# create segments container in account2 with read access for account1
|
|
|
|
segs_container = self.env.account2.container(self.env.container.name)
|
|
|
|
self.assertTrue(segs_container.create(hdrs={
|
|
|
|
'X-Container-Read': self.env.conn.user_acl
|
|
|
|
}))
|
|
|
|
|
|
|
|
# manifest copy will still fail because there are no segments in
|
|
|
|
# destination account
|
|
|
|
file_item.copy_account(
|
|
|
|
acct, dest_cont, "copied-abcde-manifest-only",
|
|
|
|
parms={'multipart-manifest': 'get'})
|
|
|
|
self.assertEqual(400, file_item.conn.response.status)
|
|
|
|
resp_body = file_item.conn.response.read()
|
|
|
|
self.assertEqual(5, resp_body.count('404 Not Found'),
|
|
|
|
'Unexpected response body %r' % resp_body)
|
|
|
|
|
|
|
|
# create segments in account2 container with same name as in account1,
|
|
|
|
# manifest copy now succeeds
|
|
|
|
self.env.create_segments(segs_container)
|
|
|
|
|
|
|
|
self.assertTrue(file_item.copy_account(
|
|
|
|
acct, dest_cont, "copied-abcde-manifest-only",
|
|
|
|
parms={'multipart-manifest': 'get'}))
|
2014-04-30 15:00:49 +03:00
|
|
|
|
|
|
|
copied = dest_cont.file("copied-abcde-manifest-only")
|
|
|
|
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
|
|
|
|
try:
|
|
|
|
json.loads(copied_contents)
|
|
|
|
except ValueError:
|
|
|
|
self.fail("COPY didn't copy the manifest (invalid json on GET)")
|
|
|
|
|
2015-04-24 02:15:36 -07:00
|
|
|
def _make_manifest(self):
|
|
|
|
file_item = self.env.container.file("manifest-post")
|
|
|
|
seg_info = self.env.seg_info
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([seg_info['seg_a'], seg_info['seg_b'],
|
|
|
|
seg_info['seg_c'], seg_info['seg_d'],
|
|
|
|
seg_info['seg_e']]),
|
|
|
|
parms={'multipart-manifest': 'put'})
|
|
|
|
return file_item
|
|
|
|
|
|
|
|
def test_slo_post_the_manifest_metadata_update(self):
|
|
|
|
file_item = self._make_manifest()
|
|
|
|
# sanity check, check the object is an SLO manifest
|
|
|
|
file_item.info()
|
|
|
|
file_item.header_fields([('slo', 'x-static-large-object')])
|
|
|
|
|
|
|
|
# POST a user metadata (i.e. x-object-meta-post)
|
|
|
|
file_item.sync_metadata({'post': 'update'})
|
|
|
|
|
|
|
|
updated = self.env.container.file("manifest-post")
|
|
|
|
updated.info()
|
|
|
|
updated.header_fields([('user-meta', 'x-object-meta-post')]) # sanity
|
2015-05-13 00:43:59 -07:00
|
|
|
updated.header_fields([('slo', 'x-static-large-object')])
|
2015-04-24 02:15:36 -07:00
|
|
|
updated_contents = updated.read(parms={'multipart-manifest': 'get'})
|
|
|
|
try:
|
|
|
|
json.loads(updated_contents)
|
|
|
|
except ValueError:
|
|
|
|
self.fail("Unexpected content on GET, expected a json body")
|
|
|
|
|
|
|
|
def test_slo_post_the_manifest_metadata_update_with_qs(self):
|
|
|
|
# multipart-manifest query should be ignored on post
|
|
|
|
for verb in ('put', 'get', 'delete'):
|
|
|
|
file_item = self._make_manifest()
|
|
|
|
# sanity check, check the object is an SLO manifest
|
|
|
|
file_item.info()
|
|
|
|
file_item.header_fields([('slo', 'x-static-large-object')])
|
|
|
|
# POST a user metadata (i.e. x-object-meta-post)
|
|
|
|
file_item.sync_metadata(metadata={'post': 'update'},
|
|
|
|
parms={'multipart-manifest': verb})
|
|
|
|
updated = self.env.container.file("manifest-post")
|
|
|
|
updated.info()
|
|
|
|
updated.header_fields(
|
|
|
|
[('user-meta', 'x-object-meta-post')]) # sanity
|
2015-05-13 00:43:59 -07:00
|
|
|
updated.header_fields([('slo', 'x-static-large-object')])
|
2015-04-24 02:15:36 -07:00
|
|
|
updated_contents = updated.read(
|
|
|
|
parms={'multipart-manifest': 'get'})
|
|
|
|
try:
|
|
|
|
json.loads(updated_contents)
|
|
|
|
except ValueError:
|
|
|
|
self.fail(
|
|
|
|
"Unexpected content on GET, expected a json body")
|
|
|
|
|
Move all SLO functionality to middleware
This way, with zero additional effort, SLO will support enhancements
to object storage and retrieval, such as:
* automatic resume of GETs on broken connection (today)
* storage policies (in the near future)
* erasure-coded object segments (in the far future)
This also lets SLOs work with other sorts of hypothetical third-party
middleware, for example object compression or encryption.
Getting COPY to work here is sort of a hack; the proxy's object
controller now checks for "swift.copy_response_hook" in the request's
environment and feeds the GET response (the source of the new object's
data) through it. This lets a COPY of a SLO manifest actually combine
the segments instead of merely copying the manifest document.
Updated ObjectController to expect a response's app_iter to be an
iterable, not just an iterator. (PEP 333 says "When called by the
server, the application object must return an iterable yielding zero
or more strings." ObjectController was just being too strict.) This
way, SLO can re-use the same response-generation logic for GET and
COPY requests.
Added a (sort of hokey) mechanism to allow middlewares to close
incompletely-consumed app iterators without triggering a warning. SLO
does this when it realizes it's performed a ranged GET on a manifest;
it closes the iterable, removes the range, and retries the
request. Without this change, the proxy logs would get 'Client
disconnected on read' in them.
DocImpact
blueprint multi-ring-large-objects
Change-Id: Ic11662eb5c7176fbf422a6fc87a569928d6f85a1
2013-11-13 12:06:55 -08:00
|
|
|
def test_slo_get_the_manifest(self):
|
|
|
|
manifest = self.env.container.file("manifest-abcde")
|
|
|
|
got_body = manifest.read(parms={'multipart-manifest': 'get'})
|
|
|
|
|
|
|
|
self.assertEqual('application/json; charset=utf-8',
|
|
|
|
manifest.content_type)
|
|
|
|
try:
|
|
|
|
json.loads(got_body)
|
|
|
|
except ValueError:
|
|
|
|
self.fail("GET with multipart-manifest=get got invalid json")
|
|
|
|
|
Allow SLO PUTs to forgo per-segment integrity checks
While manifests still require 'etag' and 'size_bytes' fields for each
segment (to catch user errors like 'etaf' or 'size_btyes'), an explicit
null for either will skip that particular integrity check and instead
use whatever value is retrieved when HEADing the segment. So, if a user
uploads a manifest like:
[{"path": "/con/obj_seg_1", "etag": null, "size_bytes": 1048576},
{"path": "/con/obj_seg_2", "etag": "etag2", "size_bytes": null},
{"path": "/con/obj_seg_3", "etag": null, "size_bytes": null}]
then the etag will only be verified for the /con/obj_seg_2 segment,
and the segment size will only be verified for the /con/obj_seg_1
segment. However, the manifest that's ultimately stored (and can be
retrieved with a ?multipart-manifest=get query-string) will still look
like:
[{"name": "/con/obj_seg_1", "hash": "etag1", "bytes": 1048576, ...},
{"name": "/con/obj_seg_2", "hash": "etag2", "bytes": 1048576, ...},
{"name": "/con/obj_seg_3", "hash": "etag3", "bytes": 1234, ...}]
This allows the middleware to continue performing integrity checks on
object GET.
Change-Id: I2c4e585221387dd02a8679a50398d6b614407b12
DocImpact
2015-05-20 00:39:41 -07:00
|
|
|
def test_slo_get_the_manifest_with_details_from_server(self):
|
|
|
|
manifest = self.env.container.file("manifest-db")
|
|
|
|
got_body = manifest.read(parms={'multipart-manifest': 'get'})
|
|
|
|
|
|
|
|
self.assertEqual('application/json; charset=utf-8',
|
|
|
|
manifest.content_type)
|
|
|
|
try:
|
|
|
|
value = json.loads(got_body)
|
|
|
|
except ValueError:
|
|
|
|
self.fail("GET with multipart-manifest=get got invalid json")
|
|
|
|
|
|
|
|
self.assertEqual(len(value), 2)
|
|
|
|
self.assertEqual(value[0]['bytes'], 1024 * 1024)
|
|
|
|
self.assertEqual(value[0]['hash'],
|
|
|
|
hashlib.md5('d' * 1024 * 1024).hexdigest())
|
|
|
|
self.assertEqual(value[0]['name'],
|
|
|
|
'/%s/seg_d' % self.env.container.name.decode("utf-8"))
|
|
|
|
|
|
|
|
self.assertEqual(value[1]['bytes'], 1024 * 1024)
|
|
|
|
self.assertEqual(value[1]['hash'],
|
|
|
|
hashlib.md5('b' * 1024 * 1024).hexdigest())
|
|
|
|
self.assertEqual(value[1]['name'],
|
|
|
|
'/%s/seg_b' % self.env.container.name.decode("utf-8"))
|
|
|
|
|
2016-01-05 14:40:50 -06:00
|
|
|
def test_slo_get_raw_the_manifest_with_details_from_server(self):
|
|
|
|
manifest = self.env.container.file("manifest-db")
|
|
|
|
got_body = manifest.read(parms={'multipart-manifest': 'get',
|
|
|
|
'format': 'raw'})
|
|
|
|
|
2016-05-19 19:58:56 +01:00
|
|
|
# raw format should have the actual manifest object content-type
|
|
|
|
self.assertEqual('application/octet-stream', manifest.content_type)
|
2016-01-05 14:40:50 -06:00
|
|
|
try:
|
|
|
|
value = json.loads(got_body)
|
|
|
|
except ValueError:
|
|
|
|
msg = "GET with multipart-manifest=get&format=raw got invalid json"
|
|
|
|
self.fail(msg)
|
|
|
|
|
|
|
|
self.assertEqual(
|
|
|
|
set(value[0].keys()), set(('size_bytes', 'etag', 'path')))
|
|
|
|
self.assertEqual(len(value), 2)
|
|
|
|
self.assertEqual(value[0]['size_bytes'], 1024 * 1024)
|
|
|
|
self.assertEqual(value[0]['etag'],
|
|
|
|
hashlib.md5('d' * 1024 * 1024).hexdigest())
|
|
|
|
self.assertEqual(value[0]['path'],
|
|
|
|
'/%s/seg_d' % self.env.container.name.decode("utf-8"))
|
|
|
|
self.assertEqual(value[1]['size_bytes'], 1024 * 1024)
|
|
|
|
self.assertEqual(value[1]['etag'],
|
|
|
|
hashlib.md5('b' * 1024 * 1024).hexdigest())
|
|
|
|
self.assertEqual(value[1]['path'],
|
|
|
|
'/%s/seg_b' % self.env.container.name.decode("utf-8"))
|
|
|
|
|
|
|
|
file_item = self.env.container.file("manifest-from-get-raw")
|
|
|
|
file_item.write(got_body, parms={'multipart-manifest': 'put'})
|
|
|
|
|
|
|
|
file_contents = file_item.read()
|
|
|
|
self.assertEqual(2 * 1024 * 1024, len(file_contents))
|
|
|
|
|
Move all SLO functionality to middleware
This way, with zero additional effort, SLO will support enhancements
to object storage and retrieval, such as:
* automatic resume of GETs on broken connection (today)
* storage policies (in the near future)
* erasure-coded object segments (in the far future)
This also lets SLOs work with other sorts of hypothetical third-party
middleware, for example object compression or encryption.
Getting COPY to work here is sort of a hack; the proxy's object
controller now checks for "swift.copy_response_hook" in the request's
environment and feeds the GET response (the source of the new object's
data) through it. This lets a COPY of a SLO manifest actually combine
the segments instead of merely copying the manifest document.
Updated ObjectController to expect a response's app_iter to be an
iterable, not just an iterator. (PEP 333 says "When called by the
server, the application object must return an iterable yielding zero
or more strings." ObjectController was just being too strict.) This
way, SLO can re-use the same response-generation logic for GET and
COPY requests.
Added a (sort of hokey) mechanism to allow middlewares to close
incompletely-consumed app iterators without triggering a warning. SLO
does this when it realizes it's performed a ranged GET on a manifest;
it closes the iterable, removes the range, and retries the
request. Without this change, the proxy logs would get 'Client
disconnected on read' in them.
DocImpact
blueprint multi-ring-large-objects
Change-Id: Ic11662eb5c7176fbf422a6fc87a569928d6f85a1
2013-11-13 12:06:55 -08:00
|
|
|
def test_slo_head_the_manifest(self):
|
|
|
|
manifest = self.env.container.file("manifest-abcde")
|
|
|
|
got_info = manifest.info(parms={'multipart-manifest': 'get'})
|
|
|
|
|
|
|
|
self.assertEqual('application/json; charset=utf-8',
|
|
|
|
got_info['content_type'])
|
|
|
|
|
2014-02-20 22:01:39 -08:00
|
|
|
def test_slo_if_match_get(self):
|
|
|
|
manifest = self.env.container.file("manifest-abcde")
|
|
|
|
etag = manifest.info()['etag']
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, manifest.read,
|
|
|
|
hdrs={'If-Match': 'not-%s' % etag})
|
|
|
|
self.assert_status(412)
|
|
|
|
|
|
|
|
manifest.read(hdrs={'If-Match': etag})
|
|
|
|
self.assert_status(200)
|
|
|
|
|
2016-04-13 11:07:44 -07:00
|
|
|
def test_slo_if_none_match_put(self):
|
|
|
|
file_item = self.env.container.file("manifest-if-none-match")
|
|
|
|
manifest = json.dumps([{
|
|
|
|
'size_bytes': 1024 * 1024,
|
|
|
|
'etag': None,
|
|
|
|
'path': '/%s/%s' % (self.env.container.name, 'seg_a')}])
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, file_item.write, manifest,
|
|
|
|
parms={'multipart-manifest': 'put'},
|
|
|
|
hdrs={'If-None-Match': '"not-star"'})
|
|
|
|
self.assert_status(400)
|
|
|
|
|
|
|
|
file_item.write(manifest, parms={'multipart-manifest': 'put'},
|
|
|
|
hdrs={'If-None-Match': '*'})
|
|
|
|
self.assert_status(201)
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, file_item.write, manifest,
|
|
|
|
parms={'multipart-manifest': 'put'},
|
|
|
|
hdrs={'If-None-Match': '*'})
|
|
|
|
self.assert_status(412)
|
|
|
|
|
2014-02-20 22:01:39 -08:00
|
|
|
def test_slo_if_none_match_get(self):
|
|
|
|
manifest = self.env.container.file("manifest-abcde")
|
|
|
|
etag = manifest.info()['etag']
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, manifest.read,
|
|
|
|
hdrs={'If-None-Match': etag})
|
|
|
|
self.assert_status(304)
|
|
|
|
|
|
|
|
manifest.read(hdrs={'If-None-Match': "not-%s" % etag})
|
|
|
|
self.assert_status(200)
|
|
|
|
|
|
|
|
def test_slo_if_match_head(self):
|
|
|
|
manifest = self.env.container.file("manifest-abcde")
|
|
|
|
etag = manifest.info()['etag']
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, manifest.info,
|
|
|
|
hdrs={'If-Match': 'not-%s' % etag})
|
|
|
|
self.assert_status(412)
|
|
|
|
|
|
|
|
manifest.info(hdrs={'If-Match': etag})
|
|
|
|
self.assert_status(200)
|
|
|
|
|
|
|
|
def test_slo_if_none_match_head(self):
|
|
|
|
manifest = self.env.container.file("manifest-abcde")
|
|
|
|
etag = manifest.info()['etag']
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, manifest.info,
|
|
|
|
hdrs={'If-None-Match': etag})
|
|
|
|
self.assert_status(304)
|
|
|
|
|
|
|
|
manifest.info(hdrs={'If-None-Match': "not-%s" % etag})
|
|
|
|
self.assert_status(200)
|
|
|
|
|
2015-12-16 17:19:24 +11:00
|
|
|
def test_slo_referer_on_segment_container(self):
|
|
|
|
# First the account2 (test3) should fail
|
|
|
|
headers = {'X-Auth-Token': self.env.conn3.storage_token,
|
|
|
|
'Referer': 'http://blah.example.com'}
|
|
|
|
slo_file = self.env.container2.file('manifest-abcde')
|
|
|
|
self.assertRaises(ResponseError, slo_file.read,
|
|
|
|
hdrs=headers)
|
|
|
|
self.assert_status(403)
|
|
|
|
|
|
|
|
# Now set the referer on the slo container only
|
|
|
|
referer_metadata = {'X-Container-Read': '.r:*.example.com,.rlistings'}
|
|
|
|
self.env.container2.update_metadata(referer_metadata)
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, slo_file.read,
|
|
|
|
hdrs=headers)
|
|
|
|
self.assert_status(409)
|
|
|
|
|
|
|
|
# Finally set the referer on the segment container
|
|
|
|
self.env.container.update_metadata(referer_metadata)
|
|
|
|
contents = slo_file.read(hdrs=headers)
|
|
|
|
self.assertEqual(4 * 1024 * 1024 + 1, len(contents))
|
|
|
|
self.assertEqual('a', contents[0])
|
|
|
|
self.assertEqual('a', contents[1024 * 1024 - 1])
|
|
|
|
self.assertEqual('b', contents[1024 * 1024])
|
|
|
|
self.assertEqual('d', contents[-2])
|
|
|
|
self.assertEqual('e', contents[-1])
|
|
|
|
|
2013-11-18 13:17:48 -08:00
|
|
|
|
|
|
|
class TestSloUTF8(Base2, TestSlo):
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
|
2013-12-18 10:43:29 -08:00
|
|
|
class TestObjectVersioningEnv(object):
|
|
|
|
versioning_enabled = None # tri-state: None initially, then True/False
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def setUp(cls):
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.conn = Connection(tf.config)
|
2014-11-09 13:13:27 -05:00
|
|
|
cls.storage_url, cls.storage_token = cls.conn.authenticate()
|
2013-12-18 10:43:29 -08:00
|
|
|
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.account = Account(cls.conn, tf.config.get('account',
|
|
|
|
tf.config['username']))
|
2013-12-18 10:43:29 -08:00
|
|
|
|
2015-03-20 10:17:25 +00:00
|
|
|
# Second connection for ACL tests
|
|
|
|
config2 = deepcopy(tf.config)
|
|
|
|
config2['account'] = tf.config['account2']
|
|
|
|
config2['username'] = tf.config['username2']
|
|
|
|
config2['password'] = tf.config['password2']
|
|
|
|
cls.conn2 = Connection(config2)
|
|
|
|
cls.conn2.authenticate()
|
|
|
|
|
2013-12-18 10:43:29 -08:00
|
|
|
# avoid getting a prefix that stops halfway through an encoded
|
|
|
|
# character
|
|
|
|
prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")
|
|
|
|
|
|
|
|
cls.versions_container = cls.account.container(prefix + "-versions")
|
|
|
|
if not cls.versions_container.create():
|
|
|
|
raise ResponseError(cls.conn.response)
|
|
|
|
|
|
|
|
cls.container = cls.account.container(prefix + "-objs")
|
|
|
|
if not cls.container.create(
|
|
|
|
hdrs={'X-Versions-Location': cls.versions_container.name}):
|
2016-01-07 15:46:17 -08:00
|
|
|
if cls.conn.response.status == 412:
|
|
|
|
cls.versioning_enabled = False
|
|
|
|
return
|
2013-12-18 10:43:29 -08:00
|
|
|
raise ResponseError(cls.conn.response)
|
|
|
|
|
|
|
|
container_info = cls.container.info()
|
|
|
|
# if versioning is off, then X-Versions-Location won't persist
|
|
|
|
cls.versioning_enabled = 'versions' in container_info
|
|
|
|
|
2014-11-09 13:13:27 -05:00
|
|
|
# setup another account to test ACLs
|
|
|
|
config2 = deepcopy(tf.config)
|
|
|
|
config2['account'] = tf.config['account2']
|
|
|
|
config2['username'] = tf.config['username2']
|
|
|
|
config2['password'] = tf.config['password2']
|
|
|
|
cls.conn2 = Connection(config2)
|
|
|
|
cls.storage_url2, cls.storage_token2 = cls.conn2.authenticate()
|
|
|
|
cls.account2 = cls.conn2.get_account()
|
|
|
|
cls.account2.delete_containers()
|
|
|
|
|
|
|
|
# setup another account with no access to anything to test ACLs
|
|
|
|
config3 = deepcopy(tf.config)
|
|
|
|
config3['account'] = tf.config['account']
|
|
|
|
config3['username'] = tf.config['username3']
|
|
|
|
config3['password'] = tf.config['password3']
|
|
|
|
cls.conn3 = Connection(config3)
|
|
|
|
cls.storage_url3, cls.storage_token3 = cls.conn3.authenticate()
|
|
|
|
cls.account3 = cls.conn3.get_account()
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def tearDown(cls):
|
|
|
|
cls.account.delete_containers()
|
|
|
|
cls.account2.delete_containers()
|
|
|
|
|
2013-12-18 10:43:29 -08:00
|
|
|
|
2014-04-09 19:15:04 +08:00
|
|
|
class TestCrossPolicyObjectVersioningEnv(object):
|
|
|
|
# tri-state: None initially, then True/False
|
|
|
|
versioning_enabled = None
|
|
|
|
multiple_policies_enabled = None
|
|
|
|
policies = None
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def setUp(cls):
|
|
|
|
cls.conn = Connection(tf.config)
|
|
|
|
cls.conn.authenticate()
|
|
|
|
|
|
|
|
if cls.multiple_policies_enabled is None:
|
|
|
|
try:
|
|
|
|
cls.policies = tf.FunctionalStoragePolicyCollection.from_info()
|
|
|
|
except AssertionError:
|
|
|
|
pass
|
|
|
|
|
|
|
|
if cls.policies and len(cls.policies) > 1:
|
|
|
|
cls.multiple_policies_enabled = True
|
|
|
|
else:
|
|
|
|
cls.multiple_policies_enabled = False
|
2016-01-07 15:46:17 -08:00
|
|
|
cls.versioning_enabled = True
|
|
|
|
# We don't actually know the state of versioning, but without
|
|
|
|
# multiple policies the tests should be skipped anyway. Claiming
|
|
|
|
# versioning support lets us report the right reason for skipping.
|
2014-04-09 19:15:04 +08:00
|
|
|
return
|
|
|
|
|
|
|
|
policy = cls.policies.select()
|
|
|
|
version_policy = cls.policies.exclude(name=policy['name']).select()
|
|
|
|
|
|
|
|
cls.account = Account(cls.conn, tf.config.get('account',
|
|
|
|
tf.config['username']))
|
|
|
|
|
2015-03-20 10:17:25 +00:00
|
|
|
# Second connection for ACL tests
|
|
|
|
config2 = deepcopy(tf.config)
|
|
|
|
config2['account'] = tf.config['account2']
|
|
|
|
config2['username'] = tf.config['username2']
|
|
|
|
config2['password'] = tf.config['password2']
|
|
|
|
cls.conn2 = Connection(config2)
|
|
|
|
cls.conn2.authenticate()
|
|
|
|
|
2014-04-09 19:15:04 +08:00
|
|
|
# avoid getting a prefix that stops halfway through an encoded
|
|
|
|
# character
|
|
|
|
prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")
|
|
|
|
|
|
|
|
cls.versions_container = cls.account.container(prefix + "-versions")
|
|
|
|
if not cls.versions_container.create(
|
2014-06-23 12:52:50 -07:00
|
|
|
{'X-Storage-Policy': policy['name']}):
|
2014-04-09 19:15:04 +08:00
|
|
|
raise ResponseError(cls.conn.response)
|
|
|
|
|
|
|
|
cls.container = cls.account.container(prefix + "-objs")
|
|
|
|
if not cls.container.create(
|
|
|
|
hdrs={'X-Versions-Location': cls.versions_container.name,
|
2014-06-23 12:52:50 -07:00
|
|
|
'X-Storage-Policy': version_policy['name']}):
|
2016-01-07 15:46:17 -08:00
|
|
|
if cls.conn.response.status == 412:
|
|
|
|
cls.versioning_enabled = False
|
|
|
|
return
|
2014-04-09 19:15:04 +08:00
|
|
|
raise ResponseError(cls.conn.response)
|
|
|
|
|
|
|
|
container_info = cls.container.info()
|
|
|
|
# if versioning is off, then X-Versions-Location won't persist
|
|
|
|
cls.versioning_enabled = 'versions' in container_info
|
|
|
|
|
2014-11-09 13:13:27 -05:00
|
|
|
# setup another account to test ACLs
|
|
|
|
config2 = deepcopy(tf.config)
|
|
|
|
config2['account'] = tf.config['account2']
|
|
|
|
config2['username'] = tf.config['username2']
|
|
|
|
config2['password'] = tf.config['password2']
|
|
|
|
cls.conn2 = Connection(config2)
|
|
|
|
cls.storage_url2, cls.storage_token2 = cls.conn2.authenticate()
|
|
|
|
cls.account2 = cls.conn2.get_account()
|
|
|
|
cls.account2.delete_containers()
|
|
|
|
|
|
|
|
# setup another account with no access to anything to test ACLs
|
|
|
|
config3 = deepcopy(tf.config)
|
|
|
|
config3['account'] = tf.config['account']
|
|
|
|
config3['username'] = tf.config['username3']
|
|
|
|
config3['password'] = tf.config['password3']
|
|
|
|
cls.conn3 = Connection(config3)
|
|
|
|
cls.storage_url3, cls.storage_token3 = cls.conn3.authenticate()
|
|
|
|
cls.account3 = cls.conn3.get_account()
|
|
|
|
|
2016-01-07 15:46:17 -08:00
|
|
|
@classmethod
|
|
|
|
def tearDown(cls):
|
|
|
|
cls.account.delete_containers()
|
|
|
|
cls.account2.delete_containers()
|
|
|
|
|
2014-04-09 19:15:04 +08:00
|
|
|
|
2013-12-18 10:43:29 -08:00
|
|
|
class TestObjectVersioning(Base):
|
|
|
|
env = TestObjectVersioningEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
def setUp(self):
|
|
|
|
super(TestObjectVersioning, self).setUp()
|
|
|
|
if self.env.versioning_enabled is False:
|
|
|
|
raise SkipTest("Object versioning not enabled")
|
|
|
|
elif self.env.versioning_enabled is not True:
|
|
|
|
# just some sanity checking
|
|
|
|
raise Exception(
|
|
|
|
"Expected versioning_enabled to be True/False, got %r" %
|
|
|
|
(self.env.versioning_enabled,))
|
|
|
|
|
2016-02-08 15:31:42 +00:00
|
|
|
def _tear_down_files(self):
|
2015-03-20 10:17:25 +00:00
|
|
|
try:
|
2016-02-08 15:31:42 +00:00
|
|
|
# only delete files and not containers
|
2014-11-09 13:13:27 -05:00
|
|
|
# as they were configured in self.env
|
2015-03-20 10:17:25 +00:00
|
|
|
self.env.versions_container.delete_files()
|
|
|
|
self.env.container.delete_files()
|
|
|
|
except ResponseError:
|
|
|
|
pass
|
|
|
|
|
2016-02-08 15:31:42 +00:00
|
|
|
def tearDown(self):
|
|
|
|
super(TestObjectVersioning, self).tearDown()
|
|
|
|
self._tear_down_files()
|
|
|
|
|
2014-11-09 13:13:27 -05:00
|
|
|
def test_clear_version_option(self):
|
|
|
|
# sanity
|
|
|
|
self.assertEqual(self.env.container.info()['versions'],
|
|
|
|
self.env.versions_container.name)
|
|
|
|
self.env.container.update_metadata(
|
|
|
|
hdrs={'X-Versions-Location': ''})
|
|
|
|
self.assertEqual(self.env.container.info().get('versions'), None)
|
|
|
|
|
|
|
|
# set location back to the way it was
|
|
|
|
self.env.container.update_metadata(
|
|
|
|
hdrs={'X-Versions-Location': self.env.versions_container.name})
|
|
|
|
self.assertEqual(self.env.container.info()['versions'],
|
|
|
|
self.env.versions_container.name)
|
|
|
|
|
2013-12-18 10:43:29 -08:00
|
|
|
def test_overwriting(self):
|
|
|
|
container = self.env.container
|
|
|
|
versions_container = self.env.versions_container
|
2014-11-09 13:13:27 -05:00
|
|
|
cont_info = container.info()
|
2015-08-05 23:58:14 +05:30
|
|
|
self.assertEqual(cont_info['versions'], versions_container.name)
|
2014-11-09 13:13:27 -05:00
|
|
|
|
2013-12-18 10:43:29 -08:00
|
|
|
obj_name = Utils.create_name()
|
|
|
|
|
|
|
|
versioned_obj = container.file(obj_name)
|
2015-12-21 16:25:45 -02:00
|
|
|
put_headers = {'Content-Type': 'text/jibberish01',
|
|
|
|
'Content-Encoding': 'gzip',
|
|
|
|
'Content-Disposition': 'attachment; filename=myfile'}
|
|
|
|
versioned_obj.write("aaaaa", hdrs=put_headers)
|
2014-11-09 13:13:27 -05:00
|
|
|
obj_info = versioned_obj.info()
|
|
|
|
self.assertEqual('text/jibberish01', obj_info['content_type'])
|
2013-12-18 10:43:29 -08:00
|
|
|
|
2015-12-21 16:25:45 -02:00
|
|
|
# the allowed headers are configurable in object server, so we cannot
|
|
|
|
# assert that content-encoding or content-disposition get *copied* to
|
|
|
|
# the object version unless they were set on the original PUT, so
|
|
|
|
# populate expected_headers by making a HEAD on the original object
|
|
|
|
resp_headers = dict(versioned_obj.conn.response.getheaders())
|
|
|
|
expected_headers = {}
|
|
|
|
for k, v in put_headers.items():
|
|
|
|
if k.lower() in resp_headers:
|
|
|
|
expected_headers[k] = v
|
|
|
|
|
2013-12-18 10:43:29 -08:00
|
|
|
self.assertEqual(0, versions_container.info()['object_count'])
|
2014-11-09 13:13:27 -05:00
|
|
|
versioned_obj.write("bbbbb", hdrs={'Content-Type': 'text/jibberish02',
|
|
|
|
'X-Object-Meta-Foo': 'Bar'})
|
|
|
|
versioned_obj.initialize()
|
|
|
|
self.assertEqual(versioned_obj.content_type, 'text/jibberish02')
|
|
|
|
self.assertEqual(versioned_obj.metadata['foo'], 'Bar')
|
2013-12-18 10:43:29 -08:00
|
|
|
|
|
|
|
# the old version got saved off
|
|
|
|
self.assertEqual(1, versions_container.info()['object_count'])
|
|
|
|
versioned_obj_name = versions_container.files()[0]
|
2014-11-09 13:13:27 -05:00
|
|
|
prev_version = versions_container.file(versioned_obj_name)
|
|
|
|
prev_version.initialize()
|
|
|
|
self.assertEqual("aaaaa", prev_version.read())
|
|
|
|
self.assertEqual(prev_version.content_type, 'text/jibberish01')
|
|
|
|
|
2015-12-21 16:25:45 -02:00
|
|
|
resp_headers = dict(prev_version.conn.response.getheaders())
|
|
|
|
for k, v in expected_headers.items():
|
|
|
|
self.assertIn(k.lower(), resp_headers)
|
|
|
|
self.assertEqual(v, resp_headers[k.lower()])
|
|
|
|
|
2014-11-09 13:13:27 -05:00
|
|
|
# make sure the new obj metadata did not leak to the prev. version
|
|
|
|
self.assertTrue('foo' not in prev_version.metadata)
|
|
|
|
|
|
|
|
# check that POST does not create a new version
|
|
|
|
versioned_obj.sync_metadata(metadata={'fu': 'baz'})
|
|
|
|
self.assertEqual(1, versions_container.info()['object_count'])
|
2013-12-18 10:43:29 -08:00
|
|
|
|
|
|
|
# if we overwrite it again, there are two versions
|
|
|
|
versioned_obj.write("ccccc")
|
|
|
|
self.assertEqual(2, versions_container.info()['object_count'])
|
2014-11-09 13:13:27 -05:00
|
|
|
versioned_obj_name = versions_container.files()[1]
|
|
|
|
prev_version = versions_container.file(versioned_obj_name)
|
|
|
|
prev_version.initialize()
|
|
|
|
self.assertEqual("bbbbb", prev_version.read())
|
|
|
|
self.assertEqual(prev_version.content_type, 'text/jibberish02')
|
|
|
|
self.assertTrue('foo' in prev_version.metadata)
|
|
|
|
self.assertTrue('fu' in prev_version.metadata)
|
2013-12-18 10:43:29 -08:00
|
|
|
|
|
|
|
# as we delete things, the old contents return
|
|
|
|
self.assertEqual("ccccc", versioned_obj.read())
|
2014-11-09 13:13:27 -05:00
|
|
|
|
|
|
|
# test copy from a different container
|
|
|
|
src_container = self.env.account.container(Utils.create_name())
|
|
|
|
self.assertTrue(src_container.create())
|
|
|
|
src_name = Utils.create_name()
|
|
|
|
src_obj = src_container.file(src_name)
|
|
|
|
src_obj.write("ddddd", hdrs={'Content-Type': 'text/jibberish04'})
|
|
|
|
src_obj.copy(container.name, obj_name)
|
|
|
|
|
|
|
|
self.assertEqual("ddddd", versioned_obj.read())
|
|
|
|
versioned_obj.initialize()
|
|
|
|
self.assertEqual(versioned_obj.content_type, 'text/jibberish04')
|
|
|
|
|
|
|
|
# make sure versions container has the previous version
|
|
|
|
self.assertEqual(3, versions_container.info()['object_count'])
|
|
|
|
versioned_obj_name = versions_container.files()[2]
|
|
|
|
prev_version = versions_container.file(versioned_obj_name)
|
|
|
|
prev_version.initialize()
|
|
|
|
self.assertEqual("ccccc", prev_version.read())
|
|
|
|
|
|
|
|
# test delete
|
|
|
|
versioned_obj.delete()
|
|
|
|
self.assertEqual("ccccc", versioned_obj.read())
|
2013-12-18 10:43:29 -08:00
|
|
|
versioned_obj.delete()
|
|
|
|
self.assertEqual("bbbbb", versioned_obj.read())
|
|
|
|
versioned_obj.delete()
|
|
|
|
self.assertEqual("aaaaa", versioned_obj.read())
|
2014-11-09 13:13:27 -05:00
|
|
|
self.assertEqual(0, versions_container.info()['object_count'])
|
2015-12-21 16:25:45 -02:00
|
|
|
|
|
|
|
# verify that all the original object headers have been copied back
|
|
|
|
obj_info = versioned_obj.info()
|
|
|
|
self.assertEqual('text/jibberish01', obj_info['content_type'])
|
|
|
|
resp_headers = dict(versioned_obj.conn.response.getheaders())
|
|
|
|
for k, v in expected_headers.items():
|
|
|
|
self.assertIn(k.lower(), resp_headers)
|
|
|
|
self.assertEqual(v, resp_headers[k.lower()])
|
|
|
|
|
2013-12-18 10:43:29 -08:00
|
|
|
versioned_obj.delete()
|
|
|
|
self.assertRaises(ResponseError, versioned_obj.read)
|
|
|
|
|
2014-12-08 21:29:30 -05:00
|
|
|
def test_versioning_dlo(self):
|
|
|
|
container = self.env.container
|
|
|
|
versions_container = self.env.versions_container
|
|
|
|
obj_name = Utils.create_name()
|
|
|
|
|
|
|
|
for i in ('1', '2', '3'):
|
|
|
|
time.sleep(.01) # guarantee that the timestamp changes
|
|
|
|
obj_name_seg = obj_name + '/' + i
|
|
|
|
versioned_obj = container.file(obj_name_seg)
|
|
|
|
versioned_obj.write(i)
|
|
|
|
versioned_obj.write(i + i)
|
|
|
|
|
|
|
|
self.assertEqual(3, versions_container.info()['object_count'])
|
|
|
|
|
|
|
|
man_file = container.file(obj_name)
|
|
|
|
man_file.write('', hdrs={"X-Object-Manifest": "%s/%s/" %
|
|
|
|
(self.env.container.name, obj_name)})
|
|
|
|
|
|
|
|
# guarantee that the timestamp changes
|
|
|
|
time.sleep(.01)
|
|
|
|
|
|
|
|
# write manifest file again
|
|
|
|
man_file.write('', hdrs={"X-Object-Manifest": "%s/%s/" %
|
|
|
|
(self.env.container.name, obj_name)})
|
|
|
|
|
|
|
|
self.assertEqual(3, versions_container.info()['object_count'])
|
|
|
|
self.assertEqual("112233", man_file.read())
|
|
|
|
|
2014-11-09 13:13:27 -05:00
|
|
|
def test_versioning_container_acl(self):
|
|
|
|
# create versions container and DO NOT give write access to account2
|
|
|
|
versions_container = self.env.account.container(Utils.create_name())
|
|
|
|
self.assertTrue(versions_container.create(hdrs={
|
|
|
|
'X-Container-Write': ''
|
|
|
|
}))
|
|
|
|
|
|
|
|
# check account2 cannot write to versions container
|
|
|
|
fail_obj_name = Utils.create_name()
|
|
|
|
fail_obj = versions_container.file(fail_obj_name)
|
|
|
|
self.assertRaises(ResponseError, fail_obj.write, "should fail",
|
|
|
|
cfg={'use_token': self.env.storage_token2})
|
|
|
|
|
|
|
|
# create container and give write access to account2
|
|
|
|
# don't set X-Versions-Location just yet
|
|
|
|
container = self.env.account.container(Utils.create_name())
|
|
|
|
self.assertTrue(container.create(hdrs={
|
|
|
|
'X-Container-Write': self.env.conn2.user_acl}))
|
|
|
|
|
|
|
|
# check account2 cannot set X-Versions-Location on container
|
|
|
|
self.assertRaises(ResponseError, container.update_metadata, hdrs={
|
|
|
|
'X-Versions-Location': versions_container},
|
|
|
|
cfg={'use_token': self.env.storage_token2})
|
|
|
|
|
|
|
|
# good! now let admin set the X-Versions-Location
|
|
|
|
# p.s.: sticking a 'x-remove' header here to test precedence
|
|
|
|
# of both headers. Setting the location should succeed.
|
|
|
|
self.assertTrue(container.update_metadata(hdrs={
|
|
|
|
'X-Remove-Versions-Location': versions_container,
|
|
|
|
'X-Versions-Location': versions_container}))
|
|
|
|
|
|
|
|
# write object twice to container and check version
|
|
|
|
obj_name = Utils.create_name()
|
|
|
|
versioned_obj = container.file(obj_name)
|
|
|
|
self.assertTrue(versioned_obj.write("never argue with the data",
|
|
|
|
cfg={'use_token': self.env.storage_token2}))
|
|
|
|
self.assertEqual(versioned_obj.read(), "never argue with the data")
|
|
|
|
|
|
|
|
self.assertTrue(
|
|
|
|
versioned_obj.write("we don't have no beer, just tequila",
|
|
|
|
cfg={'use_token': self.env.storage_token2}))
|
|
|
|
self.assertEqual(versioned_obj.read(),
|
|
|
|
"we don't have no beer, just tequila")
|
|
|
|
self.assertEqual(1, versions_container.info()['object_count'])
|
|
|
|
|
|
|
|
# read the original uploaded object
|
|
|
|
for filename in versions_container.files():
|
|
|
|
backup_file = versions_container.file(filename)
|
|
|
|
break
|
|
|
|
self.assertEqual(backup_file.read(), "never argue with the data")
|
|
|
|
|
|
|
|
# user3 (some random user with no access to anything)
|
|
|
|
# tries to read from versioned container
|
|
|
|
self.assertRaises(ResponseError, backup_file.read,
|
|
|
|
cfg={'use_token': self.env.storage_token3})
|
|
|
|
|
|
|
|
# user3 cannot write or delete from source container either
|
|
|
|
self.assertRaises(ResponseError, versioned_obj.write,
|
|
|
|
"some random user trying to write data",
|
|
|
|
cfg={'use_token': self.env.storage_token3})
|
|
|
|
self.assertRaises(ResponseError, versioned_obj.delete,
|
|
|
|
cfg={'use_token': self.env.storage_token3})
|
|
|
|
|
|
|
|
# user2 can't read or delete from versions-location
|
|
|
|
self.assertRaises(ResponseError, backup_file.read,
|
|
|
|
cfg={'use_token': self.env.storage_token2})
|
|
|
|
self.assertRaises(ResponseError, backup_file.delete,
|
|
|
|
cfg={'use_token': self.env.storage_token2})
|
|
|
|
|
|
|
|
# but is able to delete from the source container
|
|
|
|
# this could be a helpful scenario for dev ops that want to setup
|
|
|
|
# just one container to hold object versions of multiple containers
|
|
|
|
# and each one of those containers are owned by different users
|
|
|
|
self.assertTrue(versioned_obj.delete(
|
|
|
|
cfg={'use_token': self.env.storage_token2}))
|
|
|
|
|
|
|
|
# tear-down since we create these containers here
|
|
|
|
# and not in self.env
|
|
|
|
versions_container.delete_recursive()
|
|
|
|
container.delete_recursive()
|
|
|
|
|
2015-03-20 10:17:25 +00:00
|
|
|
def test_versioning_check_acl(self):
|
|
|
|
container = self.env.container
|
|
|
|
versions_container = self.env.versions_container
|
|
|
|
versions_container.create(hdrs={'X-Container-Read': '.r:*,.rlistings'})
|
|
|
|
|
|
|
|
obj_name = Utils.create_name()
|
|
|
|
versioned_obj = container.file(obj_name)
|
|
|
|
versioned_obj.write("aaaaa")
|
|
|
|
self.assertEqual("aaaaa", versioned_obj.read())
|
|
|
|
|
|
|
|
versioned_obj.write("bbbbb")
|
|
|
|
self.assertEqual("bbbbb", versioned_obj.read())
|
|
|
|
|
|
|
|
# Use token from second account and try to delete the object
|
|
|
|
org_token = self.env.account.conn.storage_token
|
|
|
|
self.env.account.conn.storage_token = self.env.conn2.storage_token
|
|
|
|
try:
|
|
|
|
self.assertRaises(ResponseError, versioned_obj.delete)
|
|
|
|
finally:
|
|
|
|
self.env.account.conn.storage_token = org_token
|
|
|
|
|
|
|
|
# Verify with token from first account
|
|
|
|
self.assertEqual("bbbbb", versioned_obj.read())
|
|
|
|
|
|
|
|
versioned_obj.delete()
|
|
|
|
self.assertEqual("aaaaa", versioned_obj.read())
|
|
|
|
|
2013-12-18 10:43:29 -08:00
|
|
|
|
|
|
|
class TestObjectVersioningUTF8(Base2, TestObjectVersioning):
|
|
|
|
set_up = False
|
|
|
|
|
2016-02-08 15:31:42 +00:00
|
|
|
def tearDown(self):
|
|
|
|
self._tear_down_files()
|
|
|
|
super(TestObjectVersioningUTF8, self).tearDown()
|
|
|
|
|
2013-12-18 10:43:29 -08:00
|
|
|
|
2014-04-09 19:15:04 +08:00
|
|
|
class TestCrossPolicyObjectVersioning(TestObjectVersioning):
|
|
|
|
env = TestCrossPolicyObjectVersioningEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
def setUp(self):
|
|
|
|
super(TestCrossPolicyObjectVersioning, self).setUp()
|
|
|
|
if self.env.multiple_policies_enabled is False:
|
|
|
|
raise SkipTest('Cross policy test requires multiple policies')
|
|
|
|
elif self.env.multiple_policies_enabled is not True:
|
|
|
|
# just some sanity checking
|
|
|
|
raise Exception("Expected multiple_policies_enabled "
|
|
|
|
"to be True/False, got %r" % (
|
|
|
|
self.env.versioning_enabled,))
|
|
|
|
|
|
|
|
|
2015-12-21 16:25:45 -02:00
|
|
|
class TestSloWithVersioning(Base):
|
|
|
|
|
|
|
|
def setUp(self):
|
|
|
|
if 'slo' not in cluster_info:
|
|
|
|
raise SkipTest("SLO not enabled")
|
|
|
|
|
|
|
|
self.conn = Connection(tf.config)
|
|
|
|
self.conn.authenticate()
|
|
|
|
self.account = Account(
|
|
|
|
self.conn, tf.config.get('account', tf.config['username']))
|
|
|
|
self.account.delete_containers()
|
|
|
|
|
|
|
|
# create a container with versioning
|
|
|
|
self.versions_container = self.account.container(Utils.create_name())
|
|
|
|
self.container = self.account.container(Utils.create_name())
|
|
|
|
self.segments_container = self.account.container(Utils.create_name())
|
|
|
|
if not self.container.create(
|
|
|
|
hdrs={'X-Versions-Location': self.versions_container.name}):
|
|
|
|
raise ResponseError(self.conn.response)
|
|
|
|
if 'versions' not in self.container.info():
|
|
|
|
raise SkipTest("Object versioning not enabled")
|
|
|
|
|
|
|
|
for cont in (self.versions_container, self.segments_container):
|
|
|
|
if not cont.create():
|
|
|
|
raise ResponseError(self.conn.response)
|
|
|
|
|
|
|
|
# create some segments
|
|
|
|
self.seg_info = {}
|
|
|
|
for letter, size in (('a', 1024 * 1024),
|
|
|
|
('b', 1024 * 1024)):
|
|
|
|
seg_name = letter
|
|
|
|
file_item = self.segments_container.file(seg_name)
|
|
|
|
file_item.write(letter * size)
|
|
|
|
self.seg_info[seg_name] = {
|
|
|
|
'size_bytes': size,
|
|
|
|
'etag': file_item.md5,
|
|
|
|
'path': '/%s/%s' % (self.segments_container.name, seg_name)}
|
|
|
|
|
|
|
|
def _create_manifest(self, seg_name):
|
|
|
|
# create a manifest in the versioning container
|
|
|
|
file_item = self.container.file("my-slo-manifest")
|
|
|
|
file_item.write(
|
|
|
|
json.dumps([self.seg_info[seg_name]]),
|
|
|
|
parms={'multipart-manifest': 'put'})
|
|
|
|
return file_item
|
|
|
|
|
|
|
|
def _assert_is_manifest(self, file_item, seg_name):
|
|
|
|
manifest_body = file_item.read(parms={'multipart-manifest': 'get'})
|
|
|
|
resp_headers = dict(file_item.conn.response.getheaders())
|
|
|
|
self.assertIn('x-static-large-object', resp_headers)
|
|
|
|
self.assertEqual('application/json; charset=utf-8',
|
|
|
|
file_item.content_type)
|
|
|
|
try:
|
|
|
|
manifest = json.loads(manifest_body)
|
|
|
|
except ValueError:
|
|
|
|
self.fail("GET with multipart-manifest=get got invalid json")
|
|
|
|
|
|
|
|
self.assertEqual(1, len(manifest))
|
|
|
|
key_map = {'etag': 'hash', 'size_bytes': 'bytes', 'path': 'name'}
|
|
|
|
for k_client, k_slo in key_map.items():
|
|
|
|
self.assertEqual(self.seg_info[seg_name][k_client],
|
|
|
|
manifest[0][k_slo])
|
|
|
|
|
|
|
|
def _assert_is_object(self, file_item, seg_name):
|
|
|
|
file_contents = file_item.read()
|
|
|
|
self.assertEqual(1024 * 1024, len(file_contents))
|
|
|
|
self.assertEqual(seg_name, file_contents[0])
|
|
|
|
self.assertEqual(seg_name, file_contents[-1])
|
|
|
|
|
|
|
|
def tearDown(self):
|
|
|
|
# remove versioning to allow simple container delete
|
|
|
|
self.container.update_metadata(hdrs={'X-Versions-Location': ''})
|
|
|
|
self.account.delete_containers()
|
|
|
|
|
|
|
|
def test_slo_manifest_version(self):
|
|
|
|
file_item = self._create_manifest('a')
|
|
|
|
# sanity check: read the manifest, then the large object
|
|
|
|
self._assert_is_manifest(file_item, 'a')
|
|
|
|
self._assert_is_object(file_item, 'a')
|
|
|
|
|
|
|
|
# upload new manifest
|
|
|
|
file_item = self._create_manifest('b')
|
|
|
|
# sanity check: read the manifest, then the large object
|
|
|
|
self._assert_is_manifest(file_item, 'b')
|
|
|
|
self._assert_is_object(file_item, 'b')
|
|
|
|
|
|
|
|
versions_list = self.versions_container.files()
|
|
|
|
self.assertEqual(1, len(versions_list))
|
|
|
|
version_file = self.versions_container.file(versions_list[0])
|
|
|
|
# check the version is still a manifest
|
|
|
|
self._assert_is_manifest(version_file, 'a')
|
|
|
|
self._assert_is_object(version_file, 'a')
|
|
|
|
|
|
|
|
# delete the newest manifest
|
|
|
|
file_item.delete()
|
|
|
|
|
|
|
|
# expect the original manifest file to be restored
|
|
|
|
self._assert_is_manifest(file_item, 'a')
|
|
|
|
self._assert_is_object(file_item, 'a')
|
|
|
|
|
|
|
|
|
2014-03-06 13:11:03 -08:00
|
|
|
class TestTempurlEnv(object):
|
|
|
|
tempurl_enabled = None # tri-state: None initially, then True/False
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def setUp(cls):
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.conn = Connection(tf.config)
|
2014-03-06 13:11:03 -08:00
|
|
|
cls.conn.authenticate()
|
|
|
|
|
|
|
|
if cls.tempurl_enabled is None:
|
|
|
|
cls.tempurl_enabled = 'tempurl' in cluster_info
|
|
|
|
if not cls.tempurl_enabled:
|
|
|
|
return
|
|
|
|
|
|
|
|
cls.tempurl_key = Utils.create_name()
|
|
|
|
cls.tempurl_key2 = Utils.create_name()
|
|
|
|
|
|
|
|
cls.account = Account(
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.conn, tf.config.get('account', tf.config['username']))
|
2014-03-06 13:11:03 -08:00
|
|
|
cls.account.delete_containers()
|
|
|
|
cls.account.update_metadata({
|
|
|
|
'temp-url-key': cls.tempurl_key,
|
|
|
|
'temp-url-key-2': cls.tempurl_key2
|
|
|
|
})
|
|
|
|
|
|
|
|
cls.container = cls.account.container(Utils.create_name())
|
|
|
|
if not cls.container.create():
|
|
|
|
raise ResponseError(cls.conn.response)
|
|
|
|
|
|
|
|
cls.obj = cls.container.file(Utils.create_name())
|
|
|
|
cls.obj.write("obj contents")
|
|
|
|
cls.other_obj = cls.container.file(Utils.create_name())
|
|
|
|
cls.other_obj.write("other obj contents")
|
|
|
|
|
|
|
|
|
|
|
|
class TestTempurl(Base):
|
|
|
|
env = TestTempurlEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
def setUp(self):
|
|
|
|
super(TestTempurl, self).setUp()
|
|
|
|
if self.env.tempurl_enabled is False:
|
|
|
|
raise SkipTest("TempURL not enabled")
|
|
|
|
elif self.env.tempurl_enabled is not True:
|
|
|
|
# just some sanity checking
|
|
|
|
raise Exception(
|
|
|
|
"Expected tempurl_enabled to be True/False, got %r" %
|
|
|
|
(self.env.tempurl_enabled,))
|
|
|
|
|
|
|
|
expires = int(time.time()) + 86400
|
|
|
|
sig = self.tempurl_sig(
|
|
|
|
'GET', expires, self.env.conn.make_path(self.env.obj.path),
|
|
|
|
self.env.tempurl_key)
|
|
|
|
self.obj_tempurl_parms = {'temp_url_sig': sig,
|
|
|
|
'temp_url_expires': str(expires)}
|
|
|
|
|
|
|
|
def tempurl_sig(self, method, expires, path, key):
|
|
|
|
return hmac.new(
|
|
|
|
key,
|
2015-10-08 15:03:52 +02:00
|
|
|
'%s\n%s\n%s' % (method, expires, urllib.parse.unquote(path)),
|
2014-03-06 13:11:03 -08:00
|
|
|
hashlib.sha1).hexdigest()
|
|
|
|
|
|
|
|
def test_GET(self):
|
|
|
|
contents = self.env.obj.read(
|
|
|
|
parms=self.obj_tempurl_parms,
|
|
|
|
cfg={'no_auth_token': True})
|
|
|
|
self.assertEqual(contents, "obj contents")
|
|
|
|
|
|
|
|
# GET tempurls also allow HEAD requests
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(self.env.obj.info(parms=self.obj_tempurl_parms,
|
|
|
|
cfg={'no_auth_token': True}))
|
2014-03-06 13:11:03 -08:00
|
|
|
|
|
|
|
def test_GET_with_key_2(self):
|
|
|
|
expires = int(time.time()) + 86400
|
|
|
|
sig = self.tempurl_sig(
|
|
|
|
'GET', expires, self.env.conn.make_path(self.env.obj.path),
|
|
|
|
self.env.tempurl_key2)
|
|
|
|
parms = {'temp_url_sig': sig,
|
|
|
|
'temp_url_expires': str(expires)}
|
|
|
|
|
|
|
|
contents = self.env.obj.read(parms=parms, cfg={'no_auth_token': True})
|
|
|
|
self.assertEqual(contents, "obj contents")
|
|
|
|
|
Better scoping for tempurls, especially container tempurls
It used to be that a GET of a tempurl referencing a large object would
let you download that large object regardless of where its segments
lived. However, this led to some violated user expectations around
container tempurls.
(Note on shorthand: all tempurls reference objects. However, "account
tempurl" and "container tempurl" are shorthand meaning tempurls
generated using a key on the account or container, respectively.)
Let's say an application is given tempurl keys to a particular
container, and it does all its work therein using those keys. The user
expects that, if the application is compromised, then the attacker
only gains access to the "compromised-container". However, with the old
behavior, the attacker could read data from *any* container like so:
1) Choose a "victim-container" to download
2) Create PUT and GET tempurl for any object name within the
"compromised-container". The object doesn't need to exist;
we'll create it.
3) Using the PUT tempurl, upload a DLO manifest with
"X-Object-Manifest: /victim-container/"
4) Using the GET tempurl, download the object created in step 3. The
result will be the concatenation of all objects in the
"victim-container".
Step 3 need not be for all objects in the "victim-container"; for
example, a value "X-Object-Manifest: /victim-container/abc" would only
be the concatenation of all objects whose names begin with "abc". By
probing for object names in this way, individual objects may be found
and extracted.
A similar bug would exist for manifests referencing other accounts
except that neither the X-Object-Manifest (DLO) nor the JSON manifest
document (SLO) have a way of specifying a different account.
This change makes it so that a container tempurl only grants access to
objects within its container, *including* large-object segments. This
breaks backward compatibility for container tempurls that may have
pointed to cross container *LO's, but (a) there are security
implications, and (b) container tempurls are a relatively new feature.
This works by having the tempurl middleware install an authorization
callback ('swift.authorize' in the WSGI environment) that limits the
scope of any requests to the account or container from which the key
came.
This requires swift.authorize to persist for both the manifest request
and all segment requests; this is done by having the proxy server
restore it to the WSGI environment prior to returning from __call__.
[CVE-2015-5223]
Co-Authored-By: Clay Gerrard <clayg@swiftstack.com>
Co-Authored-By: Alistair Coles <alistair.coles@hp.com>
Co-Authored-By: Christian Schwede <cschwede@redhat.com>
Co-Authored-By: Matthew Oliver <matt@oliver.net.au>
Change-Id: Ie6d52f7a07e87f6fec21ed8b0ec1d84be8b2b11c
Closes-Bug: 1449212
2015-08-11 09:10:13 -05:00
|
|
|
def test_GET_DLO_inside_container(self):
|
|
|
|
seg1 = self.env.container.file(
|
|
|
|
"get-dlo-inside-seg1" + Utils.create_name())
|
|
|
|
seg2 = self.env.container.file(
|
|
|
|
"get-dlo-inside-seg2" + Utils.create_name())
|
|
|
|
seg1.write("one fish two fish ")
|
|
|
|
seg2.write("red fish blue fish")
|
|
|
|
|
|
|
|
manifest = self.env.container.file("manifest" + Utils.create_name())
|
|
|
|
manifest.write(
|
|
|
|
'',
|
|
|
|
hdrs={"X-Object-Manifest": "%s/get-dlo-inside-seg" %
|
|
|
|
(self.env.container.name,)})
|
|
|
|
|
|
|
|
expires = int(time.time()) + 86400
|
|
|
|
sig = self.tempurl_sig(
|
|
|
|
'GET', expires, self.env.conn.make_path(manifest.path),
|
|
|
|
self.env.tempurl_key)
|
|
|
|
parms = {'temp_url_sig': sig,
|
|
|
|
'temp_url_expires': str(expires)}
|
|
|
|
|
|
|
|
contents = manifest.read(parms=parms, cfg={'no_auth_token': True})
|
|
|
|
self.assertEqual(contents, "one fish two fish red fish blue fish")
|
|
|
|
|
|
|
|
def test_GET_DLO_outside_container(self):
|
|
|
|
seg1 = self.env.container.file(
|
|
|
|
"get-dlo-outside-seg1" + Utils.create_name())
|
|
|
|
seg2 = self.env.container.file(
|
|
|
|
"get-dlo-outside-seg2" + Utils.create_name())
|
|
|
|
seg1.write("one fish two fish ")
|
|
|
|
seg2.write("red fish blue fish")
|
|
|
|
|
|
|
|
container2 = self.env.account.container(Utils.create_name())
|
|
|
|
container2.create()
|
|
|
|
|
|
|
|
manifest = container2.file("manifest" + Utils.create_name())
|
|
|
|
manifest.write(
|
|
|
|
'',
|
|
|
|
hdrs={"X-Object-Manifest": "%s/get-dlo-outside-seg" %
|
|
|
|
(self.env.container.name,)})
|
|
|
|
|
|
|
|
expires = int(time.time()) + 86400
|
|
|
|
sig = self.tempurl_sig(
|
|
|
|
'GET', expires, self.env.conn.make_path(manifest.path),
|
|
|
|
self.env.tempurl_key)
|
|
|
|
parms = {'temp_url_sig': sig,
|
|
|
|
'temp_url_expires': str(expires)}
|
|
|
|
|
|
|
|
# cross container tempurl works fine for account tempurl key
|
|
|
|
contents = manifest.read(parms=parms, cfg={'no_auth_token': True})
|
|
|
|
self.assertEqual(contents, "one fish two fish red fish blue fish")
|
|
|
|
self.assert_status([200])
|
|
|
|
|
2014-03-06 13:11:03 -08:00
|
|
|
def test_PUT(self):
|
|
|
|
new_obj = self.env.container.file(Utils.create_name())
|
|
|
|
|
|
|
|
expires = int(time.time()) + 86400
|
|
|
|
sig = self.tempurl_sig(
|
|
|
|
'PUT', expires, self.env.conn.make_path(new_obj.path),
|
|
|
|
self.env.tempurl_key)
|
|
|
|
put_parms = {'temp_url_sig': sig,
|
|
|
|
'temp_url_expires': str(expires)}
|
|
|
|
|
|
|
|
new_obj.write('new obj contents',
|
|
|
|
parms=put_parms, cfg={'no_auth_token': True})
|
|
|
|
self.assertEqual(new_obj.read(), "new obj contents")
|
|
|
|
|
|
|
|
# PUT tempurls also allow HEAD requests
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(new_obj.info(parms=put_parms,
|
|
|
|
cfg={'no_auth_token': True}))
|
2014-03-06 13:11:03 -08:00
|
|
|
|
2015-07-23 22:36:21 -07:00
|
|
|
def test_PUT_manifest_access(self):
|
|
|
|
new_obj = self.env.container.file(Utils.create_name())
|
|
|
|
|
|
|
|
# give out a signature which allows a PUT to new_obj
|
|
|
|
expires = int(time.time()) + 86400
|
|
|
|
sig = self.tempurl_sig(
|
|
|
|
'PUT', expires, self.env.conn.make_path(new_obj.path),
|
|
|
|
self.env.tempurl_key)
|
|
|
|
put_parms = {'temp_url_sig': sig,
|
|
|
|
'temp_url_expires': str(expires)}
|
|
|
|
|
|
|
|
# try to create manifest pointing to some random container
|
|
|
|
try:
|
|
|
|
new_obj.write('', {
|
|
|
|
'x-object-manifest': '%s/foo' % 'some_random_container'
|
|
|
|
}, parms=put_parms, cfg={'no_auth_token': True})
|
|
|
|
except ResponseError as e:
|
|
|
|
self.assertEqual(e.status, 400)
|
|
|
|
else:
|
|
|
|
self.fail('request did not error')
|
|
|
|
|
|
|
|
# create some other container
|
|
|
|
other_container = self.env.account.container(Utils.create_name())
|
|
|
|
if not other_container.create():
|
|
|
|
raise ResponseError(self.conn.response)
|
|
|
|
|
|
|
|
# try to create manifest pointing to new container
|
|
|
|
try:
|
|
|
|
new_obj.write('', {
|
|
|
|
'x-object-manifest': '%s/foo' % other_container
|
|
|
|
}, parms=put_parms, cfg={'no_auth_token': True})
|
2015-08-26 16:30:23 +01:00
|
|
|
except ResponseError as e:
|
|
|
|
self.assertEqual(e.status, 400)
|
|
|
|
else:
|
|
|
|
self.fail('request did not error')
|
|
|
|
|
|
|
|
# try again using a tempurl POST to an already created object
|
|
|
|
new_obj.write('', {}, parms=put_parms, cfg={'no_auth_token': True})
|
|
|
|
expires = int(time.time()) + 86400
|
|
|
|
sig = self.tempurl_sig(
|
|
|
|
'POST', expires, self.env.conn.make_path(new_obj.path),
|
|
|
|
self.env.tempurl_key)
|
|
|
|
post_parms = {'temp_url_sig': sig,
|
|
|
|
'temp_url_expires': str(expires)}
|
|
|
|
try:
|
|
|
|
new_obj.post({'x-object-manifest': '%s/foo' % other_container},
|
|
|
|
parms=post_parms, cfg={'no_auth_token': True})
|
2015-07-23 22:36:21 -07:00
|
|
|
except ResponseError as e:
|
|
|
|
self.assertEqual(e.status, 400)
|
|
|
|
else:
|
|
|
|
self.fail('request did not error')
|
|
|
|
|
2014-03-06 13:11:03 -08:00
|
|
|
def test_HEAD(self):
|
|
|
|
expires = int(time.time()) + 86400
|
|
|
|
sig = self.tempurl_sig(
|
|
|
|
'HEAD', expires, self.env.conn.make_path(self.env.obj.path),
|
|
|
|
self.env.tempurl_key)
|
|
|
|
head_parms = {'temp_url_sig': sig,
|
|
|
|
'temp_url_expires': str(expires)}
|
|
|
|
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(self.env.obj.info(parms=head_parms,
|
|
|
|
cfg={'no_auth_token': True}))
|
2014-03-06 13:11:03 -08:00
|
|
|
# HEAD tempurls don't allow PUT or GET requests, despite the fact that
|
|
|
|
# PUT and GET tempurls both allow HEAD requests
|
|
|
|
self.assertRaises(ResponseError, self.env.other_obj.read,
|
|
|
|
cfg={'no_auth_token': True},
|
|
|
|
parms=self.obj_tempurl_parms)
|
|
|
|
self.assert_status([401])
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, self.env.other_obj.write,
|
|
|
|
'new contents',
|
|
|
|
cfg={'no_auth_token': True},
|
|
|
|
parms=self.obj_tempurl_parms)
|
|
|
|
self.assert_status([401])
|
|
|
|
|
|
|
|
def test_different_object(self):
|
|
|
|
contents = self.env.obj.read(
|
|
|
|
parms=self.obj_tempurl_parms,
|
|
|
|
cfg={'no_auth_token': True})
|
|
|
|
self.assertEqual(contents, "obj contents")
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, self.env.other_obj.read,
|
|
|
|
cfg={'no_auth_token': True},
|
|
|
|
parms=self.obj_tempurl_parms)
|
|
|
|
self.assert_status([401])
|
|
|
|
|
|
|
|
def test_changing_sig(self):
|
|
|
|
contents = self.env.obj.read(
|
|
|
|
parms=self.obj_tempurl_parms,
|
|
|
|
cfg={'no_auth_token': True})
|
|
|
|
self.assertEqual(contents, "obj contents")
|
|
|
|
|
|
|
|
parms = self.obj_tempurl_parms.copy()
|
|
|
|
if parms['temp_url_sig'][0] == 'a':
|
|
|
|
parms['temp_url_sig'] = 'b' + parms['temp_url_sig'][1:]
|
|
|
|
else:
|
|
|
|
parms['temp_url_sig'] = 'a' + parms['temp_url_sig'][1:]
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, self.env.obj.read,
|
|
|
|
cfg={'no_auth_token': True},
|
|
|
|
parms=parms)
|
|
|
|
self.assert_status([401])
|
|
|
|
|
|
|
|
def test_changing_expires(self):
|
|
|
|
contents = self.env.obj.read(
|
|
|
|
parms=self.obj_tempurl_parms,
|
|
|
|
cfg={'no_auth_token': True})
|
|
|
|
self.assertEqual(contents, "obj contents")
|
|
|
|
|
|
|
|
parms = self.obj_tempurl_parms.copy()
|
|
|
|
if parms['temp_url_expires'][-1] == '0':
|
|
|
|
parms['temp_url_expires'] = parms['temp_url_expires'][:-1] + '1'
|
|
|
|
else:
|
|
|
|
parms['temp_url_expires'] = parms['temp_url_expires'][:-1] + '0'
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, self.env.obj.read,
|
|
|
|
cfg={'no_auth_token': True},
|
|
|
|
parms=parms)
|
|
|
|
self.assert_status([401])
|
|
|
|
|
|
|
|
|
|
|
|
class TestTempurlUTF8(Base2, TestTempurl):
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
|
2015-02-12 16:39:48 -06:00
|
|
|
class TestContainerTempurlEnv(object):
|
|
|
|
tempurl_enabled = None # tri-state: None initially, then True/False
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def setUp(cls):
|
|
|
|
cls.conn = Connection(tf.config)
|
|
|
|
cls.conn.authenticate()
|
|
|
|
|
|
|
|
if cls.tempurl_enabled is None:
|
|
|
|
cls.tempurl_enabled = 'tempurl' in cluster_info
|
|
|
|
if not cls.tempurl_enabled:
|
|
|
|
return
|
|
|
|
|
|
|
|
cls.tempurl_key = Utils.create_name()
|
|
|
|
cls.tempurl_key2 = Utils.create_name()
|
|
|
|
|
|
|
|
cls.account = Account(
|
|
|
|
cls.conn, tf.config.get('account', tf.config['username']))
|
|
|
|
cls.account.delete_containers()
|
|
|
|
|
2015-02-14 10:19:16 -06:00
|
|
|
# creating another account and connection
|
|
|
|
# for ACL tests
|
|
|
|
config2 = deepcopy(tf.config)
|
|
|
|
config2['account'] = tf.config['account2']
|
|
|
|
config2['username'] = tf.config['username2']
|
|
|
|
config2['password'] = tf.config['password2']
|
|
|
|
cls.conn2 = Connection(config2)
|
|
|
|
cls.conn2.authenticate()
|
|
|
|
cls.account2 = Account(
|
|
|
|
cls.conn2, config2.get('account', config2['username']))
|
|
|
|
cls.account2 = cls.conn2.get_account()
|
|
|
|
|
2015-02-12 16:39:48 -06:00
|
|
|
cls.container = cls.account.container(Utils.create_name())
|
|
|
|
if not cls.container.create({
|
|
|
|
'x-container-meta-temp-url-key': cls.tempurl_key,
|
2015-02-14 10:19:16 -06:00
|
|
|
'x-container-meta-temp-url-key-2': cls.tempurl_key2,
|
|
|
|
'x-container-read': cls.account2.name}):
|
2015-02-12 16:39:48 -06:00
|
|
|
raise ResponseError(cls.conn.response)
|
|
|
|
|
|
|
|
cls.obj = cls.container.file(Utils.create_name())
|
|
|
|
cls.obj.write("obj contents")
|
|
|
|
cls.other_obj = cls.container.file(Utils.create_name())
|
|
|
|
cls.other_obj.write("other obj contents")
|
|
|
|
|
|
|
|
|
|
|
|
class TestContainerTempurl(Base):
|
|
|
|
env = TestContainerTempurlEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
def setUp(self):
|
|
|
|
super(TestContainerTempurl, self).setUp()
|
|
|
|
if self.env.tempurl_enabled is False:
|
|
|
|
raise SkipTest("TempURL not enabled")
|
|
|
|
elif self.env.tempurl_enabled is not True:
|
|
|
|
# just some sanity checking
|
|
|
|
raise Exception(
|
|
|
|
"Expected tempurl_enabled to be True/False, got %r" %
|
|
|
|
(self.env.tempurl_enabled,))
|
|
|
|
|
|
|
|
expires = int(time.time()) + 86400
|
|
|
|
sig = self.tempurl_sig(
|
|
|
|
'GET', expires, self.env.conn.make_path(self.env.obj.path),
|
|
|
|
self.env.tempurl_key)
|
|
|
|
self.obj_tempurl_parms = {'temp_url_sig': sig,
|
|
|
|
'temp_url_expires': str(expires)}
|
|
|
|
|
|
|
|
def tempurl_sig(self, method, expires, path, key):
|
|
|
|
return hmac.new(
|
|
|
|
key,
|
2015-10-08 15:03:52 +02:00
|
|
|
'%s\n%s\n%s' % (method, expires, urllib.parse.unquote(path)),
|
2015-02-12 16:39:48 -06:00
|
|
|
hashlib.sha1).hexdigest()
|
|
|
|
|
|
|
|
def test_GET(self):
|
|
|
|
contents = self.env.obj.read(
|
|
|
|
parms=self.obj_tempurl_parms,
|
|
|
|
cfg={'no_auth_token': True})
|
|
|
|
self.assertEqual(contents, "obj contents")
|
|
|
|
|
|
|
|
# GET tempurls also allow HEAD requests
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(self.env.obj.info(parms=self.obj_tempurl_parms,
|
|
|
|
cfg={'no_auth_token': True}))
|
2015-02-12 16:39:48 -06:00
|
|
|
|
|
|
|
def test_GET_with_key_2(self):
|
|
|
|
expires = int(time.time()) + 86400
|
|
|
|
sig = self.tempurl_sig(
|
|
|
|
'GET', expires, self.env.conn.make_path(self.env.obj.path),
|
|
|
|
self.env.tempurl_key2)
|
|
|
|
parms = {'temp_url_sig': sig,
|
|
|
|
'temp_url_expires': str(expires)}
|
|
|
|
|
|
|
|
contents = self.env.obj.read(parms=parms, cfg={'no_auth_token': True})
|
|
|
|
self.assertEqual(contents, "obj contents")
|
|
|
|
|
|
|
|
def test_PUT(self):
|
|
|
|
new_obj = self.env.container.file(Utils.create_name())
|
|
|
|
|
|
|
|
expires = int(time.time()) + 86400
|
|
|
|
sig = self.tempurl_sig(
|
|
|
|
'PUT', expires, self.env.conn.make_path(new_obj.path),
|
|
|
|
self.env.tempurl_key)
|
|
|
|
put_parms = {'temp_url_sig': sig,
|
|
|
|
'temp_url_expires': str(expires)}
|
|
|
|
|
|
|
|
new_obj.write('new obj contents',
|
|
|
|
parms=put_parms, cfg={'no_auth_token': True})
|
|
|
|
self.assertEqual(new_obj.read(), "new obj contents")
|
|
|
|
|
|
|
|
# PUT tempurls also allow HEAD requests
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(new_obj.info(parms=put_parms,
|
|
|
|
cfg={'no_auth_token': True}))
|
2015-02-12 16:39:48 -06:00
|
|
|
|
|
|
|
def test_HEAD(self):
|
|
|
|
expires = int(time.time()) + 86400
|
|
|
|
sig = self.tempurl_sig(
|
|
|
|
'HEAD', expires, self.env.conn.make_path(self.env.obj.path),
|
|
|
|
self.env.tempurl_key)
|
|
|
|
head_parms = {'temp_url_sig': sig,
|
|
|
|
'temp_url_expires': str(expires)}
|
|
|
|
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(self.env.obj.info(parms=head_parms,
|
|
|
|
cfg={'no_auth_token': True}))
|
2015-02-12 16:39:48 -06:00
|
|
|
# HEAD tempurls don't allow PUT or GET requests, despite the fact that
|
|
|
|
# PUT and GET tempurls both allow HEAD requests
|
|
|
|
self.assertRaises(ResponseError, self.env.other_obj.read,
|
|
|
|
cfg={'no_auth_token': True},
|
|
|
|
parms=self.obj_tempurl_parms)
|
|
|
|
self.assert_status([401])
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, self.env.other_obj.write,
|
|
|
|
'new contents',
|
|
|
|
cfg={'no_auth_token': True},
|
|
|
|
parms=self.obj_tempurl_parms)
|
|
|
|
self.assert_status([401])
|
|
|
|
|
|
|
|
def test_different_object(self):
|
|
|
|
contents = self.env.obj.read(
|
|
|
|
parms=self.obj_tempurl_parms,
|
|
|
|
cfg={'no_auth_token': True})
|
|
|
|
self.assertEqual(contents, "obj contents")
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, self.env.other_obj.read,
|
|
|
|
cfg={'no_auth_token': True},
|
|
|
|
parms=self.obj_tempurl_parms)
|
|
|
|
self.assert_status([401])
|
|
|
|
|
|
|
|
def test_changing_sig(self):
|
|
|
|
contents = self.env.obj.read(
|
|
|
|
parms=self.obj_tempurl_parms,
|
|
|
|
cfg={'no_auth_token': True})
|
|
|
|
self.assertEqual(contents, "obj contents")
|
|
|
|
|
|
|
|
parms = self.obj_tempurl_parms.copy()
|
|
|
|
if parms['temp_url_sig'][0] == 'a':
|
|
|
|
parms['temp_url_sig'] = 'b' + parms['temp_url_sig'][1:]
|
|
|
|
else:
|
|
|
|
parms['temp_url_sig'] = 'a' + parms['temp_url_sig'][1:]
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, self.env.obj.read,
|
|
|
|
cfg={'no_auth_token': True},
|
|
|
|
parms=parms)
|
|
|
|
self.assert_status([401])
|
|
|
|
|
|
|
|
def test_changing_expires(self):
|
|
|
|
contents = self.env.obj.read(
|
|
|
|
parms=self.obj_tempurl_parms,
|
|
|
|
cfg={'no_auth_token': True})
|
|
|
|
self.assertEqual(contents, "obj contents")
|
|
|
|
|
|
|
|
parms = self.obj_tempurl_parms.copy()
|
|
|
|
if parms['temp_url_expires'][-1] == '0':
|
|
|
|
parms['temp_url_expires'] = parms['temp_url_expires'][:-1] + '1'
|
|
|
|
else:
|
|
|
|
parms['temp_url_expires'] = parms['temp_url_expires'][:-1] + '0'
|
|
|
|
|
|
|
|
self.assertRaises(ResponseError, self.env.obj.read,
|
|
|
|
cfg={'no_auth_token': True},
|
|
|
|
parms=parms)
|
|
|
|
self.assert_status([401])
|
|
|
|
|
2015-06-02 19:51:39 +00:00
|
|
|
@requires_acls
|
2015-02-14 10:19:16 -06:00
|
|
|
def test_tempurl_keys_visible_to_account_owner(self):
|
|
|
|
if not tf.cluster_info.get('tempauth'):
|
|
|
|
raise SkipTest('TEMP AUTH SPECIFIC TEST')
|
|
|
|
metadata = self.env.container.info()
|
|
|
|
self.assertEqual(metadata.get('tempurl_key'), self.env.tempurl_key)
|
|
|
|
self.assertEqual(metadata.get('tempurl_key2'), self.env.tempurl_key2)
|
|
|
|
|
2015-06-02 19:51:39 +00:00
|
|
|
@requires_acls
|
2015-02-14 10:19:16 -06:00
|
|
|
def test_tempurl_keys_hidden_from_acl_readonly(self):
|
|
|
|
if not tf.cluster_info.get('tempauth'):
|
|
|
|
raise SkipTest('TEMP AUTH SPECIFIC TEST')
|
|
|
|
original_token = self.env.container.conn.storage_token
|
|
|
|
self.env.container.conn.storage_token = self.env.conn2.storage_token
|
|
|
|
metadata = self.env.container.info()
|
|
|
|
self.env.container.conn.storage_token = original_token
|
|
|
|
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertNotIn(
|
|
|
|
'tempurl_key', metadata,
|
|
|
|
'Container TempURL key found, should not be visible '
|
|
|
|
'to readonly ACLs')
|
|
|
|
self.assertNotIn(
|
|
|
|
'tempurl_key2', metadata,
|
|
|
|
'Container TempURL key-2 found, should not be visible '
|
|
|
|
'to readonly ACLs')
|
2015-02-14 10:19:16 -06:00
|
|
|
|
Better scoping for tempurls, especially container tempurls
It used to be that a GET of a tempurl referencing a large object would
let you download that large object regardless of where its segments
lived. However, this led to some violated user expectations around
container tempurls.
(Note on shorthand: all tempurls reference objects. However, "account
tempurl" and "container tempurl" are shorthand meaning tempurls
generated using a key on the account or container, respectively.)
Let's say an application is given tempurl keys to a particular
container, and it does all its work therein using those keys. The user
expects that, if the application is compromised, then the attacker
only gains access to the "compromised-container". However, with the old
behavior, the attacker could read data from *any* container like so:
1) Choose a "victim-container" to download
2) Create PUT and GET tempurl for any object name within the
"compromised-container". The object doesn't need to exist;
we'll create it.
3) Using the PUT tempurl, upload a DLO manifest with
"X-Object-Manifest: /victim-container/"
4) Using the GET tempurl, download the object created in step 3. The
result will be the concatenation of all objects in the
"victim-container".
Step 3 need not be for all objects in the "victim-container"; for
example, a value "X-Object-Manifest: /victim-container/abc" would only
be the concatenation of all objects whose names begin with "abc". By
probing for object names in this way, individual objects may be found
and extracted.
A similar bug would exist for manifests referencing other accounts
except that neither the X-Object-Manifest (DLO) nor the JSON manifest
document (SLO) have a way of specifying a different account.
This change makes it so that a container tempurl only grants access to
objects within its container, *including* large-object segments. This
breaks backward compatibility for container tempurls that may have
pointed to cross container *LO's, but (a) there are security
implications, and (b) container tempurls are a relatively new feature.
This works by having the tempurl middleware install an authorization
callback ('swift.authorize' in the WSGI environment) that limits the
scope of any requests to the account or container from which the key
came.
This requires swift.authorize to persist for both the manifest request
and all segment requests; this is done by having the proxy server
restore it to the WSGI environment prior to returning from __call__.
[CVE-2015-5223]
Co-Authored-By: Clay Gerrard <clayg@swiftstack.com>
Co-Authored-By: Alistair Coles <alistair.coles@hp.com>
Co-Authored-By: Christian Schwede <cschwede@redhat.com>
Co-Authored-By: Matthew Oliver <matt@oliver.net.au>
Change-Id: Ie6d52f7a07e87f6fec21ed8b0ec1d84be8b2b11c
Closes-Bug: 1449212
2015-08-11 09:10:13 -05:00
|
|
|
def test_GET_DLO_inside_container(self):
|
|
|
|
seg1 = self.env.container.file(
|
|
|
|
"get-dlo-inside-seg1" + Utils.create_name())
|
|
|
|
seg2 = self.env.container.file(
|
|
|
|
"get-dlo-inside-seg2" + Utils.create_name())
|
|
|
|
seg1.write("one fish two fish ")
|
|
|
|
seg2.write("red fish blue fish")
|
|
|
|
|
|
|
|
manifest = self.env.container.file("manifest" + Utils.create_name())
|
|
|
|
manifest.write(
|
|
|
|
'',
|
|
|
|
hdrs={"X-Object-Manifest": "%s/get-dlo-inside-seg" %
|
|
|
|
(self.env.container.name,)})
|
|
|
|
|
|
|
|
expires = int(time.time()) + 86400
|
|
|
|
sig = self.tempurl_sig(
|
|
|
|
'GET', expires, self.env.conn.make_path(manifest.path),
|
|
|
|
self.env.tempurl_key)
|
|
|
|
parms = {'temp_url_sig': sig,
|
|
|
|
'temp_url_expires': str(expires)}
|
|
|
|
|
|
|
|
contents = manifest.read(parms=parms, cfg={'no_auth_token': True})
|
|
|
|
self.assertEqual(contents, "one fish two fish red fish blue fish")
|
|
|
|
|
|
|
|
def test_GET_DLO_outside_container(self):
|
|
|
|
container2 = self.env.account.container(Utils.create_name())
|
|
|
|
container2.create()
|
|
|
|
seg1 = container2.file(
|
|
|
|
"get-dlo-outside-seg1" + Utils.create_name())
|
|
|
|
seg2 = container2.file(
|
|
|
|
"get-dlo-outside-seg2" + Utils.create_name())
|
|
|
|
seg1.write("one fish two fish ")
|
|
|
|
seg2.write("red fish blue fish")
|
|
|
|
|
|
|
|
manifest = self.env.container.file("manifest" + Utils.create_name())
|
|
|
|
manifest.write(
|
|
|
|
'',
|
|
|
|
hdrs={"X-Object-Manifest": "%s/get-dlo-outside-seg" %
|
|
|
|
(container2.name,)})
|
|
|
|
|
|
|
|
expires = int(time.time()) + 86400
|
|
|
|
sig = self.tempurl_sig(
|
|
|
|
'GET', expires, self.env.conn.make_path(manifest.path),
|
|
|
|
self.env.tempurl_key)
|
|
|
|
parms = {'temp_url_sig': sig,
|
|
|
|
'temp_url_expires': str(expires)}
|
|
|
|
|
|
|
|
# cross container tempurl does not work for container tempurl key
|
|
|
|
try:
|
|
|
|
manifest.read(parms=parms, cfg={'no_auth_token': True})
|
|
|
|
except ResponseError as e:
|
|
|
|
self.assertEqual(e.status, 401)
|
|
|
|
else:
|
|
|
|
self.fail('request did not error')
|
|
|
|
try:
|
|
|
|
manifest.info(parms=parms, cfg={'no_auth_token': True})
|
|
|
|
except ResponseError as e:
|
|
|
|
self.assertEqual(e.status, 401)
|
|
|
|
else:
|
|
|
|
self.fail('request did not error')
|
|
|
|
|
2015-02-12 16:39:48 -06:00
|
|
|
|
|
|
|
class TestContainerTempurlUTF8(Base2, TestContainerTempurl):
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
|
2014-03-07 15:53:05 -08:00
|
|
|
class TestSloTempurlEnv(object):
|
|
|
|
enabled = None # tri-state: None initially, then True/False
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def setUp(cls):
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.conn = Connection(tf.config)
|
2014-03-07 15:53:05 -08:00
|
|
|
cls.conn.authenticate()
|
|
|
|
|
|
|
|
if cls.enabled is None:
|
|
|
|
cls.enabled = 'tempurl' in cluster_info and 'slo' in cluster_info
|
|
|
|
|
|
|
|
cls.tempurl_key = Utils.create_name()
|
|
|
|
|
|
|
|
cls.account = Account(
|
2014-03-31 23:22:49 -04:00
|
|
|
cls.conn, tf.config.get('account', tf.config['username']))
|
2014-03-07 15:53:05 -08:00
|
|
|
cls.account.delete_containers()
|
|
|
|
cls.account.update_metadata({'temp-url-key': cls.tempurl_key})
|
|
|
|
|
|
|
|
cls.manifest_container = cls.account.container(Utils.create_name())
|
|
|
|
cls.segments_container = cls.account.container(Utils.create_name())
|
|
|
|
if not cls.manifest_container.create():
|
|
|
|
raise ResponseError(cls.conn.response)
|
|
|
|
if not cls.segments_container.create():
|
|
|
|
raise ResponseError(cls.conn.response)
|
|
|
|
|
|
|
|
seg1 = cls.segments_container.file(Utils.create_name())
|
|
|
|
seg1.write('1' * 1024 * 1024)
|
|
|
|
|
|
|
|
seg2 = cls.segments_container.file(Utils.create_name())
|
|
|
|
seg2.write('2' * 1024 * 1024)
|
|
|
|
|
|
|
|
cls.manifest_data = [{'size_bytes': 1024 * 1024,
|
|
|
|
'etag': seg1.md5,
|
|
|
|
'path': '/%s/%s' % (cls.segments_container.name,
|
|
|
|
seg1.name)},
|
|
|
|
{'size_bytes': 1024 * 1024,
|
|
|
|
'etag': seg2.md5,
|
|
|
|
'path': '/%s/%s' % (cls.segments_container.name,
|
|
|
|
seg2.name)}]
|
|
|
|
|
|
|
|
cls.manifest = cls.manifest_container.file(Utils.create_name())
|
|
|
|
cls.manifest.write(
|
|
|
|
json.dumps(cls.manifest_data),
|
|
|
|
parms={'multipart-manifest': 'put'})
|
|
|
|
|
|
|
|
|
|
|
|
class TestSloTempurl(Base):
|
|
|
|
env = TestSloTempurlEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
def setUp(self):
|
|
|
|
super(TestSloTempurl, self).setUp()
|
|
|
|
if self.env.enabled is False:
|
|
|
|
raise SkipTest("TempURL and SLO not both enabled")
|
|
|
|
elif self.env.enabled is not True:
|
|
|
|
# just some sanity checking
|
|
|
|
raise Exception(
|
|
|
|
"Expected enabled to be True/False, got %r" %
|
|
|
|
(self.env.enabled,))
|
|
|
|
|
|
|
|
def tempurl_sig(self, method, expires, path, key):
|
|
|
|
return hmac.new(
|
|
|
|
key,
|
2015-10-08 15:03:52 +02:00
|
|
|
'%s\n%s\n%s' % (method, expires, urllib.parse.unquote(path)),
|
2014-03-07 15:53:05 -08:00
|
|
|
hashlib.sha1).hexdigest()
|
|
|
|
|
|
|
|
def test_GET(self):
|
|
|
|
expires = int(time.time()) + 86400
|
|
|
|
sig = self.tempurl_sig(
|
|
|
|
'GET', expires, self.env.conn.make_path(self.env.manifest.path),
|
|
|
|
self.env.tempurl_key)
|
|
|
|
parms = {'temp_url_sig': sig, 'temp_url_expires': str(expires)}
|
|
|
|
|
|
|
|
contents = self.env.manifest.read(
|
|
|
|
parms=parms,
|
|
|
|
cfg={'no_auth_token': True})
|
|
|
|
self.assertEqual(len(contents), 2 * 1024 * 1024)
|
|
|
|
|
|
|
|
# GET tempurls also allow HEAD requests
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(self.env.manifest.info(
|
2014-03-07 15:53:05 -08:00
|
|
|
parms=parms, cfg={'no_auth_token': True}))
|
|
|
|
|
|
|
|
|
|
|
|
class TestSloTempurlUTF8(Base2, TestSloTempurl):
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
|
2015-12-16 15:28:25 +00:00
|
|
|
class TestServiceToken(unittest2.TestCase):
|
2014-11-25 14:42:42 +00:00
|
|
|
|
|
|
|
def setUp(self):
|
|
|
|
if tf.skip_service_tokens:
|
|
|
|
raise SkipTest
|
|
|
|
|
|
|
|
self.SET_TO_USERS_TOKEN = 1
|
|
|
|
self.SET_TO_SERVICE_TOKEN = 2
|
|
|
|
|
|
|
|
# keystoneauth and tempauth differ in allowing PUT account
|
|
|
|
# Even if keystoneauth allows it, the proxy-server uses
|
|
|
|
# allow_account_management to decide if accounts can be created
|
|
|
|
self.put_account_expect = is_client_error
|
|
|
|
if tf.swift_test_auth_version != '1':
|
|
|
|
if cluster_info.get('swift').get('allow_account_management'):
|
|
|
|
self.put_account_expect = is_success
|
|
|
|
|
|
|
|
def _scenario_generator(self):
|
|
|
|
paths = ((None, None), ('c', None), ('c', 'o'))
|
|
|
|
for path in paths:
|
|
|
|
for method in ('PUT', 'POST', 'HEAD', 'GET', 'OPTIONS'):
|
|
|
|
yield method, path[0], path[1]
|
|
|
|
for path in reversed(paths):
|
|
|
|
yield 'DELETE', path[0], path[1]
|
|
|
|
|
|
|
|
def _assert_is_authed_response(self, method, container, object, resp):
|
|
|
|
resp.read()
|
|
|
|
expect = is_success
|
|
|
|
if method == 'DELETE' and not container:
|
|
|
|
expect = is_client_error
|
|
|
|
if method == 'PUT' and not container:
|
|
|
|
expect = self.put_account_expect
|
|
|
|
self.assertTrue(expect(resp.status), 'Unexpected %s for %s %s %s'
|
|
|
|
% (resp.status, method, container, object))
|
|
|
|
|
|
|
|
def _assert_not_authed_response(self, method, container, object, resp):
|
|
|
|
resp.read()
|
|
|
|
expect = is_client_error
|
|
|
|
if method == 'OPTIONS':
|
|
|
|
expect = is_success
|
|
|
|
self.assertTrue(expect(resp.status), 'Unexpected %s for %s %s %s'
|
|
|
|
% (resp.status, method, container, object))
|
|
|
|
|
|
|
|
def prepare_request(self, method, use_service_account=False,
|
|
|
|
container=None, obj=None, body=None, headers=None,
|
|
|
|
x_auth_token=None,
|
|
|
|
x_service_token=None, dbg=False):
|
|
|
|
"""
|
|
|
|
Setup for making the request
|
|
|
|
|
|
|
|
When retry() calls the do_request() function, it calls it the
|
|
|
|
test user's token, the parsed path, a connection and (optionally)
|
|
|
|
a token from the test service user. We save options here so that
|
|
|
|
do_request() can make the appropriate request.
|
|
|
|
|
2016-01-25 09:25:20 -08:00
|
|
|
:param method: The operation (e.g. 'HEAD')
|
2014-11-25 14:42:42 +00:00
|
|
|
:param use_service_account: Optional. Set True to change the path to
|
|
|
|
be the service account
|
|
|
|
:param container: Optional. Adds a container name to the path
|
|
|
|
:param obj: Optional. Adds an object name to the path
|
|
|
|
:param body: Optional. Adds a body (string) in the request
|
|
|
|
:param headers: Optional. Adds additional headers.
|
|
|
|
:param x_auth_token: Optional. Default is SET_TO_USERS_TOKEN. One of:
|
|
|
|
SET_TO_USERS_TOKEN Put the test user's token in
|
|
|
|
X-Auth-Token
|
|
|
|
SET_TO_SERVICE_TOKEN Put the service token in X-Auth-Token
|
|
|
|
:param x_service_token: Optional. Default is to not set X-Service-Token
|
|
|
|
to any value. If specified, is one of following:
|
|
|
|
SET_TO_USERS_TOKEN Put the test user's token in
|
|
|
|
X-Service-Token
|
|
|
|
SET_TO_SERVICE_TOKEN Put the service token in
|
|
|
|
X-Service-Token
|
|
|
|
:param dbg: Optional. Set true to check request arguments
|
|
|
|
"""
|
|
|
|
self.method = method
|
|
|
|
self.use_service_account = use_service_account
|
|
|
|
self.container = container
|
|
|
|
self.obj = obj
|
|
|
|
self.body = body
|
|
|
|
self.headers = headers
|
|
|
|
if x_auth_token:
|
|
|
|
self.x_auth_token = x_auth_token
|
|
|
|
else:
|
|
|
|
self.x_auth_token = self.SET_TO_USERS_TOKEN
|
|
|
|
self.x_service_token = x_service_token
|
|
|
|
self.dbg = dbg
|
|
|
|
|
|
|
|
def do_request(self, url, token, parsed, conn, service_token=''):
|
|
|
|
if self.use_service_account:
|
|
|
|
path = self._service_account(parsed.path)
|
|
|
|
else:
|
|
|
|
path = parsed.path
|
|
|
|
if self.container:
|
|
|
|
path += '/%s' % self.container
|
|
|
|
if self.obj:
|
|
|
|
path += '/%s' % self.obj
|
|
|
|
headers = {}
|
|
|
|
if self.body:
|
|
|
|
headers.update({'Content-Length': len(self.body)})
|
|
|
|
if self.x_auth_token == self.SET_TO_USERS_TOKEN:
|
|
|
|
headers.update({'X-Auth-Token': token})
|
|
|
|
elif self.x_auth_token == self.SET_TO_SERVICE_TOKEN:
|
|
|
|
headers.update({'X-Auth-Token': service_token})
|
|
|
|
if self.x_service_token == self.SET_TO_USERS_TOKEN:
|
|
|
|
headers.update({'X-Service-Token': token})
|
|
|
|
elif self.x_service_token == self.SET_TO_SERVICE_TOKEN:
|
|
|
|
headers.update({'X-Service-Token': service_token})
|
|
|
|
if self.dbg:
|
|
|
|
print('DEBUG: conn.request: method:%s path:%s'
|
|
|
|
' body:%s headers:%s' % (self.method, path, self.body,
|
|
|
|
headers))
|
|
|
|
conn.request(self.method, path, self.body, headers=headers)
|
|
|
|
return check_response(conn)
|
|
|
|
|
|
|
|
def _service_account(self, path):
|
|
|
|
parts = path.split('/', 3)
|
|
|
|
account = parts[2]
|
|
|
|
try:
|
|
|
|
project_id = account[account.index('_') + 1:]
|
|
|
|
except ValueError:
|
|
|
|
project_id = account
|
|
|
|
parts[2] = '%s%s' % (tf.swift_test_service_prefix, project_id)
|
|
|
|
return '/'.join(parts)
|
|
|
|
|
|
|
|
def test_user_access_own_auth_account(self):
|
|
|
|
# This covers ground tested elsewhere (tests a user doing HEAD
|
|
|
|
# on own account). However, if this fails, none of the remaining
|
|
|
|
# tests will work
|
|
|
|
self.prepare_request('HEAD')
|
|
|
|
resp = retry(self.do_request)
|
|
|
|
resp.read()
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(resp.status, (200, 204))
|
2014-11-25 14:42:42 +00:00
|
|
|
|
|
|
|
def test_user_cannot_access_service_account(self):
|
|
|
|
for method, container, obj in self._scenario_generator():
|
|
|
|
self.prepare_request(method, use_service_account=True,
|
|
|
|
container=container, obj=obj)
|
|
|
|
resp = retry(self.do_request)
|
|
|
|
self._assert_not_authed_response(method, container, obj, resp)
|
|
|
|
|
|
|
|
def test_service_user_denied_with_x_auth_token(self):
|
|
|
|
for method, container, obj in self._scenario_generator():
|
|
|
|
self.prepare_request(method, use_service_account=True,
|
|
|
|
container=container, obj=obj,
|
|
|
|
x_auth_token=self.SET_TO_SERVICE_TOKEN)
|
|
|
|
resp = retry(self.do_request, service_user=5)
|
|
|
|
self._assert_not_authed_response(method, container, obj, resp)
|
|
|
|
|
|
|
|
def test_service_user_denied_with_x_service_token(self):
|
|
|
|
for method, container, obj in self._scenario_generator():
|
|
|
|
self.prepare_request(method, use_service_account=True,
|
|
|
|
container=container, obj=obj,
|
|
|
|
x_auth_token=self.SET_TO_SERVICE_TOKEN,
|
|
|
|
x_service_token=self.SET_TO_SERVICE_TOKEN)
|
|
|
|
resp = retry(self.do_request, service_user=5)
|
|
|
|
self._assert_not_authed_response(method, container, obj, resp)
|
|
|
|
|
|
|
|
def test_user_plus_service_can_access_service_account(self):
|
|
|
|
for method, container, obj in self._scenario_generator():
|
|
|
|
self.prepare_request(method, use_service_account=True,
|
|
|
|
container=container, obj=obj,
|
|
|
|
x_auth_token=self.SET_TO_USERS_TOKEN,
|
|
|
|
x_service_token=self.SET_TO_SERVICE_TOKEN)
|
|
|
|
resp = retry(self.do_request, service_user=5)
|
|
|
|
self._assert_is_authed_response(method, container, obj, resp)
|
|
|
|
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
if __name__ == '__main__':
|
2015-12-16 15:28:25 +00:00
|
|
|
unittest2.main()
|