2010-07-12 17:03:45 -05:00
|
|
|
#!/usr/bin/python -u
|
2013-09-20 01:00:54 +08:00
|
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
|
2010-07-12 17:03:45 -05:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
|
|
# implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
|
|
|
from datetime import datetime
|
2013-11-18 13:17:48 -08:00
|
|
|
import hashlib
|
2014-03-06 13:11:03 -08:00
|
|
|
import hmac
|
2013-11-18 13:17:48 -08:00
|
|
|
import json
|
2010-07-12 17:03:45 -05:00
|
|
|
import locale
|
|
|
|
import random
|
|
|
|
import StringIO
|
|
|
|
import time
|
|
|
|
import unittest
|
2014-03-06 13:11:03 -08:00
|
|
|
import urllib
|
|
|
|
import uuid
|
2014-04-07 13:01:44 -04:00
|
|
|
import eventlet
|
2012-06-06 03:39:53 +09:00
|
|
|
from nose import SkipTest
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2014-04-10 15:37:15 -04:00
|
|
|
from test.functional import normalized_urls, load_constraint, cluster_info
|
2014-03-31 23:22:49 -04:00
|
|
|
import test.functional as tf
|
2012-09-05 20:49:50 -07:00
|
|
|
from test.functional.swift_test_client import Account, Connection, File, \
|
|
|
|
ResponseError
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2011-02-22 22:25:38 -06:00
|
|
|
|
Privileged acct ACL header, new ACL syntax, TempAuth impl.
* Introduce a new privileged account header: X-Account-Access-Control
* Introduce JSON-based version 2 ACL syntax -- see below for discussion
* Implement account ACL authorization in TempAuth
X-Account-Access-Control Header
-------------------------------
Accounts now have a new privileged header to represent ACLs or any other
form of account-level access control. The value of the header is an opaque
string to be interpreted by the auth system, but it must be a JSON-encoded
dictionary. A reference implementation is given in TempAuth, with the
knowledge that historically other auth systems often use TempAuth as a
starting point.
The reference implementation describes three levels of account access:
"admin", "read-write", and "read-only". Adding new access control
features in a future patch (e.g. "write-only" account access) will
automatically be forward- and backward-compatible, due to the JSON
dictionary header format.
The privileged X-Account-Access-Control header may only be read or written
by a user with "swift_owner" status, traditionally the account owner but
now also any user on the "admin" ACL.
Access Levels:
Read-only access is intended to indicate to the auth system that this
list of identities can read everything (except privileged headers) in
the account. Specifically, a user with read-only account access can get
a list of containers in the account, list the contents of any container,
retrieve any object, and see the (non-privileged) headers of the
account, any container, or any object.
Read-write access is intended to indicate to the auth system that this
list of identities can read or write (or create) any container. A user
with read-write account access can create new containers, set any
unprivileged container headers, overwrite objects, delete containers,
etc. A read-write user can NOT set account headers (or perform any
PUT/POST/DELETE requests on the account).
Admin access is intended to indicate to the auth system that this list of
identities has "swift_owner" privileges. A user with admin account access
can do anything the account owner can, including setting account headers
and any privileged headers -- and thus changing the value of
X-Account-Access-Control and thereby granting read-only, read-write, or
admin access to other users.
The auth system is responsible for making decisions based on this header,
if it chooses to support its use. Therefore the above access level
descriptions are necessarily advisory only for other auth systems.
When setting the value of the header, callers are urged to use the new
format_acl() method, described below.
New ACL Format
--------------
The account ACLs introduce a new format for ACLs, rather than reusing the
existing format from X-Container-Read/X-Container-Write. There are several
reasons for this:
* Container ACL format does not support Unicode
* Container ACLs have a different structure than account ACLs
+ account ACLs have no concept of referrers or rlistings
+ accounts have additional "admin" access level
+ account access levels are structured as admin > rw > ro, which seems more
appropriate for how people access accounts, rather than reusing
container ACLs' orthogonal read and write access
In addition, the container ACL syntax is a bit arbitrary and highly custom,
so instead of parsing additional custom syntax, I'd rather propose a next
version and introduce a means for migration. The V2 ACL syntax has the
following benefits:
* JSON is a well-known standard syntax with parsers in all languages
* no artificial value restrictions (you can grant access to a user named
".rlistings" if you want)
* forward and backward compatibility: you may have extraneous keys, but
your attempt to parse the header won't raise an exception
I've introduced hooks in parse_acl and format_acl which currently default
to the old V1 syntax but tolerate the V2 syntax and can easily be flipped
to default to V2. I'm not changing the default or adding code to rewrite
V1 ACLs to V2, because this patch has suffered a lot of scope creep already,
but this seems like a sensible milestone in the migration.
TempAuth Account ACL Implementation
-----------------------------------
As stated above, core Swift is responsible for privileging the
X-Account-Access-Control header (making it only accessible to swift_owners),
for translating it to -sysmeta-* headers to trigger persistence by the
account server, and for including the header in the responses to requests
by privileged users. Core Swift puts no expectation on the *content* of
this header. Auth systems (including TempAuth) are responsible for
defining the content of the header and taking action based on it.
In addition to the changes described above, this patch defines a format
to be used by TempAuth for these headers in the common.middleware.acl
module, in the methods format_v2_acl() and parse_v2_acl(). This patch
also teaches TempAuth to take action based on the header contents. TempAuth
now sets swift_owner=True if the user is on the Admin ACL, authorizes
GET/HEAD/OPTIONS requests if the user is on any ACL, authorizes
PUT/POST/DELETE requests if the user is on the admin or read-write ACL, etc.
Note that the action of setting swift_owner=True triggers core Swift to
add or strip the privileged headers from the responses. Core Swift (not
the auth system) is responsible for that.
DocImpact: Documentation for the new ACL usage and format appears in
summary form in doc/source/overview_auth.rst, and in more detail in
swift/common/middleware/tempauth.py in the TempAuth class docstring.
I leave it to the Swift doc team to determine whether more is needed.
Change-Id: I836a99eaaa6bb0e92dc03e1ca46a474522e6e826
2013-11-13 20:55:14 +00:00
|
|
|
class Utils(object):
    """Helpers for generating random ASCII and UTF-8 test names."""

    @classmethod
    def create_ascii_name(cls, length=None):
        """Return a random ASCII-safe name (*length* hint is ignored)."""
        return uuid.uuid4().hex

    @classmethod
    def create_utf8_name(cls, length=None):
        """Return a UTF-8 encoded name of *length* code points (default 15)."""
        length = 15 if length is None else int(length)

        # Pool of multi-byte code points used to exercise the server's
        # UTF-8 path handling.
        pool = (u'\uF10F\uD20D\uB30B\u9409\u8508\u5605\u3703\u1801'
                u'\u0900\uF110\uD20E\uB30C\u940A\u8509\u5606\u3704'
                u'\u1802\u0901\uF111\uD20F\uB30D\u940B\u850A\u5607'
                u'\u3705\u1803\u0902\uF112\uD210\uB30E\u940C\u850B'
                u'\u5608\u3706\u1804\u0903\u03A9\u2603')
        chosen = [random.choice(pool) for _ in xrange(length)]
        return ''.join(chosen).encode('utf-8')

    # Default name generator; Base2 swaps this to the UTF-8 variant.
    create_name = create_ascii_name
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class Base(unittest.TestCase):
    """Common plumbing for the functional tests: one-time lazy environment
    setup plus helpers asserting on the most recent HTTP response."""

    def setUp(self):
        # Run the class-level environment setup exactly once per test class;
        # the subclass provides `env` and starts with `set_up = False`.
        cls = type(self)
        if not cls.set_up:
            cls.env.setUp()
            cls.set_up = True

    def assert_body(self, body):
        """Assert that the last response body equals *body*."""
        actual = self.env.conn.response.read()
        self.assert_(actual == body, 'Body returned: %s' % (actual))

    def assert_status(self, status_or_statuses):
        """Assert the last response status equals, or is contained in,
        *status_or_statuses* (a single status or an iterable of them)."""
        status = self.env.conn.response.status
        matched = (status == status_or_statuses or
                   (hasattr(status_or_statuses, '__iter__') and
                    status in status_or_statuses))
        self.assert_(matched,
                     'Status returned: %d Expected: %s' %
                     (status, status_or_statuses))
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
class Base2(object):
    """Mixin that re-runs a paired test class with UTF-8 names.

    For each test it swaps Utils.create_name to the UTF-8 generator, then
    restores the ASCII generator in tearDown.
    """

    def setUp(self):
        Utils.create_name = Utils.create_utf8_name
        # Continue with the sibling class's setUp (Base.setUp via the MRO).
        super(Base2, self).setUp()

    def tearDown(self):
        Utils.create_name = Utils.create_ascii_name
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
Privileged acct ACL header, new ACL syntax, TempAuth impl.
* Introduce a new privileged account header: X-Account-Access-Control
* Introduce JSON-based version 2 ACL syntax -- see below for discussion
* Implement account ACL authorization in TempAuth
X-Account-Access-Control Header
-------------------------------
Accounts now have a new privileged header to represent ACLs or any other
form of account-level access control. The value of the header is an opaque
string to be interpreted by the auth system, but it must be a JSON-encoded
dictionary. A reference implementation is given in TempAuth, with the
knowledge that historically other auth systems often use TempAuth as a
starting point.
The reference implementation describes three levels of account access:
"admin", "read-write", and "read-only". Adding new access control
features in a future patch (e.g. "write-only" account access) will
automatically be forward- and backward-compatible, due to the JSON
dictionary header format.
The privileged X-Account-Access-Control header may only be read or written
by a user with "swift_owner" status, traditionally the account owner but
now also any user on the "admin" ACL.
Access Levels:
Read-only access is intended to indicate to the auth system that this
list of identities can read everything (except privileged headers) in
the account. Specifically, a user with read-only account access can get
a list of containers in the account, list the contents of any container,
retrieve any object, and see the (non-privileged) headers of the
account, any container, or any object.
Read-write access is intended to indicate to the auth system that this
list of identities can read or write (or create) any container. A user
with read-write account access can create new containers, set any
unprivileged container headers, overwrite objects, delete containers,
etc. A read-write user can NOT set account headers (or perform any
PUT/POST/DELETE requests on the account).
Admin access is intended to indicate to the auth system that this list of
identities has "swift_owner" privileges. A user with admin account access
can do anything the account owner can, including setting account headers
and any privileged headers -- and thus changing the value of
X-Account-Access-Control and thereby granting read-only, read-write, or
admin access to other users.
The auth system is responsible for making decisions based on this header,
if it chooses to support its use. Therefore the above access level
descriptions are necessarily advisory only for other auth systems.
When setting the value of the header, callers are urged to use the new
format_acl() method, described below.
New ACL Format
--------------
The account ACLs introduce a new format for ACLs, rather than reusing the
existing format from X-Container-Read/X-Container-Write. There are several
reasons for this:
* Container ACL format does not support Unicode
* Container ACLs have a different structure than account ACLs
+ account ACLs have no concept of referrers or rlistings
+ accounts have additional "admin" access level
+ account access levels are structured as admin > rw > ro, which seems more
appropriate for how people access accounts, rather than reusing
container ACLs' orthogonal read and write access
In addition, the container ACL syntax is a bit arbitrary and highly custom,
so instead of parsing additional custom syntax, I'd rather propose a next
version and introduce a means for migration. The V2 ACL syntax has the
following benefits:
* JSON is a well-known standard syntax with parsers in all languages
* no artificial value restrictions (you can grant access to a user named
".rlistings" if you want)
* forward and backward compatibility: you may have extraneous keys, but
your attempt to parse the header won't raise an exception
I've introduced hooks in parse_acl and format_acl which currently default
to the old V1 syntax but tolerate the V2 syntax and can easily be flipped
to default to V2. I'm not changing the default or adding code to rewrite
V1 ACLs to V2, because this patch has suffered a lot of scope creep already,
but this seems like a sensible milestone in the migration.
TempAuth Account ACL Implementation
-----------------------------------
As stated above, core Swift is responsible for privileging the
X-Account-Access-Control header (making it only accessible to swift_owners),
for translating it to -sysmeta-* headers to trigger persistence by the
account server, and for including the header in the responses to requests
by privileged users. Core Swift puts no expectation on the *content* of
this header. Auth systems (including TempAuth) are responsible for
defining the content of the header and taking action based on it.
In addition to the changes described above, this patch defines a format
to be used by TempAuth for these headers in the common.middleware.acl
module, in the methods format_v2_acl() and parse_v2_acl(). This patch
also teaches TempAuth to take action based on the header contents. TempAuth
now sets swift_owner=True if the user is on the Admin ACL, authorizes
GET/HEAD/OPTIONS requests if the user is on any ACL, authorizes
PUT/POST/DELETE requests if the user is on the admin or read-write ACL, etc.
Note that the action of setting swift_owner=True triggers core Swift to
add or strip the privileged headers from the responses. Core Swift (not
the auth system) is responsible for that.
DocImpact: Documentation for the new ACL usage and format appears in
summary form in doc/source/overview_auth.rst, and in more detail in
swift/common/middleware/tempauth.py in the TempAuth class docstring.
I leave it to the Swift doc team to determine whether more is needed.
Change-Id: I836a99eaaa6bb0e92dc03e1ca46a474522e6e826
2013-11-13 20:55:14 +00:00
|
|
|
class TestAccountEnv(object):
    """Shared fixture: an authenticated connection, the test account, and
    ten freshly created containers (cls.containers)."""

    @classmethod
    def setUp(cls):
        cls.conn = Connection(tf.config)
        cls.conn.authenticate()
        account_name = tf.config.get('account', tf.config['username'])
        cls.account = Account(cls.conn, account_name)
        # Start from a clean slate before creating the fixture containers.
        cls.account.delete_containers()

        cls.containers = []
        for _ in range(10):
            container = cls.account.container(Utils.create_name())
            if not container.create():
                raise ResponseError(cls.conn.response)

            cls.containers.append(container)
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestAccountDev(Base):
    # Environment class; instantiated lazily by Base.setUp on first use.
    env = TestAccountEnv
    # Per-class flag: has env.setUp() already run for this class?
    set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestAccountDevUTF8(Base2, TestAccountDev):
    # Own set_up flag so this class rebuilds the environment (with UTF-8
    # names) instead of reusing the parent class's state.
    set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestAccount(Base):
    """Functional tests for account-level requests (auth handling, HEAD
    statistics, container listings, markers and limits)."""

    env = TestAccountEnv
    set_up = False

    def testNoAuthToken(self):
        """Requests without an auth token must be rejected."""
        self.assertRaises(ResponseError, self.env.account.info,
                          cfg={'no_auth_token': True})
        # Either 401 or 412 is acceptable, depending on middleware.
        self.assert_status([401, 412])

        self.assertRaises(ResponseError, self.env.account.containers,
                          cfg={'no_auth_token': True})
        self.assert_status([401, 412])

    def testInvalidUTF8Path(self):
        """A byte-reversed UTF-8 name is invalid and must be refused."""
        invalid_utf8 = Utils.create_utf8_name()[::-1]
        container = self.env.account.container(invalid_utf8)
        # no_path_quote sends the raw (invalid) bytes on the wire.
        self.assert_(not container.create(cfg={'no_path_quote': True}))
        self.assert_status(412)
        self.assert_body('Invalid UTF8 or contains NULL')

    def testVersionOnlyPath(self):
        """A request path of just the API version is a bad URL."""
        self.env.account.conn.make_request('PUT',
                                           cfg={'version_only_path': True})
        self.assert_status(412)
        self.assert_body('Bad URL')

    def testInvalidPath(self):
        """A malformed storage path yields 404; always restore the URL."""
        was_url = self.env.account.conn.storage_url
        if normalized_urls:
            self.env.account.conn.storage_url = '/'
        else:
            self.env.account.conn.storage_url = "/%s" % was_url
        self.env.account.conn.make_request('GET')
        try:
            self.assert_status(404)
        finally:
            self.env.account.conn.storage_url = was_url

    def testPUT(self):
        """PUT on the account is not allowed for ordinary users."""
        self.env.account.conn.make_request('PUT')
        self.assert_status([403, 405])

    def testAccountHead(self):
        """HEAD reports sane stats; retry while counts are still settling
        (container stats are updated asynchronously)."""
        try_count = 0
        while try_count < 5:
            try_count += 1

            info = self.env.account.info()
            for field in ['object_count', 'container_count', 'bytes_used']:
                self.assert_(info[field] >= 0)

            if info['container_count'] == len(self.env.containers):
                break

            if try_count < 5:
                time.sleep(1)

        self.assertEqual(info['container_count'], len(self.env.containers))
        self.assert_status(204)

    def testContainerSerializedInfo(self):
        """json/xml listings carry per-container counts, bytes and the
        right content-type."""
        container_info = {}
        for container in self.env.containers:
            info = {'bytes': 0}
            info['count'] = random.randint(10, 30)
            for _ in range(info['count']):
                file_item = container.file(Utils.create_name())
                # Renamed from `bytes` to avoid shadowing the builtin.
                size = random.randint(1, 32768)
                file_item.write_random(size)
                info['bytes'] += size

            container_info[container.name] = info

        for format_type in ['json', 'xml']:
            for a in self.env.account.containers(
                    parms={'format': format_type}):
                self.assert_(a['count'] >= 0)
                self.assert_(a['bytes'] >= 0)

            headers = dict(self.env.conn.response.getheaders())
            if format_type == 'json':
                self.assertEqual(headers['content-type'],
                                 'application/json; charset=utf-8')
            elif format_type == 'xml':
                self.assertEqual(headers['content-type'],
                                 'application/xml; charset=utf-8')

    def testListingLimit(self):
        """limit= at or below the cluster maximum is honoured; above it the
        request is rejected with 412."""
        limit = load_constraint('account_listing_limit')
        for l in (1, 100, limit / 2, limit - 1, limit, limit + 1, limit * 2):
            p = {'limit': l}

            if l <= limit:
                self.assert_(len(self.env.account.containers(parms=p)) <= l)
                self.assert_status(200)
            else:
                self.assertRaises(ResponseError,
                                  self.env.account.containers, parms=p)
                self.assert_status(412)

    def testContainerListing(self):
        """Listings in every format return exactly the fixture containers,
        sorted by name."""
        a = sorted([c.name for c in self.env.containers])

        for format_type in [None, 'json', 'xml']:
            b = self.env.account.containers(parms={'format': format_type})

            # json/xml listings are dicts; reduce to names for comparison.
            if isinstance(b[0], dict):
                b = [x['name'] for x in b]

            self.assertEqual(a, b)

    def testInvalidAuthToken(self):
        """A bogus auth token is rejected with 401."""
        hdrs = {'X-Auth-Token': 'bogus_auth_token'}
        self.assertRaises(ResponseError, self.env.account.info, hdrs=hdrs)
        self.assert_status(401)

    def testLastContainerMarker(self):
        """Using the last container as marker yields an empty listing."""
        for format_type in [None, 'json', 'xml']:
            # BUG FIX: the format dict was previously passed positionally
            # (unlike every other listing call in this class, which uses
            # parms=), so the requested format was never applied.
            containers = self.env.account.containers(
                parms={'format': format_type})
            self.assertEqual(len(containers), len(self.env.containers))
            self.assert_status(200)

            # json/xml entries are dicts; the marker must be the name.
            if format_type is None:
                marker = containers[-1]
            else:
                marker = containers[-1]['name']

            containers = self.env.account.containers(
                parms={'format': format_type, 'marker': marker})
            self.assertEqual(len(containers), 0)
            if format_type is None:
                self.assert_status(204)
            else:
                self.assert_status(200)

    def testMarkerLimitContainerList(self):
        """marker= and limit= combine: at most `limit` results, all sorting
        strictly after the marker."""
        for format_type in [None, 'json', 'xml']:
            for marker in ['0', 'A', 'I', 'R', 'Z', 'a', 'i', 'r', 'z',
                           'abc123', 'mnop', 'xyz']:

                limit = random.randint(2, 9)
                containers = self.env.account.containers(
                    parms={'format': format_type,
                           'marker': marker,
                           'limit': limit})
                self.assert_(len(containers) <= limit)
                if containers:
                    if isinstance(containers[0], dict):
                        containers = [x['name'] for x in containers]
                    self.assert_(locale.strcoll(containers[0], marker) > 0)

    def testContainersOrderedByName(self):
        """Listings come back sorted by container name (locale collation)."""
        for format_type in [None, 'json', 'xml']:
            containers = self.env.account.containers(
                parms={'format': format_type})
            if isinstance(containers[0], dict):
                containers = [x['name'] for x in containers]
            self.assertEqual(sorted(containers, cmp=locale.strcoll),
                             containers)
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
class TestAccountUTF8(Base2, TestAccount):
    # Re-run TestAccount with UTF-8 names; own flag forces a fresh env.
    set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
Privileged acct ACL header, new ACL syntax, TempAuth impl.
* Introduce a new privileged account header: X-Account-Access-Control
* Introduce JSON-based version 2 ACL syntax -- see below for discussion
* Implement account ACL authorization in TempAuth
X-Account-Access-Control Header
-------------------------------
Accounts now have a new privileged header to represent ACLs or any other
form of account-level access control. The value of the header is an opaque
string to be interpreted by the auth system, but it must be a JSON-encoded
dictionary. A reference implementation is given in TempAuth, with the
knowledge that historically other auth systems often use TempAuth as a
starting point.
The reference implementation describes three levels of account access:
"admin", "read-write", and "read-only". Adding new access control
features in a future patch (e.g. "write-only" account access) will
automatically be forward- and backward-compatible, due to the JSON
dictionary header format.
The privileged X-Account-Access-Control header may only be read or written
by a user with "swift_owner" status, traditionally the account owner but
now also any user on the "admin" ACL.
Access Levels:
Read-only access is intended to indicate to the auth system that this
list of identities can read everything (except privileged headers) in
the account. Specifically, a user with read-only account access can get
a list of containers in the account, list the contents of any container,
retrieve any object, and see the (non-privileged) headers of the
account, any container, or any object.
Read-write access is intended to indicate to the auth system that this
list of identities can read or write (or create) any container. A user
with read-write account access can create new containers, set any
unprivileged container headers, overwrite objects, delete containers,
etc. A read-write user can NOT set account headers (or perform any
PUT/POST/DELETE requests on the account).
Admin access is intended to indicate to the auth system that this list of
identities has "swift_owner" privileges. A user with admin account access
can do anything the account owner can, including setting account headers
and any privileged headers -- and thus changing the value of
X-Account-Access-Control and thereby granting read-only, read-write, or
admin access to other users.
The auth system is responsible for making decisions based on this header,
if it chooses to support its use. Therefore the above access level
descriptions are necessarily advisory only for other auth systems.
When setting the value of the header, callers are urged to use the new
format_acl() method, described below.
New ACL Format
--------------
The account ACLs introduce a new format for ACLs, rather than reusing the
existing format from X-Container-Read/X-Container-Write. There are several
reasons for this:
* Container ACL format does not support Unicode
* Container ACLs have a different structure than account ACLs
+ account ACLs have no concept of referrers or rlistings
+ accounts have additional "admin" access level
+ account access levels are structured as admin > rw > ro, which seems more
appropriate for how people access accounts, rather than reusing
container ACLs' orthogonal read and write access
In addition, the container ACL syntax is a bit arbitrary and highly custom,
so instead of parsing additional custom syntax, I'd rather propose a next
version and introduce a means for migration. The V2 ACL syntax has the
following benefits:
* JSON is a well-known standard syntax with parsers in all languages
* no artificial value restrictions (you can grant access to a user named
".rlistings" if you want)
* forward and backward compatibility: you may have extraneous keys, but
your attempt to parse the header won't raise an exception
I've introduced hooks in parse_acl and format_acl which currently default
to the old V1 syntax but tolerate the V2 syntax and can easily be flipped
to default to V2. I'm not changing the default or adding code to rewrite
V1 ACLs to V2, because this patch has suffered a lot of scope creep already,
but this seems like a sensible milestone in the migration.
TempAuth Account ACL Implementation
-----------------------------------
As stated above, core Swift is responsible for privileging the
X-Account-Access-Control header (making it only accessible to swift_owners),
for translating it to -sysmeta-* headers to trigger persistence by the
account server, and for including the header in the responses to requests
by privileged users. Core Swift puts no expectation on the *content* of
this header. Auth systems (including TempAuth) are responsible for
defining the content of the header and taking action based on it.
In addition to the changes described above, this patch defines a format
to be used by TempAuth for these headers in the common.middleware.acl
module, in the methods format_v2_acl() and parse_v2_acl(). This patch
also teaches TempAuth to take action based on the header contents. TempAuth
now sets swift_owner=True if the user is on the Admin ACL, authorizes
GET/HEAD/OPTIONS requests if the user is on any ACL, authorizes
PUT/POST/DELETE requests if the user is on the admin or read-write ACL, etc.
Note that the action of setting swift_owner=True triggers core Swift to
add or strip the privileged headers from the responses. Core Swift (not
the auth system) is responsible for that.
DocImpact: Documentation for the new ACL usage and format appears in
summary form in doc/source/overview_auth.rst, and in more detail in
swift/common/middleware/tempauth.py in the TempAuth class docstring.
I leave it to the Swift doc team to determine whether more is needed.
Change-Id: I836a99eaaa6bb0e92dc03e1ca46a474522e6e826
2013-11-13 20:55:14 +00:00
|
|
|
class TestAccountNoContainersEnv(object):
    """Fixture that leaves the test account with no containers at all."""

    @classmethod
    def setUp(cls):
        cls.conn = Connection(tf.config)
        cls.conn.authenticate()
        account_name = tf.config.get('account', tf.config['username'])
        cls.account = Account(cls.conn, account_name)
        cls.account.delete_containers()
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestAccountNoContainers(Base):
    """Listing behaviour for an account that holds no containers."""

    env = TestAccountNoContainersEnv
    set_up = False

    def testGetRequest(self):
        """An empty account returns an empty listing in every format."""
        for fmt in [None, 'json', 'xml']:
            listing = self.env.account.containers(parms={'format': fmt})
            self.assert_(not listing)

            # A plain-text listing of nothing is 204; serialized formats
            # still return a (non-empty) document, hence 200.
            self.assert_status(204 if fmt is None else 200)
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestAccountNoContainersUTF8(Base2, TestAccountNoContainers):
    # Re-runs TestAccountNoContainers through the Base2 mixin
    # (presumably switching test names to UTF-8 -- see Base2).
    set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
Privileged acct ACL header, new ACL syntax, TempAuth impl.
* Introduce a new privileged account header: X-Account-Access-Control
* Introduce JSON-based version 2 ACL syntax -- see below for discussion
* Implement account ACL authorization in TempAuth
X-Account-Access-Control Header
-------------------------------
Accounts now have a new privileged header to represent ACLs or any other
form of account-level access control. The value of the header is an opaque
string to be interpreted by the auth system, but it must be a JSON-encoded
dictionary. A reference implementation is given in TempAuth, with the
knowledge that historically other auth systems often use TempAuth as a
starting point.
The reference implementation describes three levels of account access:
"admin", "read-write", and "read-only". Adding new access control
features in a future patch (e.g. "write-only" account access) will
automatically be forward- and backward-compatible, due to the JSON
dictionary header format.
The privileged X-Account-Access-Control header may only be read or written
by a user with "swift_owner" status, traditionally the account owner but
now also any user on the "admin" ACL.
Access Levels:
Read-only access is intended to indicate to the auth system that this
list of identities can read everything (except privileged headers) in
the account. Specifically, a user with read-only account access can get
a list of containers in the account, list the contents of any container,
retrieve any object, and see the (non-privileged) headers of the
account, any container, or any object.
Read-write access is intended to indicate to the auth system that this
list of identities can read or write (or create) any container. A user
with read-write account access can create new containers, set any
unprivileged container headers, overwrite objects, delete containers,
etc. A read-write user can NOT set account headers (or perform any
PUT/POST/DELETE requests on the account).
Admin access is intended to indicate to the auth system that this list of
identities has "swift_owner" privileges. A user with admin account access
can do anything the account owner can, including setting account headers
and any privileged headers -- and thus changing the value of
X-Account-Access-Control and thereby granting read-only, read-write, or
admin access to other users.
The auth system is responsible for making decisions based on this header,
if it chooses to support its use. Therefore the above access level
descriptions are necessarily advisory only for other auth systems.
When setting the value of the header, callers are urged to use the new
format_acl() method, described below.
New ACL Format
--------------
The account ACLs introduce a new format for ACLs, rather than reusing the
existing format from X-Container-Read/X-Container-Write. There are several
reasons for this:
* Container ACL format does not support Unicode
* Container ACLs have a different structure than account ACLs
+ account ACLs have no concept of referrers or rlistings
+ accounts have additional "admin" access level
+ account access levels are structured as admin > rw > ro, which seems more
appropriate for how people access accounts, rather than reusing
container ACLs' orthogonal read and write access
In addition, the container ACL syntax is a bit arbitrary and highly custom,
so instead of parsing additional custom syntax, I'd rather propose a next
version and introduce a means for migration. The V2 ACL syntax has the
following benefits:
* JSON is a well-known standard syntax with parsers in all languages
* no artificial value restrictions (you can grant access to a user named
".rlistings" if you want)
* forward and backward compatibility: you may have extraneous keys, but
your attempt to parse the header won't raise an exception
I've introduced hooks in parse_acl and format_acl which currently default
to the old V1 syntax but tolerate the V2 syntax and can easily be flipped
to default to V2. I'm not changing the default or adding code to rewrite
V1 ACLs to V2, because this patch has suffered a lot of scope creep already,
but this seems like a sensible milestone in the migration.
TempAuth Account ACL Implementation
-----------------------------------
As stated above, core Swift is responsible for privileging the
X-Account-Access-Control header (making it only accessible to swift_owners),
for translating it to -sysmeta-* headers to trigger persistence by the
account server, and for including the header in the responses to requests
by privileged users. Core Swift puts no expectation on the *content* of
this header. Auth systems (including TempAuth) are responsible for
defining the content of the header and taking action based on it.
In addition to the changes described above, this patch defines a format
to be used by TempAuth for these headers in the common.middleware.acl
module, in the methods format_v2_acl() and parse_v2_acl(). This patch
also teaches TempAuth to take action based on the header contents. TempAuth
now sets swift_owner=True if the user is on the Admin ACL, authorizes
GET/HEAD/OPTIONS requests if the user is on any ACL, authorizes
PUT/POST/DELETE requests if the user is on the admin or read-write ACL, etc.
Note that the action of setting swift_owner=True triggers core Swift to
add or strip the privileged headers from the responses. Core Swift (not
the auth system) is responsible for that.
DocImpact: Documentation for the new ACL usage and format appears in
summary form in doc/source/overview_auth.rst, and in more detail in
swift/common/middleware/tempauth.py in the TempAuth class docstring.
I leave it to the Swift doc team to determine whether more is needed.
Change-Id: I836a99eaaa6bb0e92dc03e1ca46a474522e6e826
2013-11-13 20:55:14 +00:00
|
|
|
class TestContainerEnv(object):
    """Test environment: one container pre-populated with random files.

    ``setUp`` empties the account, creates a single randomly named
    container, and writes ``file_count`` files of ``file_size`` random
    bytes each; the file names are recorded in ``cls.files``.
    """

    @classmethod
    def setUp(cls):
        cls.conn = Connection(tf.config)
        cls.conn.authenticate()
        # Fall back to the username when no explicit 'account' is configured.
        cls.account = Account(cls.conn, tf.config.get('account',
                                                      tf.config['username']))
        cls.account.delete_containers()

        cls.container = cls.account.container(Utils.create_name())
        if not cls.container.create():
            # Surface the failed PUT response to abort the whole class.
            raise ResponseError(cls.conn.response)

        cls.file_count = 10
        cls.file_size = 128
        cls.files = list()
        for x in range(cls.file_count):
            file_item = cls.container.file(Utils.create_name())
            file_item.write_random(cls.file_size)
            cls.files.append(file_item.name)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestContainerDev(Base):
    # Placeholder test class bound to the populated-container environment;
    # no test methods of its own are defined here.
    env = TestContainerEnv
    set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestContainerDevUTF8(Base2, TestContainerDev):
    # Re-runs TestContainerDev through the Base2 mixin
    # (presumably switching test names to UTF-8 -- see Base2).
    set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestContainer(Base):
    """Functional tests for container create/delete/list behavior
    against a live Swift cluster, using the pre-populated container
    provided by TestContainerEnv.
    """
    env = TestContainerEnv
    set_up = False

    def testContainerNameLimit(self):
        """Names at or under the cluster limit create (201); over it, 400."""
        limit = load_constraint('max_container_name_length')

        for l in (limit - 100, limit - 10, limit - 1, limit,
                  limit + 1, limit + 10, limit + 100):
            cont = self.env.account.container('a' * l)
            if l <= limit:
                self.assert_(cont.create())
                self.assert_status(201)
            else:
                self.assert_(not cont.create())
                self.assert_status(400)

    def testFileThenContainerDelete(self):
        """A container can be deleted once its only object is deleted."""
        cont = self.env.account.container(Utils.create_name())
        self.assert_(cont.create())
        file_item = cont.file(Utils.create_name())
        self.assert_(file_item.write_random())

        self.assert_(file_item.delete())
        self.assert_status(204)
        self.assert_(file_item.name not in cont.files())

        self.assert_(cont.delete())
        self.assert_status(204)
        self.assert_(cont.name not in self.env.account.containers())

    def testFileListingLimitMarkerPrefix(self):
        """limit/marker/prefix listing params page through sorted names."""
        cont = self.env.account.container(Utils.create_name())
        self.assert_(cont.create())

        files = sorted([Utils.create_name() for x in xrange(10)])
        for f in files:
            file_item = cont.file(f)
            self.assert_(file_item.write_random())

        for i in xrange(len(files)):
            f = files[i]
            # limit=j starting after marker f must return the next j names.
            for j in xrange(1, len(files) - i):
                self.assert_(cont.files(parms={'limit': j, 'marker': f}) ==
                             files[i + 1: i + j + 1])
            self.assert_(cont.files(parms={'marker': f}) == files[i + 1:])
            # marker == prefix: everything with that prefix sorts <= marker.
            self.assert_(cont.files(parms={'marker': f, 'prefix': f}) == [])
            self.assert_(cont.files(parms={'prefix': f}) == [f])

    def testPrefixAndLimit(self):
        """prefix filters listings; limit caps the number returned."""
        load_constraint('container_listing_limit')
        cont = self.env.account.container(Utils.create_name())
        self.assert_(cont.create())

        prefix_file_count = 10
        limit_count = 2
        prefixs = ['alpha/', 'beta/', 'kappa/']
        prefix_files = {}

        for prefix in prefixs:
            prefix_files[prefix] = []

            for i in range(prefix_file_count):
                file_item = cont.file(prefix + Utils.create_name())
                file_item.write()
                prefix_files[prefix].append(file_item.name)

        for format_type in [None, 'json', 'xml']:
            for prefix in prefixs:
                files = cont.files(parms={'prefix': prefix})
                self.assertEqual(files, sorted(prefix_files[prefix]))

        for format_type in [None, 'json', 'xml']:
            for prefix in prefixs:
                files = cont.files(parms={'limit': limit_count,
                                          'prefix': prefix})
                self.assertEqual(len(files), limit_count)

                for file_item in files:
                    self.assert_(file_item.startswith(prefix))

    def testCreate(self):
        """A new container PUT returns 201 and shows up in the listing."""
        cont = self.env.account.container(Utils.create_name())
        self.assert_(cont.create())
        self.assert_status(201)
        self.assert_(cont.name in self.env.account.containers())

    def testContainerFileListOnContainerThatDoesNotExist(self):
        """Listing a nonexistent container raises and returns 404."""
        for format_type in [None, 'json', 'xml']:
            container = self.env.account.container(Utils.create_name())
            self.assertRaises(ResponseError, container.files,
                              parms={'format': format_type})
            self.assert_status(404)

    def testUtf8Container(self):
        """Valid UTF-8 names work; invalid UTF-8 is rejected with 412."""
        valid_utf8 = Utils.create_utf8_name()
        # Reversing the bytes breaks the multi-byte sequences.
        invalid_utf8 = valid_utf8[::-1]
        container = self.env.account.container(valid_utf8)
        self.assert_(container.create(cfg={'no_path_quote': True}))
        self.assert_(container.name in self.env.account.containers())
        self.assertEqual(container.files(), [])
        self.assert_(container.delete())

        container = self.env.account.container(invalid_utf8)
        self.assert_(not container.create(cfg={'no_path_quote': True}))
        self.assert_status(412)
        self.assertRaises(ResponseError, container.files,
                          cfg={'no_path_quote': True})
        self.assert_status(412)

    def testCreateOnExisting(self):
        """Re-PUT of an existing container returns 202, not 201."""
        cont = self.env.account.container(Utils.create_name())
        self.assert_(cont.create())
        self.assert_status(201)
        self.assert_(cont.create())
        self.assert_status(202)

    def testSlashInName(self):
        """A container name containing '/' must be rejected (404)."""
        if Utils.create_name == Utils.create_utf8_name:
            cont_name = list(unicode(Utils.create_name(), 'utf-8'))
        else:
            cont_name = list(Utils.create_name())

        # Drop a '/' somewhere strictly inside the name.
        cont_name[random.randint(2, len(cont_name) - 2)] = '/'
        cont_name = ''.join(cont_name)

        if Utils.create_name == Utils.create_utf8_name:
            cont_name = cont_name.encode('utf-8')

        cont = self.env.account.container(cont_name)
        self.assert_(not cont.create(cfg={'no_path_quote': True}),
                     'created container with name %s' % (cont_name))
        self.assert_status(404)
        self.assert_(cont.name not in self.env.account.containers())

    def testDelete(self):
        """DELETE of an empty container returns 204 and removes it."""
        cont = self.env.account.container(Utils.create_name())
        self.assert_(cont.create())
        self.assert_status(201)
        self.assert_(cont.delete())
        self.assert_status(204)
        self.assert_(cont.name not in self.env.account.containers())

    def testDeleteOnContainerThatDoesNotExist(self):
        """DELETE of a nonexistent container returns 404."""
        cont = self.env.account.container(Utils.create_name())
        self.assert_(not cont.delete())
        self.assert_status(404)

    def testDeleteOnContainerWithFiles(self):
        """DELETE of a non-empty container is refused with 409 Conflict."""
        cont = self.env.account.container(Utils.create_name())
        self.assert_(cont.create())
        file_item = cont.file(Utils.create_name())
        file_item.write_random(self.env.file_size)
        self.assert_(file_item.name in cont.files())
        self.assert_(not cont.delete())
        self.assert_status(409)

    def testFileCreateInContainerThatDoesNotExist(self):
        """Object PUT into a nonexistent container fails with 404."""
        file_item = File(self.env.conn, self.env.account, Utils.create_name(),
                         Utils.create_name())
        self.assertRaises(ResponseError, file_item.write)
        self.assert_status(404)

    def testLastFileMarker(self):
        """Using the last listed name as marker yields an empty listing."""
        for format_type in [None, 'json', 'xml']:
            # NOTE(review): the dict here is passed positionally, so it
            # likely binds to the first parameter of files() rather than
            # to parms= as the other calls do -- confirm against
            # swift_test_client.Container.files; parms={'format': ...}
            # was probably intended.
            files = self.env.container.files({'format': format_type})
            self.assertEqual(len(files), len(self.env.files))
            self.assert_status(200)

            files = self.env.container.files(
                parms={'format': format_type, 'marker': files[-1]})
            self.assertEqual(len(files), 0)

            if format_type is None:
                # An empty plain-text listing is 204 No Content.
                self.assert_status(204)
            else:
                self.assert_status(200)

    def testContainerFileList(self):
        """The listing and the uploaded file set are equal (both ways)."""
        for format_type in [None, 'json', 'xml']:
            files = self.env.container.files(parms={'format': format_type})
            self.assert_status(200)
            # json/xml listings return dicts; reduce to the names.
            if isinstance(files[0], dict):
                files = [x['name'] for x in files]

            for file_item in self.env.files:
                self.assert_(file_item in files)

            for file_item in files:
                self.assert_(file_item in self.env.files)

    def testMarkerLimitFileList(self):
        """marker+limit listings respect the limit and sort after marker."""
        for format_type in [None, 'json', 'xml']:
            for marker in ['0', 'A', 'I', 'R', 'Z', 'a', 'i', 'r', 'z',
                           'abc123', 'mnop', 'xyz']:
                limit = random.randint(2, self.env.file_count - 1)
                files = self.env.container.files(parms={'format': format_type,
                                                        'marker': marker,
                                                        'limit': limit})

                if not files:
                    continue

                if isinstance(files[0], dict):
                    files = [x['name'] for x in files]

                self.assert_(len(files) <= limit)
                if files:
                    if isinstance(files[0], dict):
                        files = [x['name'] for x in files]
                    # Every returned name must collate strictly after marker.
                    self.assert_(locale.strcoll(files[0], marker) > 0)

    def testFileOrder(self):
        """Listings come back already sorted in collation order."""
        for format_type in [None, 'json', 'xml']:
            files = self.env.container.files(parms={'format': format_type})
            if isinstance(files[0], dict):
                files = [x['name'] for x in files]
            self.assertEqual(sorted(files, cmp=locale.strcoll), files)

    def testContainerInfo(self):
        """HEAD reports the expected object count and byte total."""
        info = self.env.container.info()
        self.assert_status(204)
        self.assertEqual(info['object_count'], self.env.file_count)
        self.assertEqual(info['bytes_used'],
                         self.env.file_count * self.env.file_size)

    def testContainerInfoOnContainerThatDoesNotExist(self):
        """HEAD of a nonexistent container raises and returns 404."""
        container = self.env.account.container(Utils.create_name())
        self.assertRaises(ResponseError, container.info)
        self.assert_status(404)

    def testContainerFileListWithLimit(self):
        """limit=2 returns exactly two entries in every format."""
        for format_type in [None, 'json', 'xml']:
            files = self.env.container.files(parms={'format': format_type,
                                                    'limit': 2})
            self.assertEqual(len(files), 2)

    def testTooLongName(self):
        """A 257-character name is over the limit and rejected with 400."""
        cont = self.env.account.container('x' * 257)
        self.assert_(not cont.create(),
                     'created container with name %s' % (cont.name))
        self.assert_status(400)

    def testContainerExistenceCachingProblem(self):
        """Container existence must not be stale-cached after creation.

        A failed listing (404) followed immediately by a create must not
        leave a cached 'does not exist' result: subsequent listing and
        object writes have to succeed.
        """
        cont = self.env.account.container(Utils.create_name())
        self.assertRaises(ResponseError, cont.files)
        self.assert_(cont.create())
        cont.files()

        cont = self.env.account.container(Utils.create_name())
        self.assertRaises(ResponseError, cont.files)
        self.assert_(cont.create())
        file_item = cont.file(Utils.create_name())
        file_item.write_random()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestContainerUTF8(Base2, TestContainer):
    # Re-runs every TestContainer test through the Base2 mixin
    # (presumably switching test names to UTF-8 -- see Base2).
    set_up = False
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
Privileged acct ACL header, new ACL syntax, TempAuth impl.
* Introduce a new privileged account header: X-Account-Access-Control
* Introduce JSON-based version 2 ACL syntax -- see below for discussion
* Implement account ACL authorization in TempAuth
X-Account-Access-Control Header
-------------------------------
Accounts now have a new privileged header to represent ACLs or any other
form of account-level access control. The value of the header is an opaque
string to be interpreted by the auth system, but it must be a JSON-encoded
dictionary. A reference implementation is given in TempAuth, with the
knowledge that historically other auth systems often use TempAuth as a
starting point.
The reference implementation describes three levels of account access:
"admin", "read-write", and "read-only". Adding new access control
features in a future patch (e.g. "write-only" account access) will
automatically be forward- and backward-compatible, due to the JSON
dictionary header format.
The privileged X-Account-Access-Control header may only be read or written
by a user with "swift_owner" status, traditionally the account owner but
now also any user on the "admin" ACL.
Access Levels:
Read-only access is intended to indicate to the auth system that this
list of identities can read everything (except privileged headers) in
the account. Specifically, a user with read-only account access can get
a list of containers in the account, list the contents of any container,
retrieve any object, and see the (non-privileged) headers of the
account, any container, or any object.
Read-write access is intended to indicate to the auth system that this
list of identities can read or write (or create) any container. A user
with read-write account access can create new containers, set any
unprivileged container headers, overwrite objects, delete containers,
etc. A read-write user can NOT set account headers (or perform any
PUT/POST/DELETE requests on the account).
Admin access is intended to indicate to the auth system that this list of
identities has "swift_owner" privileges. A user with admin account access
can do anything the account owner can, including setting account headers
and any privileged headers -- and thus changing the value of
X-Account-Access-Control and thereby granting read-only, read-write, or
admin access to other users.
The auth system is responsible for making decisions based on this header,
if it chooses to support its use. Therefore the above access level
descriptions are necessarily advisory only for other auth systems.
When setting the value of the header, callers are urged to use the new
format_acl() method, described below.
New ACL Format
--------------
The account ACLs introduce a new format for ACLs, rather than reusing the
existing format from X-Container-Read/X-Container-Write. There are several
reasons for this:
* Container ACL format does not support Unicode
* Container ACLs have a different structure than account ACLs
+ account ACLs have no concept of referrers or rlistings
+ accounts have additional "admin" access level
+ account access levels are structured as admin > rw > ro, which seems more
appropriate for how people access accounts, rather than reusing
container ACLs' orthogonal read and write access
In addition, the container ACL syntax is a bit arbitrary and highly custom,
so instead of parsing additional custom syntax, I'd rather propose a next
version and introduce a means for migration. The V2 ACL syntax has the
following benefits:
* JSON is a well-known standard syntax with parsers in all languages
* no artificial value restrictions (you can grant access to a user named
".rlistings" if you want)
* forward and backward compatibility: you may have extraneous keys, but
your attempt to parse the header won't raise an exception
I've introduced hooks in parse_acl and format_acl which currently default
to the old V1 syntax but tolerate the V2 syntax and can easily be flipped
to default to V2. I'm not changing the default or adding code to rewrite
V1 ACLs to V2, because this patch has suffered a lot of scope creep already,
but this seems like a sensible milestone in the migration.
TempAuth Account ACL Implementation
-----------------------------------
As stated above, core Swift is responsible for privileging the
X-Account-Access-Control header (making it only accessible to swift_owners),
for translating it to -sysmeta-* headers to trigger persistence by the
account server, and for including the header in the responses to requests
by privileged users. Core Swift puts no expectation on the *content* of
this header. Auth systems (including TempAuth) are responsible for
defining the content of the header and taking action based on it.
In addition to the changes described above, this patch defines a format
to be used by TempAuth for these headers in the common.middleware.acl
module, in the methods format_v2_acl() and parse_v2_acl(). This patch
also teaches TempAuth to take action based on the header contents. TempAuth
now sets swift_owner=True if the user is on the Admin ACL, authorizes
GET/HEAD/OPTIONS requests if the user is on any ACL, authorizes
PUT/POST/DELETE requests if the user is on the admin or read-write ACL, etc.
Note that the action of setting swift_owner=True triggers core Swift to
add or strip the privileged headers from the responses. Core Swift (not
the auth system) is responsible for that.
DocImpact: Documentation for the new ACL usage and format appears in
summary form in doc/source/overview_auth.rst, and in more detail in
swift/common/middleware/tempauth.py in the TempAuth class docstring.
I leave it to the Swift doc team to determine whether more is needed.
Change-Id: I836a99eaaa6bb0e92dc03e1ca46a474522e6e826
2013-11-13 20:55:14 +00:00
|
|
|
class TestContainerPathsEnv(object):
    """Test environment: a container holding a pseudo-directory tree.

    ``setUp`` uploads a fixed mix of 'directory' markers (names ending
    in '/', written with Content-Type application/directory) and regular
    files, both with and without a leading '/'.  ``cls.stored_files``
    records the names as the cluster is expected to store them: when
    ``normalized_urls`` is set, redundant slashes are collapsed first.
    """

    @classmethod
    def setUp(cls):
        cls.conn = Connection(tf.config)
        cls.conn.authenticate()
        # Fall back to the username when no explicit 'account' is configured.
        cls.account = Account(cls.conn, tf.config.get('account',
                                                      tf.config['username']))
        cls.account.delete_containers()

        cls.file_size = 8

        cls.container = cls.account.container(Utils.create_name())
        if not cls.container.create():
            # Surface the failed PUT response to abort the whole class.
            raise ResponseError(cls.conn.response)

        cls.files = [
            '/file1',
            '/file A',
            '/dir1/',
            '/dir2/',
            '/dir1/file2',
            '/dir1/subdir1/',
            '/dir1/subdir2/',
            '/dir1/subdir1/file2',
            '/dir1/subdir1/file3',
            '/dir1/subdir1/file4',
            '/dir1/subdir1/subsubdir1/',
            '/dir1/subdir1/subsubdir1/file5',
            '/dir1/subdir1/subsubdir1/file6',
            '/dir1/subdir1/subsubdir1/file7',
            '/dir1/subdir1/subsubdir1/file8',
            '/dir1/subdir1/subsubdir2/',
            '/dir1/subdir1/subsubdir2/file9',
            '/dir1/subdir1/subsubdir2/file0',
            'file1',
            'dir1/',
            'dir2/',
            'dir1/file2',
            'dir1/subdir1/',
            'dir1/subdir2/',
            'dir1/subdir1/file2',
            'dir1/subdir1/file3',
            'dir1/subdir1/file4',
            'dir1/subdir1/subsubdir1/',
            'dir1/subdir1/subsubdir1/file5',
            'dir1/subdir1/subsubdir1/file6',
            'dir1/subdir1/subsubdir1/file7',
            'dir1/subdir1/subsubdir1/file8',
            'dir1/subdir1/subsubdir2/',
            'dir1/subdir1/subsubdir2/file9',
            'dir1/subdir1/subsubdir2/file0',
            'dir1/subdir with spaces/',
            'dir1/subdir with spaces/file B',
            'dir1/subdir+with{whatever/',
            'dir1/subdir+with{whatever/file D',
        ]

        stored_files = set()
        for f in cls.files:
            file_item = cls.container.file(f)
            if f.endswith('/'):
                # Zero-byte 'directory marker' object.
                file_item.write(hdrs={'Content-Type': 'application/directory'})
            else:
                # NOTE(review): regular files are also uploaded with
                # Content-Type application/directory -- looks unintended;
                # confirm whether a non-directory type was meant here.
                file_item.write_random(cls.file_size,
                                       hdrs={'Content-Type':
                                             'application/directory'})
            if (normalized_urls):
                # Collapse runs of '/' but keep a trailing '/' marker.
                nfile = '/'.join(filter(None, f.split('/')))
                if (f[-1] == '/'):
                    nfile += '/'
                stored_files.add(nfile)
            else:
                stored_files.add(f)
        cls.stored_files = sorted(stored_files)
|
|
|
|
|
2013-03-26 20:42:26 +00:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestContainerPaths(Base):
|
|
|
|
env = TestContainerPathsEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
    def testTraverseContainer(self):
        """Recursively walk 'path' listings and compare against the
        expected stored names, once from root '' and once from '/'.
        """
        found_files = []
        found_dirs = []

        def recurse_path(path, count=0):
            # Safety valve: the fixture tree is shallow, so deeper
            # recursion means the server is looping us.
            if count > 10:
                raise ValueError('too deep recursion')

            for file_item in self.env.container.files(parms={'path': path}):
                self.assert_(file_item.startswith(path))
                if file_item.endswith('/'):
                    # Directory marker: descend, then record it.
                    recurse_path(file_item, count + 1)
                    found_dirs.append(file_item)
                else:
                    found_files.append(file_item)

        # Walk from the unrooted namespace: names starting with '/'
        # must not be reachable from here.
        recurse_path('')
        for file_item in self.env.stored_files:
            if file_item.startswith('/'):
                self.assert_(file_item not in found_dirs)
                self.assert_(file_item not in found_files)
            elif file_item.endswith('/'):
                self.assert_(file_item in found_dirs)
                self.assert_(file_item not in found_files)
            else:
                self.assert_(file_item in found_files)
                self.assert_(file_item not in found_dirs)

        # Walk again from '/': now only the '/'-rooted names appear.
        found_files = []
        found_dirs = []
        recurse_path('/')
        for file_item in self.env.stored_files:
            if not file_item.startswith('/'):
                self.assert_(file_item not in found_dirs)
                self.assert_(file_item not in found_files)
            elif file_item.endswith('/'):
                self.assert_(file_item in found_dirs)
                self.assert_(file_item not in found_files)
            else:
                self.assert_(file_item in found_files)
                self.assert_(file_item not in found_dirs)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
    def testContainerListing(self):
        """Full listings match stored_files; json/xml entries carry
        bytes, last_modified, and the directory content type.
        """
        for format_type in (None, 'json', 'xml'):
            files = self.env.container.files(parms={'format': format_type})

            # json/xml listings return dicts; reduce to the names.
            if isinstance(files[0], dict):
                files = [str(x['name']) for x in files]

            self.assertEqual(files, self.env.stored_files)

        for format_type in ('json', 'xml'):
            for file_item in self.env.container.files(parms={'format':
                                                             format_type}):
                self.assert_(int(file_item['bytes']) >= 0)
                self.assert_('last_modified' in file_item)
                if file_item['name'].endswith('/'):
                    # Directory markers keep the content type they were
                    # uploaded with.
                    self.assertEqual(file_item['content_type'],
                                     'application/directory')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
    def testStructure(self):
        """Verify 'path' query listings against the known directory tree.

        Expected listings differ depending on whether the cluster
        normalizes URLs (collapses leading slashes), hence the
        normalized_urls branch.
        """
        def assert_listing(path, file_list):
            # Server returns entries in locale collation order.
            files = self.env.container.files(parms={'path': path})
            self.assertEqual(sorted(file_list, cmp=locale.strcoll), files)

        if not normalized_urls:
            # leading-slash paths are only meaningful when URLs are not
            # normalized by the cluster
            assert_listing('/', ['/dir1/', '/dir2/', '/file1', '/file A'])
            assert_listing('/dir1',
                           ['/dir1/file2', '/dir1/subdir1/', '/dir1/subdir2/'])
            assert_listing('/dir1/',
                           ['/dir1/file2', '/dir1/subdir1/', '/dir1/subdir2/'])
            assert_listing('/dir1/subdir1',
                           ['/dir1/subdir1/subsubdir2/', '/dir1/subdir1/file2',
                            '/dir1/subdir1/file3', '/dir1/subdir1/file4',
                            '/dir1/subdir1/subsubdir1/'])
            assert_listing('/dir1/subdir2', [])
            assert_listing('', ['file1', 'dir1/', 'dir2/'])
        else:
            assert_listing('', ['file1', 'dir1/', 'dir2/', 'file A'])
        assert_listing('dir1', ['dir1/file2', 'dir1/subdir1/',
                                'dir1/subdir2/', 'dir1/subdir with spaces/',
                                'dir1/subdir+with{whatever/'])
        assert_listing('dir1/subdir1',
                       ['dir1/subdir1/file4', 'dir1/subdir1/subsubdir2/',
                        'dir1/subdir1/file2', 'dir1/subdir1/file3',
                        'dir1/subdir1/subsubdir1/'])
        assert_listing('dir1/subdir1/subsubdir1',
                       ['dir1/subdir1/subsubdir1/file7',
                        'dir1/subdir1/subsubdir1/file5',
                        'dir1/subdir1/subsubdir1/file8',
                        'dir1/subdir1/subsubdir1/file6'])
        assert_listing('dir1/subdir1/subsubdir1/',
                       ['dir1/subdir1/subsubdir1/file7',
                        'dir1/subdir1/subsubdir1/file5',
                        'dir1/subdir1/subsubdir1/file8',
                        'dir1/subdir1/subsubdir1/file6'])
        assert_listing('dir1/subdir with spaces/',
                       ['dir1/subdir with spaces/file B'])
|
|
|
|
Privileged acct ACL header, new ACL syntax, TempAuth impl.
* Introduce a new privileged account header: X-Account-Access-Control
* Introduce JSON-based version 2 ACL syntax -- see below for discussion
* Implement account ACL authorization in TempAuth
X-Account-Access-Control Header
-------------------------------
Accounts now have a new privileged header to represent ACLs or any other
form of account-level access control. The value of the header is an opaque
string to be interpreted by the auth system, but it must be a JSON-encoded
dictionary. A reference implementation is given in TempAuth, with the
knowledge that historically other auth systems often use TempAuth as a
starting point.
The reference implementation describes three levels of account access:
"admin", "read-write", and "read-only". Adding new access control
features in a future patch (e.g. "write-only" account access) will
automatically be forward- and backward-compatible, due to the JSON
dictionary header format.
The privileged X-Account-Access-Control header may only be read or written
by a user with "swift_owner" status, traditionally the account owner but
now also any user on the "admin" ACL.
Access Levels:
Read-only access is intended to indicate to the auth system that this
list of identities can read everything (except privileged headers) in
the account. Specifically, a user with read-only account access can get
a list of containers in the account, list the contents of any container,
retrieve any object, and see the (non-privileged) headers of the
account, any container, or any object.
Read-write access is intended to indicate to the auth system that this
list of identities can read or write (or create) any container. A user
with read-write account access can create new containers, set any
unprivileged container headers, overwrite objects, delete containers,
etc. A read-write user can NOT set account headers (or perform any
PUT/POST/DELETE requests on the account).
Admin access is intended to indicate to the auth system that this list of
identities has "swift_owner" privileges. A user with admin account access
can do anything the account owner can, including setting account headers
and any privileged headers -- and thus changing the value of
X-Account-Access-Control and thereby granting read-only, read-write, or
admin access to other users.
The auth system is responsible for making decisions based on this header,
if it chooses to support its use. Therefore the above access level
descriptions are necessarily advisory only for other auth systems.
When setting the value of the header, callers are urged to use the new
format_acl() method, described below.
New ACL Format
--------------
The account ACLs introduce a new format for ACLs, rather than reusing the
existing format from X-Container-Read/X-Container-Write. There are several
reasons for this:
* Container ACL format does not support Unicode
* Container ACLs have a different structure than account ACLs
+ account ACLs have no concept of referrers or rlistings
+ accounts have additional "admin" access level
+ account access levels are structured as admin > rw > ro, which seems more
appropriate for how people access accounts, rather than reusing
container ACLs' orthogonal read and write access
In addition, the container ACL syntax is a bit arbitrary and highly custom,
so instead of parsing additional custom syntax, I'd rather propose a next
version and introduce a means for migration. The V2 ACL syntax has the
following benefits:
* JSON is a well-known standard syntax with parsers in all languages
* no artificial value restrictions (you can grant access to a user named
".rlistings" if you want)
* forward and backward compatibility: you may have extraneous keys, but
your attempt to parse the header won't raise an exception
I've introduced hooks in parse_acl and format_acl which currently default
to the old V1 syntax but tolerate the V2 syntax and can easily be flipped
to default to V2. I'm not changing the default or adding code to rewrite
V1 ACLs to V2, because this patch has suffered a lot of scope creep already,
but this seems like a sensible milestone in the migration.
TempAuth Account ACL Implementation
-----------------------------------
As stated above, core Swift is responsible for privileging the
X-Account-Access-Control header (making it only accessible to swift_owners),
for translating it to -sysmeta-* headers to trigger persistence by the
account server, and for including the header in the responses to requests
by privileged users. Core Swift puts no expectation on the *content* of
this header. Auth systems (including TempAuth) are responsible for
defining the content of the header and taking action based on it.
In addition to the changes described above, this patch defines a format
to be used by TempAuth for these headers in the common.middleware.acl
module, in the methods format_v2_acl() and parse_v2_acl(). This patch
also teaches TempAuth to take action based on the header contents. TempAuth
now sets swift_owner=True if the user is on the Admin ACL, authorizes
GET/HEAD/OPTIONS requests if the user is on any ACL, authorizes
PUT/POST/DELETE requests if the user is on the admin or read-write ACL, etc.
Note that the action of setting swift_owner=True triggers core Swift to
add or strip the privileged headers from the responses. Core Swift (not
the auth system) is responsible for that.
DocImpact: Documentation for the new ACL usage and format appears in
summary form in doc/source/overview_auth.rst, and in more detail in
swift/common/middleware/tempauth.py in the TempAuth class docstring.
I leave it to the Swift doc team to determine whether more is needed.
Change-Id: I836a99eaaa6bb0e92dc03e1ca46a474522e6e826
2013-11-13 20:55:14 +00:00
|
|
|
class TestFileEnv(object):
    """Shared setup for the file (object) tests: one freshly created
    container in a cleaned-out account, plus a default payload size."""

    @classmethod
    def setUp(cls):
        cls.conn = Connection(tf.config)
        cls.conn.authenticate()
        cls.account = Account(cls.conn, tf.config.get('account',
                                                      tf.config['username']))
        # start from a clean slate so listings in the tests are predictable
        cls.account.delete_containers()

        cls.container = cls.account.container(Utils.create_name())
        if not cls.container.create():
            raise ResponseError(cls.conn.response)

        # default object size (bytes) used by tests that need a payload
        cls.file_size = 128
|
|
|
class TestFileDev(Base):
    # Runs the Base-driven file tests against the shared TestFileEnv.
    env = TestFileEnv
    set_up = False
|
|
|
class TestFileDevUTF8(Base2, TestFileDev):
    # Same tests as TestFileDev, re-run with UTF-8 names via Base2.
    set_up = False
|
|
|
class TestFile(Base):
|
|
|
|
env = TestFileEnv
|
|
|
|
set_up = False
|
|
|
|
|
|
|
|
    def testCopy(self):
        """COPY an object (name contains encoded characters) within and
        across containers; data and metadata must follow the copy."""
        # makes sure to test encoded characters
        source_filename = 'dealde%2Fl04 011e%204c8df/flash.png'
        file_item = self.env.container.file(source_filename)

        metadata = {}
        for i in range(1):
            metadata[Utils.create_ascii_name()] = Utils.create_name()

        data = file_item.write_random()
        file_item.sync_metadata(metadata)

        dest_cont = self.env.account.container(Utils.create_name())
        self.assert_(dest_cont.create())

        # copy both from within and across containers
        for cont in (self.env.container, dest_cont):
            # copy both with and without initial slash
            for prefix in ('', '/'):
                dest_filename = Utils.create_name()

                file_item = self.env.container.file(source_filename)
                file_item.copy('%s%s' % (prefix, cont), dest_filename)

                self.assert_(dest_filename in cont.files())

                file_item = cont.file(dest_filename)

                # the copy must carry both the payload and the metadata
                self.assert_(data == file_item.read())
                self.assert_(file_item.initialize())
                self.assert_(metadata == file_item.metadata)
|
|
|
    def testCopy404s(self):
        """COPY with a missing source container, missing source object, or
        missing destination container must yield 404."""
        source_filename = Utils.create_name()
        file_item = self.env.container.file(source_filename)
        file_item.write_random()

        dest_cont = self.env.account.container(Utils.create_name())
        self.assert_(dest_cont.create())

        for prefix in ('', '/'):
            # invalid source container
            source_cont = self.env.account.container(Utils.create_name())
            file_item = source_cont.file(source_filename)
            self.assert_(not file_item.copy(
                '%s%s' % (prefix, self.env.container),
                Utils.create_name()))
            self.assert_status(404)

            self.assert_(not file_item.copy('%s%s' % (prefix, dest_cont),
                                            Utils.create_name()))
            self.assert_status(404)

            # invalid source object
            file_item = self.env.container.file(Utils.create_name())
            self.assert_(not file_item.copy(
                '%s%s' % (prefix, self.env.container),
                Utils.create_name()))
            self.assert_status(404)

            self.assert_(not file_item.copy('%s%s' % (prefix, dest_cont),
                                            Utils.create_name()))
            self.assert_status(404)

            # invalid destination container
            file_item = self.env.container.file(source_filename)
            self.assert_(not file_item.copy(
                '%s%s' % (prefix, Utils.create_name()),
                Utils.create_name()))
|
|
|
def testCopyNoDestinationHeader(self):
|
|
|
|
source_filename = Utils.create_name()
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(source_filename)
|
|
|
|
file_item.write_random()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(source_filename)
|
|
|
|
self.assert_(not file_item.copy(Utils.create_name(),
|
|
|
|
Utils.create_name(),
|
2012-09-03 23:30:52 +08:00
|
|
|
cfg={'no_destination': True}))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(412)
|
|
|
|
|
|
|
|
def testCopyDestinationSlashProblems(self):
|
|
|
|
source_filename = Utils.create_name()
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(source_filename)
|
|
|
|
file_item.write_random()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
# no slash
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assert_(not file_item.copy(Utils.create_name(),
|
|
|
|
Utils.create_name(),
|
2012-09-03 23:30:52 +08:00
|
|
|
cfg={'destination': Utils.create_name()}))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(412)
|
|
|
|
|
|
|
|
    def testCopyFromHeader(self):
        """PUT with X-Copy-From duplicates data and metadata, both within
        and across containers, with and without a leading slash."""
        source_filename = Utils.create_name()
        file_item = self.env.container.file(source_filename)

        metadata = {}
        for i in range(1):
            metadata[Utils.create_ascii_name()] = Utils.create_name()
        file_item.metadata = metadata

        data = file_item.write_random()

        dest_cont = self.env.account.container(Utils.create_name())
        self.assert_(dest_cont.create())

        # copy both from within and across containers
        for cont in (self.env.container, dest_cont):
            # copy both with and without initial slash
            for prefix in ('', '/'):
                dest_filename = Utils.create_name()

                file_item = cont.file(dest_filename)
                file_item.write(hdrs={'X-Copy-From': '%s%s/%s' % (
                    prefix, self.env.container.name, source_filename)})

                self.assert_(dest_filename in cont.files())

                file_item = cont.file(dest_filename)

                # data and metadata must both follow the server-side copy
                self.assert_(data == file_item.read())
                self.assert_(file_item.initialize())
                self.assert_(metadata == file_item.metadata)
|
|
|
    def testCopyFromHeader404s(self):
        """PUT with X-Copy-From referencing a missing source container,
        missing source object, or into a missing destination container
        must raise and report 404."""
        source_filename = Utils.create_name()
        file_item = self.env.container.file(source_filename)
        file_item.write_random()

        for prefix in ('', '/'):
            # invalid source container
            file_item = self.env.container.file(Utils.create_name())
            self.assertRaises(ResponseError, file_item.write,
                              hdrs={'X-Copy-From': '%s%s/%s' %
                                    (prefix,
                                     Utils.create_name(), source_filename)})
            self.assert_status(404)

            # invalid source object
            file_item = self.env.container.file(Utils.create_name())
            self.assertRaises(ResponseError, file_item.write,
                              hdrs={'X-Copy-From': '%s%s/%s' %
                                    (prefix,
                                     self.env.container.name,
                                     Utils.create_name())})
            self.assert_status(404)

            # invalid destination container
            dest_cont = self.env.account.container(Utils.create_name())
            file_item = dest_cont.file(Utils.create_name())
            self.assertRaises(ResponseError, file_item.write,
                              hdrs={'X-Copy-From': '%s%s/%s' %
                                    (prefix,
                                     self.env.container.name,
                                     source_filename)})
            self.assert_status(404)
|
|
|
    def testNameLimit(self):
        """Object names at or below max_object_name_length succeed (201);
        longer names are rejected with 400."""
        limit = load_constraint('max_object_name_length')

        # py2 integer division intended for limit / 2
        for l in (1, 10, limit / 2, limit - 1, limit, limit + 1, limit * 2):
            file_item = self.env.container.file('a' * l)

            if l <= limit:
                self.assert_(file_item.write())
                self.assert_status(201)
            else:
                self.assertRaises(ResponseError, file_item.write)
                self.assert_status(400)
|
|
|
    def testQuestionMarkInName(self):
        """An unquoted '?' in a PUT path starts the query string, so the
        object is stored under the name truncated at the '?'."""
        if Utils.create_name == Utils.create_ascii_name:
            # ascii mode: splice a '?' into a generated name
            file_name = list(Utils.create_name())
            file_name[random.randint(2, len(file_name) - 2)] = '?'
            file_name = "".join(file_name)
        else:
            file_name = Utils.create_name(6) + '?' + Utils.create_name(6)

        file_item = self.env.container.file(file_name)
        # no_path_quote leaves the '?' unescaped on the wire
        self.assert_(file_item.write(cfg={'no_path_quote': True}))
        self.assert_(file_name not in self.env.container.files())
        self.assert_(file_name.split('?')[0] in self.env.container.files())
|
|
|
    def testDeleteThen404s(self):
        """After deleting an object, every per-object verb must 404."""
        file_item = self.env.container.file(Utils.create_name())
        self.assert_(file_item.write_random())
        self.assert_status(201)

        self.assert_(file_item.delete())
        self.assert_status(204)

        # metadata set so sync_metadata below has something to POST
        file_item.metadata = {Utils.create_ascii_name(): Utils.create_name()}

        for method in (file_item.info,
                       file_item.read,
                       file_item.sync_metadata,
                       file_item.delete):
            self.assertRaises(ResponseError, method)
            self.assert_status(404)
|
|
|
def testBlankMetadataName(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
file_item.metadata = {'': Utils.create_name()}
|
|
|
|
self.assertRaises(ResponseError, file_item.write_random)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(400)
|
|
|
|
|
|
|
|
    def testMetadataNumberLimit(self):
        """At most max_meta_count metadata items are accepted; above that
        both PUT and POST must fail with 400."""
        number_limit = load_constraint('max_meta_count')
        size_limit = load_constraint('max_meta_overall_size')

        for i in (number_limit - 10, number_limit - 1, number_limit,
                  number_limit + 1, number_limit + 10, number_limit + 100):

            # cap each key+value so the overall metadata size stays within
            # size_limit even at the largest counts (py2 integer division)
            j = size_limit / (i * 2)

            size = 0
            metadata = {}
            while len(metadata.keys()) < i:
                key = Utils.create_ascii_name()
                val = Utils.create_name()

                if len(key) > j:
                    key = key[:j]
                    val = val[:j]

                size += len(key) + len(val)
                metadata[key] = val

            file_item = self.env.container.file(Utils.create_name())
            file_item.metadata = metadata

            if i <= number_limit:
                self.assert_(file_item.write())
                self.assert_status(201)
                self.assert_(file_item.sync_metadata())
                self.assert_status((201, 202))
            else:
                # over the count limit: PUT fails ...
                self.assertRaises(ResponseError, file_item.write)
                self.assert_status(400)
                # ... the object can be created without metadata ...
                file_item.metadata = {}
                self.assert_(file_item.write())
                self.assert_status(201)
                # ... and a metadata POST fails the same way
                file_item.metadata = metadata
                self.assertRaises(ResponseError, file_item.sync_metadata)
                self.assert_status(400)
|
|
|
    def testContentTypeGuessing(self):
        """When a PUT sends no Content-Type, the server guesses it from
        the object name's extension."""
        file_types = {'wav': 'audio/x-wav', 'txt': 'text/plain',
                      'zip': 'application/zip'}

        container = self.env.account.container(Utils.create_name())
        self.assert_(container.create())

        for i in file_types.keys():
            file_item = container.file(Utils.create_name() + '.' + i)
            # suppress the client-side Content-Type header entirely
            file_item.write('', cfg={'no_content_type': True})

        file_types_read = {}
        for i in container.files(parms={'format': 'json'}):
            file_types_read[i['name'].split('.')[1]] = i['content_type']

        self.assertEqual(file_types, file_types_read)
|
|
|
def testRangedGets(self):
|
|
|
|
file_length = 10000
|
2012-09-03 23:30:52 +08:00
|
|
|
range_size = file_length / 10
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
data = file_item.write_random(file_length)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
for i in range(0, file_length, range_size):
|
2012-09-03 23:30:52 +08:00
|
|
|
range_string = 'bytes=%d-%d' % (i, i + range_size - 1)
|
2010-07-12 17:03:45 -05:00
|
|
|
hdrs = {'Range': range_string}
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assert_(data[i: i + range_size] == file_item.read(hdrs=hdrs),
|
2012-09-03 23:30:52 +08:00
|
|
|
range_string)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
range_string = 'bytes=-%d' % (i)
|
|
|
|
hdrs = {'Range': range_string}
|
2012-10-03 14:20:52 -07:00
|
|
|
if i == 0:
|
|
|
|
# RFC 2616 14.35.1
|
|
|
|
# "If a syntactically valid byte-range-set includes ... at
|
|
|
|
# least one suffix-byte-range-spec with a NON-ZERO
|
|
|
|
# suffix-length, then the byte-range-set is satisfiable.
|
|
|
|
# Otherwise, the byte-range-set is unsatisfiable.
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
|
2012-10-03 14:20:52 -07:00
|
|
|
self.assert_status(416)
|
|
|
|
else:
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(file_item.read(hdrs=hdrs), data[-i:])
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
range_string = 'bytes=%d-' % (i)
|
|
|
|
hdrs = {'Range': range_string}
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assert_(file_item.read(hdrs=hdrs) == data[i - file_length:],
|
2012-09-03 23:30:52 +08:00
|
|
|
range_string)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
range_string = 'bytes=%d-%d' % (file_length + 1000, file_length + 2000)
|
2010-07-12 17:03:45 -05:00
|
|
|
hdrs = {'Range': range_string}
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(416)
|
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
range_string = 'bytes=%d-%d' % (file_length - 1000, file_length + 2000)
|
2010-07-12 17:03:45 -05:00
|
|
|
hdrs = {'Range': range_string}
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assert_(file_item.read(hdrs=hdrs) == data[-1000:], range_string)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
hdrs = {'Range': '0-4'}
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assert_(file_item.read(hdrs=hdrs) == data, range_string)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-10-03 14:20:52 -07:00
|
|
|
# RFC 2616 14.35.1
|
|
|
|
# "If the entity is shorter than the specified suffix-length, the
|
|
|
|
# entire entity-body is used."
|
|
|
|
range_string = 'bytes=-%d' % (file_length + 10)
|
|
|
|
hdrs = {'Range': range_string}
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assert_(file_item.read(hdrs=hdrs) == data, range_string)
|
2012-10-03 14:20:52 -07:00
|
|
|
|
2012-06-06 03:39:53 +09:00
|
|
|
    def testRangedGetsWithLWSinHeader(self):
        """Range headers containing linear whitespace or case variations
        must still be honored as bytes 0-999."""
        # NOTE(review): the comment below predates webob 1.2 but the test is
        # not actually skipped here -- confirm whether a skip is still needed.
        #Skip this test until webob 1.2 can tolerate LWS in Range header.
        file_length = 10000
        file_item = self.env.container.file(Utils.create_name())
        data = file_item.write_random(file_length)

        for r in ('BYTES=0-999', 'bytes = 0-999', 'BYTES = 0 - 999',
                  'bytes = 0 - 999', 'bytes=0 - 999', 'bytes=0-999 '):

            self.assert_(file_item.read(hdrs={'Range': r}) == data[0:1000])
|
|
|
    def testFileSizeLimit(self):
        """PUTs up to max_file_size are accepted; larger ones are rejected.

        Writes are wrapped in an eventlet.Timeout so an accepted upload
        that keeps streaming doesn't hang the test; hitting the timeout
        counts as success for the within-limit case.
        """
        limit = load_constraint('max_file_size')
        tsecs = 3

        def timeout(seconds, method, *args, **kwargs):
            # Returns True if the call was cut off by the timeout,
            # False if it completed; exceptions from method propagate.
            try:
                with eventlet.Timeout(seconds):
                    method(*args, **kwargs)
            except eventlet.Timeout:
                return True
            else:
                return False

        for i in (limit - 100, limit - 10, limit - 1, limit, limit + 1,
                  limit + 10, limit + 100):

            file_item = self.env.container.file(Utils.create_name())

            if i <= limit:
                self.assert_(timeout(tsecs, file_item.write,
                             cfg={'set_content_length': i}))
            else:
                self.assertRaises(ResponseError, timeout, tsecs,
                                  file_item.write,
                                  cfg={'set_content_length': i})
|
|
|
def testNoContentLengthForPut(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.write, 'testing',
|
2012-09-03 23:30:52 +08:00
|
|
|
cfg={'no_content_length': True})
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(411)
|
|
|
|
|
|
|
|
def testDelete(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
file_item.write_random(self.env.file_size)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assert_(file_item.name in self.env.container.files())
|
|
|
|
self.assert_(file_item.delete())
|
|
|
|
self.assert_(file_item.name not in self.env.container.files())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
    def testBadHeaders(self):
        """Assorted malformed-request cases: missing/garbage headers and
        unknown verbs."""
        file_length = 100

        # no content type on puts should be ok
        file_item = self.env.container.file(Utils.create_name())
        file_item.write_random(file_length, cfg={'no_content_type': True})
        self.assert_status(201)

        # non-numeric Content-Length must be rejected with 400
        self.assertRaises(ResponseError, file_item.write_random, file_length,
                          hdrs={'Content-Length': 'X'},
                          cfg={'no_content_length': True})
        self.assert_status(400)

        # unknown/internal request verbs must 405 (only these two are
        # exercised; other internal names were dropped from the list)
        for req in ('LICK', 'GETorHEAD_base'):
            self.env.account.conn.make_request(req)
            self.assert_status(405)

        # an unrecognized range unit is ignored: full body, status 200
        self.assert_(len(file_item.read(hdrs={'Range': 'parsecs=8-12'})) ==
                     file_length)
        self.assert_status(200)
|
|
|
    def testMetadataLengthLimits(self):
        """Metadata key/value lengths at the limits succeed; oversized
        keys or values fail on both PUT and POST with 400."""
        key_limit = load_constraint('max_meta_name_length')
        value_limit = load_constraint('max_meta_value_length')
        lengths = [[key_limit, value_limit], [key_limit, value_limit + 1],
                   [key_limit + 1, value_limit], [key_limit, 0],
                   [key_limit, value_limit * 10],
                   [key_limit * 10, value_limit]]

        for l in lengths:
            metadata = {'a' * l[0]: 'b' * l[1]}
            file_item = self.env.container.file(Utils.create_name())
            file_item.metadata = metadata

            if l[0] <= key_limit and l[1] <= value_limit:
                self.assert_(file_item.write())
                self.assert_status(201)
                self.assert_(file_item.sync_metadata())
            else:
                # over a limit: PUT fails, object can be made without
                # metadata, then the metadata POST fails the same way
                self.assertRaises(ResponseError, file_item.write)
                self.assert_status(400)
                file_item.metadata = {}
                self.assert_(file_item.write())
                self.assert_status(201)
                file_item.metadata = metadata
                self.assertRaises(ResponseError, file_item.sync_metadata)
                self.assert_status(400)
|
|
|
def testEtagWayoff(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
2010-07-12 17:03:45 -05:00
|
|
|
hdrs = {'etag': 'reallylonganddefinitelynotavalidetagvalue'}
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.write_random, hdrs=hdrs)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(422)
|
|
|
|
|
|
|
|
def testFileCreate(self):
|
|
|
|
for i in range(10):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
data = file_item.write_random()
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(201)
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assert_(data == file_item.read())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(200)
|
|
|
|
|
|
|
|
    def testHead(self):
        """HEAD of a freshly written object reports length, etag,
        content type, and a last_modified entry."""
        file_name = Utils.create_name()
        content_type = Utils.create_name()

        file_item = self.env.container.file(file_name)
        file_item.content_type = content_type
        file_item.write_random(self.env.file_size)

        md5 = file_item.md5

        # re-open by name so info() reflects only server-side state
        file_item = self.env.container.file(file_name)
        info = file_item.info()

        self.assert_status(200)
        self.assertEqual(info['content_length'], self.env.file_size)
        self.assertEqual(info['etag'], md5)
        self.assertEqual(info['content_type'], content_type)
        self.assert_('last_modified' in info)
|
|
|
def testDeleteOfFileThatDoesNotExist(self):
|
|
|
|
# in container that exists
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.delete)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
# in container that does not exist
|
|
|
|
container = self.env.account.container(Utils.create_name())
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = container.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.delete)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
def testHeadOnFileThatDoesNotExist(self):
|
|
|
|
# in container that exists
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.info)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
# in container that does not exist
|
|
|
|
container = self.env.account.container(Utils.create_name())
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = container.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.info)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
def testMetadataOnPost(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
file_item.write_random(self.env.file_size)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
for i in range(10):
|
|
|
|
metadata = {}
|
2013-08-13 21:57:51 +08:00
|
|
|
for j in range(10):
|
2012-10-23 09:48:24 +02:00
|
|
|
metadata[Utils.create_ascii_name()] = Utils.create_name()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item.metadata = metadata
|
|
|
|
self.assert_(file_item.sync_metadata())
|
2011-06-08 04:19:34 +00:00
|
|
|
self.assert_status((201, 202))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(file_item.name)
|
|
|
|
self.assert_(file_item.initialize())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(200)
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(file_item.metadata, metadata)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testGetContentType(self):
|
|
|
|
file_name = Utils.create_name()
|
|
|
|
content_type = Utils.create_name()
|
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(file_name)
|
|
|
|
file_item.content_type = content_type
|
|
|
|
file_item.write_random()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(file_name)
|
|
|
|
file_item.read()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(content_type, file_item.content_type)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testGetOnFileThatDoesNotExist(self):
|
|
|
|
# in container that exists
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.read)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
# in container that does not exist
|
|
|
|
container = self.env.account.container(Utils.create_name())
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = container.file(Utils.create_name())
|
|
|
|
self.assertRaises(ResponseError, file_item.read)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
def testPostOnFileThatDoesNotExist(self):
|
|
|
|
# in container that exists
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
file_item.metadata['Field'] = 'Value'
|
|
|
|
self.assertRaises(ResponseError, file_item.sync_metadata)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
# in container that does not exist
|
|
|
|
container = self.env.account.container(Utils.create_name())
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = container.file(Utils.create_name())
|
|
|
|
file_item.metadata['Field'] = 'Value'
|
|
|
|
self.assertRaises(ResponseError, file_item.sync_metadata)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
def testMetadataOnPut(self):
|
|
|
|
for i in range(10):
|
|
|
|
metadata = {}
|
|
|
|
for j in range(10):
|
2012-10-23 09:48:24 +02:00
|
|
|
metadata[Utils.create_ascii_name()] = Utils.create_name()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
|
|
|
file_item.metadata = metadata
|
|
|
|
file_item.write_random(self.env.file_size)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(file_item.name)
|
|
|
|
self.assert_(file_item.initialize())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(200)
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(file_item.metadata, metadata)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
    def testSerialization(self):
        """Verify json and xml container listings report correct name,
        content_type, bytes and last_modified for every object written.

        Also checks the listing Content-Type response header and that the
        spread of last-modified times is bounded by the write duration.
        """
        container = self.env.account.container(Utils.create_name())
        self.assert_(container.create())

        # Objects of several sizes, including zero bytes.
        files = []
        for i in (0, 1, 10, 100, 1000, 10000):
            files.append({'name': Utils.create_name(),
                          'content_type': Utils.create_name(), 'bytes': i})

        write_time = time.time()
        for f in files:
            file_item = container.file(f['name'])
            file_item.content_type = f['content_type']
            file_item.write_random(f['bytes'])

            f['hash'] = file_item.md5
            # Per-format "seen in listing" flags, flipped below.
            f['json'] = False
            f['xml'] = False
        write_time = time.time() - write_time

        for format_type in ['json', 'xml']:
            for file_item in container.files(parms={'format': format_type}):
                found = False
                for f in files:
                    if f['name'] != file_item['name']:
                        continue

                    self.assertEqual(file_item['content_type'],
                                     f['content_type'])
                    self.assertEqual(int(file_item['bytes']), f['bytes'])

                    # Listing timestamps look like 2010-01-01T00:00:00.000000;
                    # drop the fractional part before parsing.
                    d = datetime.strptime(
                        file_item['last_modified'].split('.')[0],
                        "%Y-%m-%dT%H:%M:%S")
                    lm = time.mktime(d.timetuple())

                    # First format records the timestamp; the second format
                    # must agree with it.
                    if 'last_modified' in f:
                        self.assertEqual(f['last_modified'], lm)
                    else:
                        f['last_modified'] = lm

                    f[format_type] = True
                    found = True

                # A listed object we never wrote means the listing is wrong.
                self.assert_(found, 'Unexpected file %s found in '
                             '%s listing' % (file_item['name'], format_type))

            headers = dict(self.env.conn.response.getheaders())
            if format_type == 'json':
                self.assertEqual(headers['content-type'],
                                 'application/json; charset=utf-8')
            elif format_type == 'xml':
                self.assertEqual(headers['content-type'],
                                 'application/xml; charset=utf-8')

        # All objects were written within write_time seconds, so their
        # last-modified spread must be below that (plus 1s of slack).
        lm_diff = max([f['last_modified'] for f in files]) -\
            min([f['last_modified'] for f in files])
        self.assert_(lm_diff < write_time + 1, 'Diff in last '
                     'modified times should be less than time to write files')

        # Every written object must have appeared in both listings.
        for f in files:
            for format_type in ['json', 'xml']:
                self.assert_(f[format_type], 'File %s not found in %s listing'
                             % (f['name'], format_type))
def testStackedOverwrite(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
for i in range(1, 11):
|
2013-08-04 11:15:53 +08:00
|
|
|
data = file_item.write_random(512)
|
|
|
|
file_item.write(data)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assert_(file_item.read() == data)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testTooLongName(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file('x' * 1025)
|
|
|
|
self.assertRaises(ResponseError, file_item.write)
|
2012-09-03 23:30:52 +08:00
|
|
|
self.assert_status(400)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testZeroByteFile(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assert_(file_item.write(''))
|
|
|
|
self.assert_(file_item.name in self.env.container.files())
|
|
|
|
self.assert_(file_item.read() == '')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testEtagResponse(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
data = StringIO.StringIO(file_item.write_random(512))
|
2010-07-12 17:03:45 -05:00
|
|
|
etag = File.compute_md5sum(data)
|
|
|
|
|
|
|
|
headers = dict(self.env.conn.response.getheaders())
|
|
|
|
self.assert_('etag' in headers.keys())
|
|
|
|
|
|
|
|
header_etag = headers['etag'].strip('"')
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(etag, header_etag)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testChunkedPut(self):
|
2014-03-31 23:22:49 -04:00
|
|
|
if (tf.web_front_end == 'apache2'):
|
|
|
|
raise SkipTest("Chunked PUT can only be tested with apache2 web"
|
|
|
|
" front end")
|
2014-04-07 13:01:44 -04:00
|
|
|
|
|
|
|
def chunks(s, length=3):
|
|
|
|
i, j = 0, length
|
|
|
|
while i < len(s):
|
|
|
|
yield s[i:j]
|
|
|
|
i, j = j, j + length
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
data = File.random_data(10000)
|
|
|
|
etag = File.compute_md5sum(data)
|
|
|
|
|
|
|
|
for i in (1, 10, 100, 1000):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = self.env.container.file(Utils.create_name())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
for j in chunks(data, i):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item.chunked_write(j)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assert_(file_item.chunked_write())
|
|
|
|
self.assert_(data == file_item.read())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
info = file_item.info()
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(etag, info['etag'])
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-09-03 23:30:52 +08:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestFileUTF8(Base2, TestFile):
    """Re-run every TestFile case with UTF-8 resource names (Base2 mixin
    switches name generation to UTF-8)."""
    set_up = False
2013-11-21 11:03:46 -08:00
|
|
|
class TestDloEnv(object):
    """Fixture for DLO tests: a container holding lower-case and upper-case
    segment objects under a common random prefix, plus three manifests --
    man1 (lower segments), man2 (upper segments) and manall (both)."""

    @classmethod
    def setUp(cls):
        cls.conn = Connection(tf.config)
        cls.conn.authenticate()
        cls.account = Account(cls.conn, tf.config.get('account',
                                                      tf.config['username']))
        cls.account.delete_containers()

        cls.container = cls.account.container(Utils.create_name())
        if not cls.container.create():
            raise ResponseError(cls.conn.response)

        # Truncate on a decoded-character boundary so the prefix never ends
        # halfway through a multi-byte UTF-8 sequence.
        prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")
        cls.segment_prefix = prefix

        for letter in ('a', 'b', 'c', 'd', 'e'):
            lower_seg = cls.container.file("%s/seg_lower%s" % (prefix, letter))
            lower_seg.write(letter * 10)

            upper_seg = cls.container.file("%s/seg_upper%s" % (prefix, letter))
            upper_seg.write(letter.upper() * 10)

        # "manall" uses the bare "seg" prefix, so it matches both the
        # seg_lower* and seg_upper* segment sets.
        for man_name, seg_prefix in (('man1', 'seg_lower'),
                                     ('man2', 'seg_upper'),
                                     ('manall', 'seg')):
            manifest = cls.container.file(man_name)
            manifest.write('%s-contents' % man_name,
                           hdrs={"X-Object-Manifest": "%s/%s/%s" %
                                 (cls.container.name, prefix, seg_prefix)})
class TestDlo(Base):
    """GET/HEAD, range, copy and conditional-request behavior of dynamic
    large objects built by TestDloEnv."""
    env = TestDloEnv
    set_up = False

    def test_get_manifest(self):
        # Each manifest GET must return its segments concatenated in order.
        lower = "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeee"
        upper = "AAAAAAAAAABBBBBBBBBBCCCCCCCCCCDDDDDDDDDDEEEEEEEEEE"
        for name, expected in (('man1', lower),
                               ('man2', upper),
                               ('manall', lower + upper)):
            manifest = self.env.container.file(name)
            self.assertEqual(manifest.read(), expected)

    def test_get_manifest_document_itself(self):
        # multipart-manifest=get bypasses segment assembly and returns the
        # manifest object's own body.
        manifest = self.env.container.file('man1')
        body = manifest.read(parms={'multipart-manifest': 'get'})
        self.assertEqual(body, "man1-contents")

    def test_get_range(self):
        manifest = self.env.container.file('man1')

        # Range spanning several segments.
        self.assertEqual(manifest.read(size=25, offset=8),
                         "aabbbbbbbbbbccccccccccddd")
        # Single byte from the final segment.
        self.assertEqual(manifest.read(size=1, offset=47), "e")

    def test_get_range_out_of_range(self):
        manifest = self.env.container.file('man1')

        self.assertRaises(ResponseError, manifest.read, size=7, offset=50)
        self.assert_status(416)

    def test_copy(self):
        # Adding a new segment, copying the manifest, and then deleting the
        # segment proves that the new object is really the concatenated
        # segments and not just a manifest.
        extra_seg = self.env.container.file("%s/seg_lowerf" %
                                            (self.env.segment_prefix))
        extra_seg.write('ffffffffff')
        try:
            source = self.env.container.file('man1')
            source.copy(self.env.container.name, "copied-man1")
        finally:
            # try not to leave this around for other tests to stumble over
            extra_seg.delete()

        copy = self.env.container.file('copied-man1')
        self.assertEqual(
            copy.read(),
            "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffff")

    def test_copy_manifest(self):
        # Copying with multipart-manifest=get should result in another
        # manifest, not a flattened object.
        try:
            source = self.env.container.file('man1')
            source.copy(self.env.container.name, "copied-man1",
                        parms={'multipart-manifest': 'get'})

            copy = self.env.container.file("copied-man1")
            self.assertEqual(copy.read(parms={'multipart-manifest': 'get'}),
                             "man1-contents")
            self.assertEqual(
                copy.read(),
                "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeee")
        finally:
            # try not to leave this around for other tests to stumble over
            self.env.container.file("copied-man1").delete()

    def test_dlo_if_match_get(self):
        manifest = self.env.container.file("man1")
        etag = manifest.info()['etag']

        # Mismatching If-Match must be rejected with 412.
        self.assertRaises(ResponseError, manifest.read,
                          hdrs={'If-Match': 'not-%s' % etag})
        self.assert_status(412)

        manifest.read(hdrs={'If-Match': etag})
        self.assert_status(200)

    def test_dlo_if_none_match_get(self):
        manifest = self.env.container.file("man1")
        etag = manifest.info()['etag']

        # Matching If-None-Match must yield 304 Not Modified.
        self.assertRaises(ResponseError, manifest.read,
                          hdrs={'If-None-Match': etag})
        self.assert_status(304)

        manifest.read(hdrs={'If-None-Match': "not-%s" % etag})
        self.assert_status(200)

    def test_dlo_if_match_head(self):
        manifest = self.env.container.file("man1")
        etag = manifest.info()['etag']

        self.assertRaises(ResponseError, manifest.info,
                          hdrs={'If-Match': 'not-%s' % etag})
        self.assert_status(412)

        manifest.info(hdrs={'If-Match': etag})
        self.assert_status(200)

    def test_dlo_if_none_match_head(self):
        manifest = self.env.container.file("man1")
        etag = manifest.info()['etag']

        self.assertRaises(ResponseError, manifest.info,
                          hdrs={'If-None-Match': etag})
        self.assert_status(304)

        manifest.info(hdrs={'If-None-Match': "not-%s" % etag})
        self.assert_status(200)
class TestDloUTF8(Base2, TestDlo):
    """Re-run every TestDlo case with UTF-8 resource names (Base2 mixin
    switches name generation to UTF-8)."""
    set_up = False
Privileged acct ACL header, new ACL syntax, TempAuth impl.
* Introduce a new privileged account header: X-Account-Access-Control
* Introduce JSON-based version 2 ACL syntax -- see below for discussion
* Implement account ACL authorization in TempAuth
X-Account-Access-Control Header
-------------------------------
Accounts now have a new privileged header to represent ACLs or any other
form of account-level access control. The value of the header is an opaque
string to be interpreted by the auth system, but it must be a JSON-encoded
dictionary. A reference implementation is given in TempAuth, with the
knowledge that historically other auth systems often use TempAuth as a
starting point.
The reference implementation describes three levels of account access:
"admin", "read-write", and "read-only". Adding new access control
features in a future patch (e.g. "write-only" account access) will
automatically be forward- and backward-compatible, due to the JSON
dictionary header format.
The privileged X-Account-Access-Control header may only be read or written
by a user with "swift_owner" status, traditionally the account owner but
now also any user on the "admin" ACL.
Access Levels:
Read-only access is intended to indicate to the auth system that this
list of identities can read everything (except privileged headers) in
the account. Specifically, a user with read-only account access can get
a list of containers in the account, list the contents of any container,
retrieve any object, and see the (non-privileged) headers of the
account, any container, or any object.
Read-write access is intended to indicate to the auth system that this
list of identities can read or write (or create) any container. A user
with read-write account access can create new containers, set any
unprivileged container headers, overwrite objects, delete containers,
etc. A read-write user can NOT set account headers (or perform any
PUT/POST/DELETE requests on the account).
Admin access is intended to indicate to the auth system that this list of
identities has "swift_owner" privileges. A user with admin account access
can do anything the account owner can, including setting account headers
and any privileged headers -- and thus changing the value of
X-Account-Access-Control and thereby granting read-only, read-write, or
admin access to other users.
The auth system is responsible for making decisions based on this header,
if it chooses to support its use. Therefore the above access level
descriptions are necessarily advisory only for other auth systems.
When setting the value of the header, callers are urged to use the new
format_acl() method, described below.
New ACL Format
--------------
The account ACLs introduce a new format for ACLs, rather than reusing the
existing format from X-Container-Read/X-Container-Write. There are several
reasons for this:
* Container ACL format does not support Unicode
* Container ACLs have a different structure than account ACLs
+ account ACLs have no concept of referrers or rlistings
+ accounts have additional "admin" access level
+ account access levels are structured as admin > rw > ro, which seems more
appropriate for how people access accounts, rather than reusing
container ACLs' orthogonal read and write access
In addition, the container ACL syntax is a bit arbitrary and highly custom,
so instead of parsing additional custom syntax, I'd rather propose a next
version and introduce a means for migration. The V2 ACL syntax has the
following benefits:
* JSON is a well-known standard syntax with parsers in all languages
* no artificial value restrictions (you can grant access to a user named
".rlistings" if you want)
* forward and backward compatibility: you may have extraneous keys, but
your attempt to parse the header won't raise an exception
I've introduced hooks in parse_acl and format_acl which currently default
to the old V1 syntax but tolerate the V2 syntax and can easily be flipped
to default to V2. I'm not changing the default or adding code to rewrite
V1 ACLs to V2, because this patch has suffered a lot of scope creep already,
but this seems like a sensible milestone in the migration.
TempAuth Account ACL Implementation
-----------------------------------
As stated above, core Swift is responsible for privileging the
X-Account-Access-Control header (making it only accessible to swift_owners),
for translating it to -sysmeta-* headers to trigger persistence by the
account server, and for including the header in the responses to requests
by privileged users. Core Swift puts no expectation on the *content* of
this header. Auth systems (including TempAuth) are responsible for
defining the content of the header and taking action based on it.
In addition to the changes described above, this patch defines a format
to be used by TempAuth for these headers in the common.middleware.acl
module, in the methods format_v2_acl() and parse_v2_acl(). This patch
also teaches TempAuth to take action based on the header contents. TempAuth
now sets swift_owner=True if the user is on the Admin ACL, authorizes
GET/HEAD/OPTIONS requests if the user is on any ACL, authorizes
PUT/POST/DELETE requests if the user is on the admin or read-write ACL, etc.
Note that the action of setting swift_owner=True triggers core Swift to
add or strip the privileged headers from the responses. Core Swift (not
the auth system) is responsible for that.
DocImpact: Documentation for the new ACL usage and format appears in
summary form in doc/source/overview_auth.rst, and in more detail in
swift/common/middleware/tempauth.py in the TempAuth class docstring.
I leave it to the Swift doc team to determine whether more is needed.
Change-Id: I836a99eaaa6bb0e92dc03e1ca46a474522e6e826
2013-11-13 20:55:14 +00:00
|
|
|
class TestFileComparisonEnv(object):
    """Fixture for conditional-request tests: 20 random 128-byte objects
    plus canned HTTP-date strings one day in the past (in the three formats
    RFC 7231 allows) and one day in the future."""

    @classmethod
    def setUp(cls):
        cls.conn = Connection(tf.config)
        cls.conn.authenticate()
        cls.account = Account(cls.conn, tf.config.get('account',
                                                      tf.config['username']))
        cls.account.delete_containers()

        cls.container = cls.account.container(Utils.create_name())
        if not cls.container.create():
            raise ResponseError(cls.conn.response)

        cls.file_count = 20
        cls.file_size = 128
        cls.files = list()
        for _ in range(cls.file_count):
            obj = cls.container.file(Utils.create_name())
            obj.write_random(cls.file_size)
            cls.files.append(obj)

        yesterday = time.gmtime(time.time() - 86400)
        tomorrow = time.gmtime(time.time() + 86400)
        # The three date formats an HTTP server must accept:
        # IMF-fixdate, obsolete RFC 850 format, and asctime format.
        cls.time_old_f1 = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
                                        yesterday)
        cls.time_old_f2 = time.strftime("%A, %d-%b-%y %H:%M:%S GMT",
                                        yesterday)
        cls.time_old_f3 = time.strftime("%a %b %d %H:%M:%S %Y", yesterday)
        cls.time_new = time.strftime("%a, %d %b %Y %H:%M:%S GMT", tomorrow)
class TestFileComparison(Base):
    """Conditional GET/HEAD behavior: If-Match, If-None-Match,
    If-Modified-Since, If-Unmodified-Since and combinations."""
    env = TestFileComparisonEnv
    set_up = False

    def testIfMatch(self):
        for obj in self.env.files:
            # Matching etag -> 200 with body.
            self.assert_(obj.read(hdrs={'If-Match': obj.md5}))

            # Non-matching etag -> 412 Precondition Failed.
            self.assertRaises(ResponseError, obj.read,
                              hdrs={'If-Match': 'bogus'})
            self.assert_status(412)

    def testIfNoneMatch(self):
        for obj in self.env.files:
            # Non-matching etag -> 200 with body.
            self.assert_(obj.read(hdrs={'If-None-Match': 'bogus'}))

            # Matching etag -> 304 Not Modified.
            self.assertRaises(ResponseError, obj.read,
                              hdrs={'If-None-Match': obj.md5})
            self.assert_status(304)

    def testIfModifiedSince(self):
        for obj in self.env.files:
            # Modified since yesterday -> full response.
            old = {'If-Modified-Since': self.env.time_old_f1}
            self.assert_(obj.read(hdrs=old))
            self.assert_(obj.info(hdrs=old))

            # Not modified since tomorrow -> 304.
            future = {'If-Modified-Since': self.env.time_new}
            self.assertRaises(ResponseError, obj.read, hdrs=future)
            self.assert_status(304)
            self.assertRaises(ResponseError, obj.info, hdrs=future)
            self.assert_status(304)

    def testIfUnmodifiedSince(self):
        for obj in self.env.files:
            # Unmodified since tomorrow -> full response.
            future = {'If-Unmodified-Since': self.env.time_new}
            self.assert_(obj.read(hdrs=future))
            self.assert_(obj.info(hdrs=future))

            # Modified since yesterday (RFC 850 date format) -> 412.
            old = {'If-Unmodified-Since': self.env.time_old_f2}
            self.assertRaises(ResponseError, obj.read, hdrs=old)
            self.assert_status(412)
            self.assertRaises(ResponseError, obj.info, hdrs=old)
            self.assert_status(412)

    def testIfMatchAndUnmodified(self):
        for obj in self.env.files:
            # Both preconditions satisfied -> 200.
            self.assert_(obj.read(hdrs={'If-Match': obj.md5,
                                        'If-Unmodified-Since':
                                        self.env.time_new}))

            # Etag fails -> 412 even though the date passes.
            self.assertRaises(ResponseError, obj.read,
                              hdrs={'If-Match': 'bogus',
                                    'If-Unmodified-Since':
                                    self.env.time_new})
            self.assert_status(412)

            # Date fails (asctime format) -> 412 even though the etag passes.
            self.assertRaises(ResponseError, obj.read,
                              hdrs={'If-Match': obj.md5,
                                    'If-Unmodified-Since':
                                    self.env.time_old_f3})
            self.assert_status(412)

    def testLastModified(self):
        name = Utils.create_name()
        ctype = Utils.create_name()

        # `file_item` rather than `file`: avoid shadowing the builtin.
        file_item = self.env.container.file(name)
        file_item.content_type = ctype
        resp = file_item.write_random_return_resp(self.env.file_size)
        put_last_modified = resp.getheader('last-modified')

        file_item = self.env.container.file(name)
        info = file_item.info()
        self.assert_('last_modified' in info)
        last_modified = info['last_modified']
        self.assertEqual(put_last_modified, info['last_modified'])

        # Using the object's own timestamp: If-Modified-Since -> 304 ...
        self.assertRaises(ResponseError, file_item.read,
                          hdrs={'If-Modified-Since': last_modified})
        self.assert_status(304)

        # ... and If-Unmodified-Since -> full response.
        self.assert_(file_item.read(
            hdrs={'If-Unmodified-Since': last_modified}))
class TestFileComparisonUTF8(Base2, TestFileComparison):
    """Re-run every TestFileComparison case with UTF-8 resource names
    (Base2 mixin switches name generation to UTF-8)."""
    set_up = False
class TestSloEnv(object):
    """Fixture for SLO tests: five segments (a-e), a flat manifest over all
    of them, and nested manifests exercising submanifest expansion.

    Segment sizes are 1 MiB each for a-d and a single byte for e, so range
    and boundary tests can address exact offsets.
    """
    slo_enabled = None  # tri-state: None initially, then True/False

    @classmethod
    def setUp(cls):
        cls.conn = Connection(tf.config)
        cls.conn.authenticate()

        # Probe the cluster once; if SLO middleware is absent, skip all
        # remaining setup (tests will SkipTest on slo_enabled == False).
        if cls.slo_enabled is None:
            cls.slo_enabled = 'slo' in cluster_info
            if not cls.slo_enabled:
                return

        cls.account = Account(cls.conn, tf.config.get('account',
                                                      tf.config['username']))
        cls.account.delete_containers()

        cls.container = cls.account.container(Utils.create_name())

        if not cls.container.create():
            raise ResponseError(cls.conn.response)

        # Write the raw segments and remember size/etag/path for use in
        # the manifest JSON bodies below.
        seg_info = {}
        for letter, size in (('a', 1024 * 1024),
                             ('b', 1024 * 1024),
                             ('c', 1024 * 1024),
                             ('d', 1024 * 1024),
                             ('e', 1)):
            seg_name = "seg_%s" % letter
            file_item = cls.container.file(seg_name)
            file_item.write(letter * size)
            seg_info[seg_name] = {
                'size_bytes': size,
                'etag': file_item.md5,
                'path': '/%s/%s' % (cls.container.name, seg_name)}

        # Flat manifest over all five segments.
        file_item = cls.container.file("manifest-abcde")
        file_item.write(
            json.dumps([seg_info['seg_a'], seg_info['seg_b'],
                        seg_info['seg_c'], seg_info['seg_d'],
                        seg_info['seg_e']]),
            parms={'multipart-manifest': 'put'})

        # Sub-manifest over c+d. An SLO's etag is the MD5 of the
        # concatenated segment etags, computed here for reference below.
        file_item = cls.container.file('manifest-cd')
        cd_json = json.dumps([seg_info['seg_c'], seg_info['seg_d']])
        file_item.write(cd_json, parms={'multipart-manifest': 'put'})
        cd_etag = hashlib.md5(seg_info['seg_c']['etag'] +
                              seg_info['seg_d']['etag']).hexdigest()

        # Manifest of b + (manifest of c+d): one level of nesting.
        file_item = cls.container.file("manifest-bcd-submanifest")
        file_item.write(
            json.dumps([seg_info['seg_b'],
                        {'etag': cd_etag,
                         'size_bytes': (seg_info['seg_c']['size_bytes'] +
                                        seg_info['seg_d']['size_bytes']),
                         'path': '/%s/%s' % (cls.container.name,
                                             'manifest-cd')}]),
            parms={'multipart-manifest': 'put'})
        bcd_submanifest_etag = hashlib.md5(
            seg_info['seg_b']['etag'] + cd_etag).hexdigest()

        # Manifest of a + (manifest of b + (manifest of c+d)) + e:
        # two levels of nesting, same logical content as manifest-abcde.
        file_item = cls.container.file("manifest-abcde-submanifest")
        file_item.write(
            json.dumps([
                seg_info['seg_a'],
                {'etag': bcd_submanifest_etag,
                 'size_bytes': (seg_info['seg_b']['size_bytes'] +
                                seg_info['seg_c']['size_bytes'] +
                                seg_info['seg_d']['size_bytes']),
                 'path': '/%s/%s' % (cls.container.name,
                                     'manifest-bcd-submanifest')},
                seg_info['seg_e']]),
            parms={'multipart-manifest': 'put'})
class TestSlo(Base):
    """Functional tests for Static Large Objects (SLO).

    TestSloEnv uploads five segments named seg_a..seg_e (the first four
    are 1 MiB of a single repeated letter, the last is one byte) plus a
    flat manifest ('manifest-abcde') and a nested one
    ('manifest-abcde-submanifest') that reference them.
    """
    env = TestSloEnv
    set_up = False

    def setUp(self):
        super(TestSlo, self).setUp()
        if self.env.slo_enabled is False:
            raise SkipTest("SLO not enabled")
        elif self.env.slo_enabled is not True:
            # just some sanity checking
            raise Exception(
                "Expected slo_enabled to be True/False, got %r" %
                (self.env.slo_enabled,))

    def _check_abcde_body(self, body):
        # Spot-check the concatenated segment data at its boundaries:
        # 4 MiB of 'a'..'d' plus the single trailing 'e' byte.
        self.assertEqual(4 * 1024 * 1024 + 1, len(body))
        self.assertEqual('a', body[0])
        self.assertEqual('a', body[1024 * 1024 - 1])
        self.assertEqual('b', body[1024 * 1024])
        self.assertEqual('d', body[-2])
        self.assertEqual('e', body[-1])

    def test_slo_get_simple_manifest(self):
        """GET through a flat manifest returns the joined segments."""
        file_item = self.env.container.file('manifest-abcde')
        self._check_abcde_body(file_item.read())

    def test_slo_get_nested_manifest(self):
        """GET through a manifest that contains a submanifest."""
        file_item = self.env.container.file('manifest-abcde-submanifest')
        self._check_abcde_body(file_item.read())

    def test_slo_ranged_get(self):
        # The requested range straddles the a/b and b/c segment borders.
        file_item = self.env.container.file('manifest-abcde')
        body = file_item.read(size=1024 * 1024 + 2,
                              offset=1024 * 1024 - 1)
        self.assertEqual('a', body[0])
        self.assertEqual('b', body[1])
        self.assertEqual('b', body[-2])
        self.assertEqual('c', body[-1])

    def test_slo_ranged_submanifest(self):
        # Same idea, but the range crosses borders inside a submanifest.
        file_item = self.env.container.file('manifest-abcde-submanifest')
        body = file_item.read(size=1024 * 1024 + 2,
                              offset=1024 * 1024 * 2 - 1)
        self.assertEqual('b', body[0])
        self.assertEqual('c', body[1])
        self.assertEqual('c', body[-2])
        self.assertEqual('d', body[-1])

    def test_slo_etag_is_hash_of_etags(self):
        # A SLO's ETag is the md5 of the concatenated segment ETags.
        segment_etags = [hashlib.md5(body).hexdigest()
                         for body in ('a' * 1024 * 1024,
                                      'b' * 1024 * 1024,
                                      'c' * 1024 * 1024,
                                      'd' * 1024 * 1024,
                                      'e')]
        expected_etag = hashlib.md5(''.join(segment_etags)).hexdigest()

        file_item = self.env.container.file('manifest-abcde')
        self.assertEqual(expected_etag, file_item.info()['etag'])

    def test_slo_etag_is_hash_of_etags_submanifests(self):
        """The ETag computation applies recursively to submanifests."""

        def hd(x):
            return hashlib.md5(x).hexdigest()

        # Build the expectation bottom-up: cd, then b(cd), then a(bcd)e.
        cd_hash = hd(hd('c' * 1024 * 1024) + hd('d' * 1024 * 1024))
        bcd_hash = hd(hd('b' * 1024 * 1024) + cd_hash)
        expected_etag = hd(hd('a' * 1024 * 1024) + bcd_hash + hd('e'))

        file_item = self.env.container.file('manifest-abcde-submanifest')
        self.assertEqual(expected_etag, file_item.info()['etag'])

    def test_slo_etag_mismatch(self):
        # A manifest whose declared etag doesn't match the segment's
        # actual etag must be rejected with a 400.
        file_item = self.env.container.file("manifest-a-bad-etag")
        manifest = json.dumps([{
            'size_bytes': 1024 * 1024,
            'etag': 'not it',
            'path': '/%s/%s' % (self.env.container.name, 'seg_a')}])
        try:
            file_item.write(manifest, parms={'multipart-manifest': 'put'})
        except ResponseError as err:
            self.assertEqual(400, err.status)
        else:
            self.fail("Expected ResponseError but didn't get it")

    def test_slo_size_mismatch(self):
        # A manifest whose declared size doesn't match the segment's
        # actual size must also be rejected with a 400.
        file_item = self.env.container.file("manifest-a-bad-size")
        manifest = json.dumps([{
            'size_bytes': 1024 * 1024 - 1,
            'etag': hashlib.md5('a' * 1024 * 1024).hexdigest(),
            'path': '/%s/%s' % (self.env.container.name, 'seg_a')}])
        try:
            file_item.write(manifest, parms={'multipart-manifest': 'put'})
        except ResponseError as err:
            self.assertEqual(400, err.status)
        else:
            self.fail("Expected ResponseError but didn't get it")

    def test_slo_copy(self):
        # A plain COPY of a manifest materializes the full large object:
        # reading the copy with multipart-manifest=get yields segment
        # data, not a manifest document.
        file_item = self.env.container.file("manifest-abcde")
        file_item.copy(self.env.container.name, "copied-abcde")

        copied = self.env.container.file("copied-abcde")
        copied_contents = copied.read(parms={'multipart-manifest': 'get'})
        self.assertEqual(4 * 1024 * 1024 + 1, len(copied_contents))

    def test_slo_copy_the_manifest(self):
        # COPY with multipart-manifest=get duplicates the manifest
        # document itself rather than the concatenated segments.
        file_item = self.env.container.file("manifest-abcde")
        file_item.copy(self.env.container.name, "copied-abcde-manifest-only",
                       parms={'multipart-manifest': 'get'})

        copied = self.env.container.file("copied-abcde-manifest-only")
        copied_contents = copied.read(parms={'multipart-manifest': 'get'})
        try:
            json.loads(copied_contents)
        except ValueError:
            self.fail("COPY didn't copy the manifest (invalid json on GET)")

    def test_slo_get_the_manifest(self):
        # multipart-manifest=get on GET returns the JSON manifest body.
        manifest = self.env.container.file("manifest-abcde")
        got_body = manifest.read(parms={'multipart-manifest': 'get'})

        self.assertEqual('application/json; charset=utf-8',
                         manifest.content_type)
        try:
            json.loads(got_body)
        except ValueError:
            self.fail("GET with multipart-manifest=get got invalid json")

    def test_slo_head_the_manifest(self):
        # multipart-manifest=get on HEAD reports the manifest's own
        # content type.
        manifest = self.env.container.file("manifest-abcde")
        got_info = manifest.info(parms={'multipart-manifest': 'get'})

        self.assertEqual('application/json; charset=utf-8',
                         got_info['content_type'])

    def test_slo_if_match_get(self):
        manifest = self.env.container.file("manifest-abcde")
        etag = manifest.info()['etag']

        # Non-matching If-Match -> 412; matching -> 200.
        self.assertRaises(ResponseError, manifest.read,
                          hdrs={'If-Match': 'not-%s' % etag})
        self.assert_status(412)

        manifest.read(hdrs={'If-Match': etag})
        self.assert_status(200)

    def test_slo_if_none_match_get(self):
        manifest = self.env.container.file("manifest-abcde")
        etag = manifest.info()['etag']

        # Matching If-None-Match -> 304; non-matching -> 200.
        self.assertRaises(ResponseError, manifest.read,
                          hdrs={'If-None-Match': etag})
        self.assert_status(304)

        manifest.read(hdrs={'If-None-Match': "not-%s" % etag})
        self.assert_status(200)

    def test_slo_if_match_head(self):
        manifest = self.env.container.file("manifest-abcde")
        etag = manifest.info()['etag']

        # Same conditional semantics must hold for HEAD.
        self.assertRaises(ResponseError, manifest.info,
                          hdrs={'If-Match': 'not-%s' % etag})
        self.assert_status(412)

        manifest.info(hdrs={'If-Match': etag})
        self.assert_status(200)

    def test_slo_if_none_match_head(self):
        manifest = self.env.container.file("manifest-abcde")
        etag = manifest.info()['etag']

        self.assertRaises(ResponseError, manifest.info,
                          hdrs={'If-None-Match': etag})
        self.assert_status(304)

        manifest.info(hdrs={'If-None-Match': "not-%s" % etag})
        self.assert_status(200)
|
|
class TestSloUTF8(Base2, TestSlo):
    # Re-runs every TestSlo case; Base2 presumably switches the test
    # environment to UTF-8 container/object names -- confirm against
    # Base2's definition earlier in this file.
    set_up = False
|
|
|
|
|
2013-12-18 10:43:29 -08:00
|
|
|
class TestObjectVersioningEnv(object):
    """Provision a container pair for object-versioning tests.

    Creates "<prefix>-objs" with X-Versions-Location pointing at
    "<prefix>-versions", then records whether the cluster actually
    honored the header.  Performs cluster I/O only.
    """
    versioning_enabled = None  # tri-state: None initially, then True/False

    @classmethod
    def setUp(cls):
        cls.conn = Connection(tf.config)
        cls.conn.authenticate()

        cls.account = Account(cls.conn, tf.config.get('account',
                                                      tf.config['username']))

        # Truncate the random name on a character boundary so we don't
        # end up with half of a multi-byte UTF-8 sequence.
        name_prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")

        cls.versions_container = cls.account.container(
            name_prefix + "-versions")
        if not cls.versions_container.create():
            raise ResponseError(cls.conn.response)

        cls.container = cls.account.container(name_prefix + "-objs")
        if not cls.container.create(
                hdrs={'X-Versions-Location': cls.versions_container.name}):
            raise ResponseError(cls.conn.response)

        # if versioning is off, then X-Versions-Location won't persist
        cls.versioning_enabled = 'versions' in cls.container.info()
|
|
|
|
|
class TestObjectVersioning(Base):
    """Exercise the X-Versions-Location behavior configured by
    TestObjectVersioningEnv."""
    env = TestObjectVersioningEnv
    set_up = False

    def setUp(self):
        super(TestObjectVersioning, self).setUp()
        enabled = self.env.versioning_enabled
        if enabled is False:
            raise SkipTest("Object versioning not enabled")
        elif enabled is not True:
            # just some sanity checking
            raise Exception(
                "Expected versioning_enabled to be True/False, got %r" %
                (enabled,))

    def test_overwriting(self):
        container = self.env.container
        versions_container = self.env.versions_container
        obj_name = Utils.create_name()

        obj = container.file(obj_name)
        obj.write("aaaaa")

        # the first write creates no backup copy
        self.assertEqual(0, versions_container.info()['object_count'])

        obj.write("bbbbb")

        # the old version got saved off
        self.assertEqual(1, versions_container.info()['object_count'])
        backup_name = versions_container.files()[0]
        self.assertEqual(
            "aaaaa", versions_container.file(backup_name).read())

        # if we overwrite it again, there are two versions
        obj.write("ccccc")
        self.assertEqual(2, versions_container.info()['object_count'])

        # as we delete things, the old contents return
        for expected in ("ccccc", "bbbbb", "aaaaa"):
            self.assertEqual(expected, obj.read())
            obj.delete()
        # once all versions are gone, reads must fail
        self.assertRaises(ResponseError, obj.read)
|
|
|
|
|
class TestObjectVersioningUTF8(Base2, TestObjectVersioning):
    # Re-runs TestObjectVersioning; Base2 presumably switches to UTF-8
    # names -- confirm against Base2's definition earlier in this file.
    set_up = False
|
|
|
|
|
2014-03-06 13:11:03 -08:00
|
|
|
class TestTempurlEnv(object):
    """Provision an account with two tempurl keys and two objects.

    Used by TestTempurl; performs cluster I/O only.  Skips all setup
    work when the cluster does not advertise the tempurl middleware.
    """
    tempurl_enabled = None  # tri-state: None initially, then True/False

    @classmethod
    def setUp(cls):
        cls.conn = Connection(tf.config)
        cls.conn.authenticate()

        if cls.tempurl_enabled is None:
            cls.tempurl_enabled = 'tempurl' in cluster_info
            if not cls.tempurl_enabled:
                return

        cls.tempurl_key = Utils.create_name()
        cls.tempurl_key2 = Utils.create_name()

        cls.account = Account(
            cls.conn, tf.config.get('account', tf.config['username']))
        cls.account.delete_containers()
        # Register both keys so signatures from either one are honored.
        cls.account.update_metadata({
            'temp-url-key': cls.tempurl_key,
            'temp-url-key-2': cls.tempurl_key2,
        })

        cls.container = cls.account.container(Utils.create_name())
        if not cls.container.create():
            raise ResponseError(cls.conn.response)

        # One object the tests sign for, and a second one used to prove
        # that a signature is scoped to a single path.
        cls.obj = cls.container.file(Utils.create_name())
        cls.obj.write("obj contents")
        cls.other_obj = cls.container.file(Utils.create_name())
        cls.other_obj.write("other obj contents")
|
|
|
|
|
class TestTempurl(Base):
    """Functional tests for the tempurl middleware.

    setUp pre-computes a day-long GET tempurl for self.env.obj in
    self.obj_tempurl_parms; individual tests build additional
    signatures for other methods/objects as needed.
    """
    env = TestTempurlEnv
    set_up = False

    def setUp(self):
        super(TestTempurl, self).setUp()
        if self.env.tempurl_enabled is False:
            raise SkipTest("TempURL not enabled")
        elif self.env.tempurl_enabled is not True:
            # just some sanity checking
            raise Exception(
                "Expected tempurl_enabled to be True/False, got %r" %
                (self.env.tempurl_enabled,))

        expires = int(time.time()) + 86400
        sig = self.tempurl_sig(
            'GET', expires, self.env.conn.make_path(self.env.obj.path),
            self.env.tempurl_key)
        # Query parameters granting GET access to self.env.obj.
        self.obj_tempurl_parms = {'temp_url_sig': sig,
                                  'temp_url_expires': str(expires)}

    def tempurl_sig(self, method, expires, path, key):
        """Return the hex HMAC-SHA1 tempurl signature for a request.

        :param method: HTTP method the signature authorizes
        :param expires: Unix timestamp after which the URL is invalid
        :param path: unquoted object path to sign
        :param key: account tempurl key
        """
        return hmac.new(
            key,
            '%s\n%s\n%s' % (method, expires, urllib.unquote(path)),
            hashlib.sha1).hexdigest()

    def test_GET(self):
        contents = self.env.obj.read(
            parms=self.obj_tempurl_parms,
            cfg={'no_auth_token': True})
        self.assertEqual(contents, "obj contents")

        # GET tempurls also allow HEAD requests
        self.assert_(self.env.obj.info(parms=self.obj_tempurl_parms,
                                       cfg={'no_auth_token': True}))

    def test_GET_with_key_2(self):
        # The secondary account key must work just as well as the first.
        expires = int(time.time()) + 86400
        sig = self.tempurl_sig(
            'GET', expires, self.env.conn.make_path(self.env.obj.path),
            self.env.tempurl_key2)
        parms = {'temp_url_sig': sig,
                 'temp_url_expires': str(expires)}

        contents = self.env.obj.read(parms=parms, cfg={'no_auth_token': True})
        self.assertEqual(contents, "obj contents")

    def test_PUT(self):
        new_obj = self.env.container.file(Utils.create_name())

        expires = int(time.time()) + 86400
        sig = self.tempurl_sig(
            'PUT', expires, self.env.conn.make_path(new_obj.path),
            self.env.tempurl_key)
        put_parms = {'temp_url_sig': sig,
                     'temp_url_expires': str(expires)}

        new_obj.write('new obj contents',
                      parms=put_parms, cfg={'no_auth_token': True})
        self.assertEqual(new_obj.read(), "new obj contents")

        # PUT tempurls also allow HEAD requests
        self.assert_(new_obj.info(parms=put_parms,
                                  cfg={'no_auth_token': True}))

    def test_HEAD(self):
        expires = int(time.time()) + 86400
        sig = self.tempurl_sig(
            'HEAD', expires, self.env.conn.make_path(self.env.obj.path),
            self.env.tempurl_key)
        head_parms = {'temp_url_sig': sig,
                      'temp_url_expires': str(expires)}

        self.assert_(self.env.obj.info(parms=head_parms,
                                       cfg={'no_auth_token': True}))
        # HEAD tempurls don't allow PUT or GET requests, despite the fact that
        # PUT and GET tempurls both allow HEAD requests.
        #
        # FIX: these negative checks previously sent the GET signature
        # (self.obj_tempurl_parms) against a *different* object, so the 401
        # proved nothing about HEAD-scoped signatures -- any signature is
        # rejected on the wrong path (see test_different_object).  Use the
        # HEAD signature against the object it was signed for instead.
        self.assertRaises(ResponseError, self.env.obj.read,
                          cfg={'no_auth_token': True},
                          parms=head_parms)
        self.assert_status([401])

        self.assertRaises(ResponseError, self.env.obj.write,
                          'new contents',
                          cfg={'no_auth_token': True},
                          parms=head_parms)
        self.assert_status([401])

    def test_different_object(self):
        # A signature is scoped to a single object path.
        contents = self.env.obj.read(
            parms=self.obj_tempurl_parms,
            cfg={'no_auth_token': True})
        self.assertEqual(contents, "obj contents")

        self.assertRaises(ResponseError, self.env.other_obj.read,
                          cfg={'no_auth_token': True},
                          parms=self.obj_tempurl_parms)
        self.assert_status([401])

    def test_changing_sig(self):
        # Flipping a single character of the signature must invalidate it.
        contents = self.env.obj.read(
            parms=self.obj_tempurl_parms,
            cfg={'no_auth_token': True})
        self.assertEqual(contents, "obj contents")

        parms = self.obj_tempurl_parms.copy()
        if parms['temp_url_sig'][0] == 'a':
            parms['temp_url_sig'] = 'b' + parms['temp_url_sig'][1:]
        else:
            parms['temp_url_sig'] = 'a' + parms['temp_url_sig'][1:]

        self.assertRaises(ResponseError, self.env.obj.read,
                          cfg={'no_auth_token': True},
                          parms=parms)
        self.assert_status([401])

    def test_changing_expires(self):
        # Tampering with the expiry timestamp must invalidate the URL
        # because the expiry is part of the signed string.
        contents = self.env.obj.read(
            parms=self.obj_tempurl_parms,
            cfg={'no_auth_token': True})
        self.assertEqual(contents, "obj contents")

        parms = self.obj_tempurl_parms.copy()
        if parms['temp_url_expires'][-1] == '0':
            parms['temp_url_expires'] = parms['temp_url_expires'][:-1] + '1'
        else:
            parms['temp_url_expires'] = parms['temp_url_expires'][:-1] + '0'

        self.assertRaises(ResponseError, self.env.obj.read,
                          cfg={'no_auth_token': True},
                          parms=parms)
        self.assert_status([401])
|
|
|
|
|
|
class TestTempurlUTF8(Base2, TestTempurl):
    # Re-runs TestTempurl; Base2 presumably switches to UTF-8 names --
    # confirm against Base2's definition earlier in this file.
    set_up = False
|
|
|
|
|
2014-03-07 15:53:05 -08:00
|
|
|
class TestSloTempurlEnv(object):
    """Upload a two-segment SLO for tempurl testing (TestSloTempurl).

    Performs cluster I/O only; requires both the tempurl and slo
    middlewares to be advertised by the cluster.
    """
    enabled = None  # tri-state: None initially, then True/False

    @classmethod
    def setUp(cls):
        cls.conn = Connection(tf.config)
        cls.conn.authenticate()

        if cls.enabled is None:
            cls.enabled = 'tempurl' in cluster_info and 'slo' in cluster_info

        cls.tempurl_key = Utils.create_name()

        cls.account = Account(
            cls.conn, tf.config.get('account', tf.config['username']))
        cls.account.delete_containers()
        cls.account.update_metadata({'temp-url-key': cls.tempurl_key})

        # Manifest and segments live in separate containers.
        cls.manifest_container = cls.account.container(Utils.create_name())
        cls.segments_container = cls.account.container(Utils.create_name())
        for container in (cls.manifest_container, cls.segments_container):
            if not container.create():
                raise ResponseError(cls.conn.response)

        seg1 = cls.segments_container.file(Utils.create_name())
        seg1.write('1' * 1024 * 1024)

        seg2 = cls.segments_container.file(Utils.create_name())
        seg2.write('2' * 1024 * 1024)

        cls.manifest_data = [
            {'size_bytes': 1024 * 1024,
             'etag': seg.md5,
             'path': '/%s/%s' % (cls.segments_container.name, seg.name)}
            for seg in (seg1, seg2)]

        cls.manifest = cls.manifest_container.file(Utils.create_name())
        cls.manifest.write(
            json.dumps(cls.manifest_data),
            parms={'multipart-manifest': 'put'})
|
|
|
|
|
|
|
|
class TestSloTempurl(Base):
    """Verify tempurl GETs work against a SLO manifest (segments are
    fetched server-side even though only the manifest path is signed)."""
    env = TestSloTempurlEnv
    set_up = False

    def setUp(self):
        super(TestSloTempurl, self).setUp()
        if self.env.enabled is False:
            raise SkipTest("TempURL and SLO not both enabled")
        elif self.env.enabled is not True:
            # just some sanity checking
            raise Exception(
                "Expected enabled to be True/False, got %r" %
                (self.env.enabled,))

    def tempurl_sig(self, method, expires, path, key):
        """Return the hex HMAC-SHA1 tempurl signature for a request."""
        message = '%s\n%s\n%s' % (method, expires, urllib.unquote(path))
        return hmac.new(key, message, hashlib.sha1).hexdigest()

    def test_GET(self):
        expires = int(time.time()) + 86400
        sig = self.tempurl_sig(
            'GET', expires, self.env.conn.make_path(self.env.manifest.path),
            self.env.tempurl_key)
        parms = {'temp_url_sig': sig, 'temp_url_expires': str(expires)}

        body = self.env.manifest.read(
            parms=parms,
            cfg={'no_auth_token': True})
        # both 1 MiB segments should come back through the manifest
        self.assertEqual(len(body), 2 * 1024 * 1024)

        # GET tempurls also allow HEAD requests
        self.assert_(self.env.manifest.info(
            parms=parms, cfg={'no_auth_token': True}))
|
|
|
|
|
|
class TestSloTempurlUTF8(Base2, TestSloTempurl):
    # Re-runs TestSloTempurl; Base2 presumably switches to UTF-8 names --
    # confirm against Base2's definition earlier in this file.
    set_up = False
|
|
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
# Allow running this functional test module directly.
if __name__ == '__main__':
    unittest.main()
|