#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
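
"""Functional API tests for Swift accounts and containers."""
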
from datetime import datetime
import hashlib
import io
import locale
import random
import six
from six.moves import urllib
import time
import unittest2
import uuid
from copy import deepcopy
import eventlet
from swift.common.http import is_success, is_client_error
from email.utils import parsedate

if six.PY2:
    from email.parser import FeedParser
else:
    from email.parser import BytesFeedParser as FeedParser

import mock

from test.functional import normalized_urls, load_constraint, cluster_info
from test.functional import check_response, retry
import test.functional as tf
from test.functional.swift_test_client import Account, Connection, File, \
    ResponseError, SkipTest


def setUpModule():
    tf.setup_package()


def tearDownModule():
    tf.teardown_package()

class Utils(object):
    @classmethod
    def create_ascii_name(cls, length=None):
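        # ``length`` is accepted only for signature parity with
        # create_utf8_name; a uuid4 hex name is always 32 characters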
        return uuid.uuid4().hex

    @classmethod
    def create_utf8_name(cls, length=None):
        if length is None:
            length = 15
        else:
            length = int(length)

        utf8_chars = u'\uF10F\uD20D\uB30B\u9409\u8508\u5605\u3703\u1801'\
                     u'\u0900\uF110\uD20E\uB30C\u940A\u8509\u5606\u3704'\
                     u'\u1802\u0901\uF111\uD20F\uB30D\u940B\u850A\u5607'\
                     u'\u3705\u1803\u0902\uF112\uD210\uB30E\u940C\u850B'\
                     u'\u5608\u3706\u1804\u0903\u03A9\u2603'
        ustr = u''.join([random.choice(utf8_chars)
                         for x in range(length)])
        if six.PY2:
            return ustr.encode('utf-8')
        return ustr

    create_name = create_ascii_name


class BaseEnv(object):
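    # Each Test*Env subclass builds whatever account state (containers,
    # files) its paired Test* class expects; Base below wires these env
    # hooks into unittest's class-level setup/teardown.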
    account = conn = None

    @classmethod
    def setUp(cls):
        cls.conn = Connection(tf.config)
        cls.conn.authenticate()
        cls.account = Account(cls.conn, tf.config.get('account',
                                                      tf.config['username']))
        cls.account.delete_containers()

    @classmethod
    def tearDown(cls):
        pass


class Base(unittest2.TestCase):
    env = BaseEnv
    @classmethod
    def tearDownClass(cls):
        cls.env.tearDown()

    @classmethod
    def setUpClass(cls):
        cls.env.setUp()

    def setUp(self):
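        # in-process test servers exercise real diskfiles, which keep
        # metadata in xattrs; skip when the filesystem lacks support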
        if tf.in_process:
            tf.skip_if_no_xattrs()

    def assert_body(self, body):
        if not isinstance(body, bytes):
            body = body.encode('utf-8')
        response_body = self.env.conn.response.read()
        self.assertEqual(response_body, body,
                         'Body returned: %s' % (response_body))

    def assert_status(self, status_or_statuses):
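        # accepts either a single expected status code or any iterable
        # of acceptable codes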
        self.assertTrue(
            self.env.conn.response.status == status_or_statuses or
            (hasattr(status_or_statuses, '__iter__') and
             self.env.conn.response.status in status_or_statuses),
            'Status returned: %d Expected: %s' %
            (self.env.conn.response.status, status_or_statuses))

    def assert_header(self, header_name, expected_value):
        try:
            actual_value = self.env.conn.response.getheader(header_name)
        except KeyError:
            self.fail('Expected header name %r not found in response.'
                      % header_name)
        self.assertEqual(expected_value, actual_value)


class Base2(object):
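    # Mixin: pairing this with a test class re-runs all of its tests
    # using UTF-8 names instead of ASCII ones (see TestAccountUTF8 etc.)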
    @classmethod
    def setUpClass(cls):
        Utils.create_name = Utils.create_utf8_name
        super(Base2, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        Utils.create_name = Utils.create_ascii_name


class TestAccountEnv(BaseEnv):
    @classmethod
    def setUp(cls):
        super(TestAccountEnv, cls).setUp()
        cls.containers = []
        for i in range(10):
            cont = cls.account.container(Utils.create_name())
            if not cont.create():
                raise ResponseError(cls.conn.response)

            cls.containers.append(cont)


class TestAccountDev(Base):
    env = TestAccountEnv


class TestAccountDevUTF8(Base2, TestAccountDev):
    pass


class TestAccount(Base):
    env = TestAccountEnv

    def testNoAuthToken(self):
        self.assertRaises(ResponseError, self.env.account.info,
                          cfg={'no_auth_token': True})
        self.assert_status([401, 412])

        self.assertRaises(ResponseError, self.env.account.containers,
                          cfg={'no_auth_token': True})
        self.assert_status([401, 412])

    def testInvalidUTF8Path(self):
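        # reversing the name's UTF-8 byte sequence yields a byte string
        # that is no longer valid UTF-8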
        valid_utf8 = Utils.create_utf8_name()
        if six.PY2:
            invalid_utf8 = valid_utf8[::-1]
        else:
            invalid_utf8 = (valid_utf8.encode('utf8')[::-1]).decode(
                'utf-8', 'surrogateescape')
        container = self.env.account.container(invalid_utf8)
        self.assertFalse(container.create(cfg={'no_path_quote': True}))
        self.assert_status(412)
        self.assert_body('Invalid UTF8 or contains NULL')

    def testVersionOnlyPath(self):
        self.env.account.conn.make_request('PUT',
                                           cfg={'version_only_path': True})
        self.assert_status(412)
        self.assert_body('Bad URL')

    def testInvalidPath(self):
        was_path = self.env.account.conn.storage_path
        if normalized_urls:
            self.env.account.conn.storage_path = '/'
        else:
            self.env.account.conn.storage_path = "/%s" % was_path
        try:
            self.env.account.conn.make_request('GET')
            self.assert_status(404)
        finally:
            self.env.account.conn.storage_path = was_path

    def testPUTError(self):
        if load_constraint('allow_account_management'):
            raise SkipTest("Allow account management is enabled")
        self.env.account.conn.make_request('PUT')
        self.assert_status([403, 405])

    def testAccountHead(self):
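        # account stats are updated asynchronously, so poll briefly
        # until the container count converges before asserting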
        try_count = 0
        while try_count < 5:
            try_count += 1

            info = self.env.account.info()
            for field in ['object_count', 'container_count', 'bytes_used']:
                self.assertGreaterEqual(info[field], 0)

            if info['container_count'] == len(self.env.containers):
                break

            if try_count < 5:
                time.sleep(1)

        self.assertEqual(info['container_count'], len(self.env.containers))
        self.assert_status(204)

    def testContainerSerializedInfo(self):
        container_info = {}
        for container in self.env.containers:
            info = {'bytes': 0}
            info['count'] = random.randint(10, 30)
            for i in range(info['count']):
                file_item = container.file(Utils.create_name())
                size = random.randint(1, 32768)
                file_item.write_random(size)
                info['bytes'] += size

            container_info[container.name] = info

        for format_type in ['json', 'xml']:
            for a in self.env.account.containers(
                    parms={'format': format_type}):
                self.assertGreaterEqual(a['count'], 0)
                self.assertGreaterEqual(a['bytes'], 0)

            headers = dict((k.lower(), v)
                           for k, v in self.env.conn.response.getheaders())
            if format_type == 'json':
                self.assertEqual(headers['content-type'],
                                 'application/json; charset=utf-8')
            elif format_type == 'xml':
                self.assertEqual(headers['content-type'],
                                 'application/xml; charset=utf-8')

    def testListingLimit(self):
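        # account_listing_limit is the server-side cap: requests at or
        # below it succeed, anything above is rejected with a 412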
        limit = load_constraint('account_listing_limit')
        for l in (1, 100, limit // 2, limit - 1, limit, limit + 1, limit * 2):
            p = {'limit': l}

            if l <= limit:
                self.assertLessEqual(len(self.env.account.containers(parms=p)),
                                     l)
                self.assert_status(200)
            else:
                self.assertRaises(ResponseError,
                                  self.env.account.containers, parms=p)
                self.assert_status(412)

    def testContainerListing(self):
        a = sorted([c.name for c in self.env.containers])

        for format_type in [None, 'json', 'xml']:
            b = self.env.account.containers(parms={'format': format_type})

            if isinstance(b[0], dict):
                b = [x['name'] for x in b]

            self.assertEqual(a, b)

    def testListDelimiter(self):
        delimiter = '-'
        containers = ['test', delimiter.join(['test', 'bar']),
                      delimiter.join(['test', 'foo'])]
        for c in containers:
            cont = self.env.account.container(c)
            self.assertTrue(cont.create())

        results = self.env.account.containers(parms={'delimiter': delimiter})
        expected = ['test', 'test-']
        results = [r for r in results if r in expected]
        self.assertEqual(expected, results)

        results = self.env.account.containers(parms={'delimiter': delimiter,
                                                     'reverse': 'yes'})
        expected.reverse()
        results = [r for r in results if r in expected]
        self.assertEqual(expected, results)

    def testListMultiCharDelimiter(self):
        delimiter = '-&'
        containers = ['test', delimiter.join(['test', 'bar']),
                      delimiter.join(['test', 'foo'])]
        for c in containers:
            cont = self.env.account.container(c)
            self.assertTrue(cont.create())

        results = self.env.account.containers(parms={'delimiter': delimiter})
        expected = ['test', 'test-&']
        results = [r for r in results if r in expected]
        self.assertEqual(expected, results)

        results = self.env.account.containers(parms={'delimiter': delimiter,
                                                     'reverse': 'yes'})
        expected.reverse()
        results = [r for r in results if r in expected]
        self.assertEqual(expected, results)

    def testListDelimiterAndPrefix(self):
        delimiter = 'a'
        containers = ['bar', 'bazar']
        for c in containers:
            cont = self.env.account.container(c)
            self.assertTrue(cont.create())

        results = self.env.account.containers(parms={'delimiter': delimiter,
                                                     'prefix': 'ba'})
        expected = ['bar', 'baza']
        results = [r for r in results if r in expected]
        self.assertEqual(expected, results)

        results = self.env.account.containers(parms={'delimiter': delimiter,
                                                     'prefix': 'ba',
                                                     'reverse': 'yes'})
        expected.reverse()
        results = [r for r in results if r in expected]
        self.assertEqual(expected, results)

    def testContainerListingLastModified(self):
        expected = {}
        for container in self.env.containers:
            res = container.info()
            expected[container.name] = time.mktime(
                parsedate(res['last_modified']))

        for format_type in ['json', 'xml']:
            actual = {}
            containers = self.env.account.containers(
                parms={'format': format_type})
            if isinstance(containers[0], dict):
                for container in containers:
                    self.assertIn('name', container)  # sanity
                    self.assertIn('last_modified', container)  # sanity
                    # ceil by hand (an easier way would be welcome!)
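                    # last_modified in the listing has microsecond
                    # precision, while the Date header parsed above has
                    # only second precision, so round up before comparing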
                    datetime_str, micro_sec_str = \
                        container['last_modified'].split('.')
                    timestamp = time.mktime(
                        time.strptime(datetime_str,
                                      "%Y-%m-%dT%H:%M:%S"))
                    if int(micro_sec_str):
                        timestamp += 1
                    actual[container['name']] = timestamp

            self.assertEqual(expected, actual)

    def testInvalidAuthToken(self):
        hdrs = {'X-Auth-Token': 'bogus_auth_token'}
        self.assertRaises(ResponseError, self.env.account.info, hdrs=hdrs)
        self.assert_status(401)

    def testLastContainerMarker(self):
        for format_type in [None, 'json', 'xml']:
            containers = self.env.account.containers(parms={
                'format': format_type})
            self.assertEqual(len(containers), len(self.env.containers))
            self.assert_status(200)
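
            # a plain-text listing yields bare names; json and xml
            # listings yield dicts with a 'name' key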
            marker = (containers[-1] if format_type is None
                      else containers[-1]['name'])
            containers = self.env.account.containers(
                parms={'format': format_type, 'marker': marker})
            self.assertEqual(len(containers), 0)
            if format_type is None:
                self.assert_status(204)
            else:
                self.assert_status(200)

    def testMarkerLimitContainerList(self):
        for format_type in [None, 'json', 'xml']:
            for marker in ['0', 'A', 'I', 'R', 'Z', 'a', 'i', 'r', 'z',
                           'abc123', 'mnop', 'xyz']:

                limit = random.randint(2, 9)
                containers = self.env.account.containers(
                    parms={'format': format_type,
                           'marker': marker,
                           'limit': limit})
                self.assertLessEqual(len(containers), limit)
                if containers:
                    if isinstance(containers[0], dict):
                        containers = [x['name'] for x in containers]
                    self.assertGreater(locale.strcoll(containers[0], marker),
                                       0)

    def testContainersOrderedByName(self):
        for format_type in [None, 'json', 'xml']:
            containers = self.env.account.containers(
                parms={'format': format_type})
            if isinstance(containers[0], dict):
                containers = [x['name'] for x in containers]
            self.assertEqual(sorted(containers, key=locale.strxfrm),
                             containers)

    def testQuotedWWWAuthenticateHeader(self):
        # check that the www-authenticate header value with the swift realm
        # is correctly quoted
        conn = Connection(tf.config)
        conn.authenticate()
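        # craft an account name that, if left unquoted in the realm,
        # would smuggle an extra header and body into the 401 response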
        inserted_html = '<b>Hello World'
        hax = 'AUTH_haxx"\nContent-Length: %d\n\n%s' % (len(inserted_html),
                                                        inserted_html)
        quoted_hax = urllib.parse.quote(hax)
        conn.connection.request('GET', '/v1/' + quoted_hax, None, {})
        resp = conn.connection.getresponse()

        resp_headers = {}
        for h, v in resp.getheaders():
            h = h.lower()
            if h in resp_headers:
                # py2 would do this for us, but py3 apparently keeps them
                # separate? Not sure which I like more...
                resp_headers[h] += ',' + v
            else:
                resp_headers[h] = v

        self.assertIn('www-authenticate', resp_headers)
        actual = resp_headers['www-authenticate']
        expected = 'Swift realm="%s"' % quoted_hax
        # other middleware, e.g. auth_token, may also set www-authenticate
        # headers, in which case the actual value is a comma-separated list;
        # check that the expected value is among the actual values
        self.assertIn(expected, actual)


class TestAccountUTF8(Base2, TestAccount):
    pass


class TestAccountNoContainers(Base):
    def testGetRequest(self):
        for format_type in [None, 'json', 'xml']:
            self.assertFalse(self.env.account.containers(
                parms={'format': format_type}))

            if format_type is None:
                self.assert_status(204)
            else:
                self.assert_status(200)


class TestAccountNoContainersUTF8(Base2, TestAccountNoContainers):
    pass


class TestAccountSortingEnv(BaseEnv):
    @classmethod
    def setUp(cls):
        super(TestAccountSortingEnv, cls).setUp()
        postfix = Utils.create_name()
        cls.cont_items = ('a1', 'a2', 'A3', 'b1', 'B2', 'a10', 'b10', 'zz')
        cls.cont_items = ['%s%s' % (x, postfix) for x in cls.cont_items]

        for container in cls.cont_items:
            c = cls.account.container(container)
            if not c.create():
                raise ResponseError(cls.conn.response)


class TestAccountSorting(Base):
    env = TestAccountSortingEnv

    def testAccountContainerListSorting(self):
        # name (byte order) sorting.
        cont_list = sorted(self.env.cont_items)
        for reverse in ('false', 'no', 'off', '', 'garbage'):
            cont_listing = self.env.account.containers(
                parms={'reverse': reverse})
            self.assert_status(200)
            self.assertEqual(cont_list, cont_listing,
                             'Expected %s but got %s with reverse param %r'
                             % (cont_list, cont_listing, reverse))

    def testAccountContainerListSortingReverse(self):
        # name (byte order) sorting.
        cont_list = sorted(self.env.cont_items)
        cont_list.reverse()
        for reverse in ('true', '1', 'yes', 'on', 't', 'y'):
            cont_listing = self.env.account.containers(
                parms={'reverse': reverse})
            self.assert_status(200)
            self.assertEqual(cont_list, cont_listing,
                             'Expected %s but got %s with reverse param %r'
                             % (cont_list, cont_listing, reverse))

    def testAccountContainerListSortingByPrefix(self):
        cont_list = sorted(c for c in self.env.cont_items if c.startswith('a'))
        cont_list.reverse()
        cont_listing = self.env.account.containers(parms={
            'reverse': 'on', 'prefix': 'a'})
        self.assert_status(200)
        self.assertEqual(cont_list, cont_listing)

    def testAccountContainerListSortingByMarkersExclusive(self):
        first_item = self.env.cont_items[3]  # 'b1' + postfix
        last_item = self.env.cont_items[4]  # 'B2' + postfix

        cont_list = sorted(c for c in self.env.cont_items
                           if last_item < c < first_item)
        cont_list.reverse()
        cont_listing = self.env.account.containers(parms={
            'reverse': 'on', 'marker': first_item, 'end_marker': last_item})
        self.assert_status(200)
        self.assertEqual(cont_list, cont_listing)

    def testAccountContainerListSortingByMarkersInclusive(self):
        first_item = self.env.cont_items[3]  # 'b1' + postfix
        last_item = self.env.cont_items[4]  # 'B2' + postfix

        cont_list = sorted(c for c in self.env.cont_items
                           if last_item <= c <= first_item)
        cont_list.reverse()
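        # both markers are exclusive, so step just past each: append
        # '\x00' to include first_item and decrement last_item's final
        # byte to include last_item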
        cont_listing = self.env.account.containers(parms={
            'reverse': 'on', 'marker': first_item + '\x00',
            'end_marker': last_item[:-1] + chr(ord(last_item[-1]) - 1)})
        self.assert_status(200)
        self.assertEqual(cont_list, cont_listing)

    def testAccountContainerListSortingByReversedMarkers(self):
        cont_listing = self.env.account.containers(parms={
            'reverse': 'on', 'marker': 'B', 'end_marker': 'b1'})
        self.assert_status(204)
        self.assertEqual([], cont_listing)


class TestContainerEnv(BaseEnv):
    @classmethod
    def setUp(cls):
        super(TestContainerEnv, cls).setUp()
        cls.container = cls.account.container(Utils.create_name())
        if not cls.container.create():
            raise ResponseError(cls.conn.response)

        cls.file_count = 10
        cls.file_size = 128
        cls.files = list()
        for x in range(cls.file_count):
            file_item = cls.container.file(Utils.create_name())
            file_item.write_random(cls.file_size)
            cls.files.append(file_item.name)


class TestContainerDev(Base):
    env = TestContainerEnv


class TestContainerDevUTF8(Base2, TestContainerDev):
    pass


class TestContainer(Base):
    env = TestContainerEnv

    def testContainerNameLimit(self):
        limit = load_constraint('max_container_name_length')

        for l in (limit - 100, limit - 10, limit - 1, limit,
                  limit + 1, limit + 10, limit + 100):
            cont = self.env.account.container('a' * l)
            if l <= limit:
                self.assertTrue(cont.create())
                self.assert_status((201, 202))
            else:
                self.assertFalse(cont.create())
                self.assert_status(400)

    def testFileThenContainerDelete(self):
        cont = self.env.account.container(Utils.create_name())
        self.assertTrue(cont.create())
        file_item = cont.file(Utils.create_name())
        self.assertTrue(file_item.write_random())

        self.assertTrue(file_item.delete())
        self.assert_status(204)
        self.assertNotIn(file_item.name, cont.files())

        self.assertTrue(cont.delete())
        self.assert_status(204)
        self.assertNotIn(cont.name, self.env.account.containers())

    def testFileListingLimitMarkerPrefix(self):
        cont = self.env.account.container(Utils.create_name())
        self.assertTrue(cont.create())

        files = sorted([Utils.create_name() for x in range(10)])
        for f in files:
            file_item = cont.file(f)
            self.assertTrue(file_item.write_random())

        for i in range(len(files)):
            f = files[i]
            for j in range(1, len(files) - i):
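                # the marker is exclusive, so a listing with limit j
                # starting after files[i] should return exactly the
                # next j names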
                self.assertEqual(cont.files(parms={'limit': j, 'marker': f}),
                                 files[i + 1: i + j + 1])
            self.assertEqual(cont.files(parms={'marker': f}), files[i + 1:])
            self.assertEqual(cont.files(parms={'marker': f, 'prefix': f}), [])
            self.assertEqual(cont.files(parms={'prefix': f}), [f])

    def testPrefixAndLimit(self):
        load_constraint('container_listing_limit')
        cont = self.env.account.container(Utils.create_name())
        self.assertTrue(cont.create())

        prefix_file_count = 10
        limit_count = 2
        prefixes = ['alpha/', 'beta/', 'kappa/']
        prefix_files = {}

        for prefix in prefixes:
            prefix_files[prefix] = []

            for i in range(prefix_file_count):
                file_item = cont.file(prefix + Utils.create_name())
                file_item.write()
                prefix_files[prefix].append(file_item.name)

        for format_type in [None, 'json', 'xml']:
            for prefix in prefixes:
                files = cont.files(parms={'prefix': prefix,
                                          'format': format_type})
                if isinstance(files[0], dict):
                    files = [x.get('name', x.get('subdir')) for x in files]
                self.assertEqual(files, sorted(prefix_files[prefix]))

        for format_type in [None, 'json', 'xml']:
            for prefix in prefixes:
                files = cont.files(parms={'limit': limit_count,
                                          'prefix': prefix,
                                          'format': format_type})
                if isinstance(files[0], dict):
                    files = [x.get('name', x.get('subdir')) for x in files]
                self.assertEqual(len(files), limit_count)

                for file_item in files:
                    self.assertTrue(file_item.startswith(prefix))

    def testListDelimiter(self):
        cont = self.env.account.container(Utils.create_name())
        self.assertTrue(cont.create())

        delimiter = '-'
        files = ['test', delimiter.join(['test', 'bar']),
                 delimiter.join(['test', 'foo'])]
        for f in files:
            file_item = cont.file(f)
            self.assertTrue(file_item.write_random())

        for format_type in [None, 'json', 'xml']:
            results = cont.files(parms={'format': format_type})
            if isinstance(results[0], dict):
                results = [x.get('name', x.get('subdir')) for x in results]
            self.assertEqual(results, ['test', 'test-bar', 'test-foo'])

            results = cont.files(parms={'delimiter': delimiter,
                                        'format': format_type})
            if isinstance(results[0], dict):
                results = [x.get('name', x.get('subdir')) for x in results]
            self.assertEqual(results, ['test', 'test-'])

            results = cont.files(parms={'delimiter': delimiter,
                                        'format': format_type,
                                        'reverse': 'yes'})
            if isinstance(results[0], dict):
                results = [x.get('name', x.get('subdir')) for x in results]
            self.assertEqual(results, ['test-', 'test'])

    def testListMultiCharDelimiter(self):
        cont = self.env.account.container(Utils.create_name())
        self.assertTrue(cont.create())

        delimiter = '-&'
        files = ['test', delimiter.join(['test', 'bar']),
                 delimiter.join(['test', 'foo'])]
        for f in files:
            file_item = cont.file(f)
            self.assertTrue(file_item.write_random())

        for format_type in [None, 'json', 'xml']:
            results = cont.files(parms={'format': format_type})
            if isinstance(results[0], dict):
                results = [x.get('name', x.get('subdir')) for x in results]
            self.assertEqual(results, ['test', 'test-&bar', 'test-&foo'])

            results = cont.files(parms={'delimiter': delimiter,
                                        'format': format_type})
            if isinstance(results[0], dict):
                results = [x.get('name', x.get('subdir')) for x in results]
            self.assertEqual(results, ['test', 'test-&'])

            results = cont.files(parms={'delimiter': delimiter,
                                        'format': format_type,
                                        'reverse': 'yes'})
            if isinstance(results[0], dict):
                results = [x.get('name', x.get('subdir')) for x in results]
            self.assertEqual(results, ['test-&', 'test'])

    def testListDelimiterAndPrefix(self):
        cont = self.env.account.container(Utils.create_name())
        self.assertTrue(cont.create())

        delimiter = 'a'
        files = ['bar', 'bazar']
        for f in files:
            file_item = cont.file(f)
            self.assertTrue(file_item.write_random())

        results = cont.files(parms={'delimiter': delimiter, 'prefix': 'ba'})
        self.assertEqual(results, ['bar', 'baza'])

        results = cont.files(parms={'delimiter': delimiter,
                                    'prefix': 'ba',
                                    'reverse': 'yes'})
        self.assertEqual(results, ['baza', 'bar'])

    def testLeadingDelimiter(self):
        cont = self.env.account.container(Utils.create_name())
        self.assertTrue(cont.create())

        delimiter = '/'
        files = ['test', delimiter.join(['', 'test', 'bar']),
                 delimiter.join(['', 'test', 'bar', 'foo'])]
        for f in files:
            file_item = cont.file(f)
            self.assertTrue(file_item.write_random())
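
        # names with a leading '/' roll up into a single '/' subdir
        # entry in a delimiter listing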
        results = cont.files(parms={'delimiter': delimiter})
        self.assertEqual(results, [delimiter, 'test'])

    def testCreate(self):
        cont = self.env.account.container(Utils.create_name())
        self.assertTrue(cont.create())
        self.assert_status(201)
        self.assertIn(cont.name, self.env.account.containers())

    def testContainerFileListOnContainerThatDoesNotExist(self):
        for format_type in [None, 'json', 'xml']:
            container = self.env.account.container(Utils.create_name())
            self.assertRaises(ResponseError, container.files,
                              parms={'format': format_type})
            self.assert_status(404)

    def testUtf8Container(self):
        valid_utf8 = Utils.create_utf8_name()
        if six.PY2:
            invalid_utf8 = valid_utf8[::-1]
        else:
            invalid_utf8 = (valid_utf8.encode('utf8')[::-1]).decode(
                'utf-8', 'surrogateescape')
        container = self.env.account.container(valid_utf8)
        self.assertTrue(container.create(cfg={'no_path_quote': True}))
        self.assertIn(container.name, self.env.account.containers())
        self.assertEqual(container.files(), [])
        self.assertTrue(container.delete())

        container = self.env.account.container(invalid_utf8)
        self.assertFalse(container.create(cfg={'no_path_quote': True}))
        self.assert_status(412)
        self.assertRaises(ResponseError, container.files,
                          cfg={'no_path_quote': True})
        self.assert_status(412)

    def testCreateOnExisting(self):
        cont = self.env.account.container(Utils.create_name())
        self.assertTrue(cont.create())
        self.assert_status(201)
        self.assertTrue(cont.create())
        self.assert_status(202)

    def testSlashInName(self):
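        # '/' separates container and object names in the request path,
        # so a container whose name contains one can never be addressed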
|
2019-03-01 12:13:27 -08:00
|
|
|
if six.PY2:
|
|
|
|
cont_name = list(Utils.create_name().decode('utf-8'))
|
2010-07-12 17:03:45 -05:00
|
|
|
else:
|
|
|
|
cont_name = list(Utils.create_name())
|
2012-09-03 23:30:52 +08:00
|
|
|
cont_name[random.randint(2, len(cont_name) - 2)] = '/'
|
2010-07-12 17:03:45 -05:00
|
|
|
cont_name = ''.join(cont_name)
|
2019-03-01 12:13:27 -08:00
|
|
|
if six.PY2:
|
2010-07-12 17:03:45 -05:00
|
|
|
cont_name = cont_name.encode('utf-8')
|
|
|
|
|
|
|
|
cont = self.env.account.container(cont_name)
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(cont.create(cfg={'no_path_quote': True}),
|
|
|
|
'created container with name %s' % (cont_name))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertNotIn(cont.name, self.env.account.containers())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testDelete(self):
|
|
|
|
cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(cont.create())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(201)
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(cont.delete())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(204)
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertNotIn(cont.name, self.env.account.containers())
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
def testDeleteOnContainerThatDoesNotExist(self):
|
|
|
|
cont = self.env.account.container(Utils.create_name())
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(cont.delete())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
def testDeleteOnContainerWithFiles(self):
|
|
|
|
cont = self.env.account.container(Utils.create_name())
|
2015-07-21 18:06:32 +05:30
|
|
|
self.assertTrue(cont.create())
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = cont.file(Utils.create_name())
|
|
|
|
file_item.write_random(self.env.file_size)
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(file_item.name, cont.files())
|
2015-07-28 10:31:54 +01:00
|
|
|
self.assertFalse(cont.delete())
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(409)
|
|
|
|
|
|
|
|
def testFileCreateInContainerThatDoesNotExist(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
file_item = File(self.env.conn, self.env.account, Utils.create_name(),
|
2013-08-31 20:25:25 -04:00
|
|
|
Utils.create_name())
|
2013-08-04 11:15:53 +08:00
|
|
|
self.assertRaises(ResponseError, file_item.write)
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(404)
|
|
|
|
|
|
|
|
def testLastFileMarker(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in [None, 'json', 'xml']:
|
2019-03-01 12:13:27 -08:00
|
|
|
files = self.env.container.files(parms={'format': format_type})
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(len(files), len(self.env.files))
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(200)
|
|
|
|
|
2019-03-07 14:36:02 -08:00
|
|
|
marker = files[-1] if format_type is None else files[-1]['name']
|
2010-07-12 17:03:45 -05:00
|
|
|
files = self.env.container.files(
|
2019-03-07 14:36:02 -08:00
|
|
|
parms={'format': format_type, 'marker': marker})
|
2014-02-26 17:48:33 +08:00
|
|
|
self.assertEqual(len(files), 0)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
if format_type is None:
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(204)
|
|
|
|
else:
|
|
|
|
self.assert_status(200)
|
|
|
|
|
|
|
|
def testContainerFileList(self):
|
2013-08-04 11:15:53 +08:00
|
|
|
for format_type in [None, 'json', 'xml']:
|
|
|
|
files = self.env.container.files(parms={'format': format_type})
|
2010-07-12 17:03:45 -05:00
|
|
|
self.assert_status(200)
|
|
|
|
if isinstance(files[0], dict):
|
|
|
|
files = [x['name'] for x in files]
|
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
for file_item in self.env.files:
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(file_item, files)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-08-04 11:15:53 +08:00
|
|
|
for file_item in files:
|
2015-07-22 15:40:55 -07:00
|
|
|
self.assertIn(file_item, self.env.files)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2016-06-07 13:41:55 +01:00
|
|
|
def _testContainerFormattedFileList(self, format_type):
|
|
|
|
expected = {}
|
|
|
|
for name in self.env.files:
|
|
|
|
expected[name] = self.env.container.file(name).info()
|
|
|
|
|
|
|
|
file_list = self.env.container.files(parms={'format': format_type})
|
|
|
|
self.assert_status(200)
|
|
|
|
for actual in file_list:
|
|
|
|
name = actual['name']
|
|
|
|
self.assertIn(name, expected)
|
|
|
|
self.assertEqual(expected[name]['etag'], actual['hash'])
|
|
|
|
self.assertEqual(
|
|
|
|
expected[name]['content_type'], actual['content_type'])
|
|
|
|
self.assertEqual(
|
|
|
|
expected[name]['content_length'], actual['bytes'])
|
|
|
|
expected.pop(name)
|
|
|
|
self.assertFalse(expected) # sanity check
|
|
|
|
|
|
|
|
    def testContainerJsonFileList(self):
        self._testContainerFormattedFileList('json')

    def testContainerXmlFileList(self):
        self._testContainerFormattedFileList('xml')

    def testMarkerLimitFileList(self):
        for format_type in [None, 'json', 'xml']:
            for marker in ['0', 'A', 'I', 'R', 'Z', 'a', 'i', 'r', 'z',
                           'abc123', 'mnop', 'xyz']:
                limit = random.randint(2, self.env.file_count - 1)
                files = self.env.container.files(parms={'format': format_type,
                                                        'marker': marker,
                                                        'limit': limit})

                if not files:
                    continue

                if isinstance(files[0], dict):
                    files = [x['name'] for x in files]

                self.assertLessEqual(len(files), limit)
                self.assertGreater(locale.strcoll(files[0], marker), 0)

    def testFileOrder(self):
        for format_type in [None, 'json', 'xml']:
            files = self.env.container.files(parms={'format': format_type})
            if isinstance(files[0], dict):
                files = [x['name'] for x in files]
            self.assertEqual(sorted(files, key=locale.strxfrm), files)

    def testContainerInfo(self):
        info = self.env.container.info()
        self.assert_status(204)
        self.assertEqual(info['object_count'], self.env.file_count)
        self.assertEqual(info['bytes_used'],
                         self.env.file_count * self.env.file_size)

    def testContainerInfoOnContainerThatDoesNotExist(self):
        container = self.env.account.container(Utils.create_name())
        self.assertRaises(ResponseError, container.info)
        self.assert_status(404)

    def testContainerFileListWithLimit(self):
        for format_type in [None, 'json', 'xml']:
            files = self.env.container.files(parms={'format': format_type,
                                                    'limit': 2})
            self.assertEqual(len(files), 2)

    def testContainerExistenceCachingProblem(self):
        cont = self.env.account.container(Utils.create_name())
        self.assertRaises(ResponseError, cont.files)
        self.assertTrue(cont.create())
        self.assertEqual(cont.files(), [])

        cont = self.env.account.container(Utils.create_name())
        self.assertRaises(ResponseError, cont.files)
        self.assertTrue(cont.create())
        # NB: no GET! Make sure the PUT cleared the cached 404
        file_item = cont.file(Utils.create_name())
        file_item.write_random()

    def testContainerLastModified(self):
        container = self.env.account.container(Utils.create_name())
        self.assertTrue(container.create())
        info = container.info()
        t0 = info['last_modified']
        # the last-modified header only has one-second resolution, so we
        # need to wait for the timestamp to tick over before re-checking.
        eventlet.sleep(1)

        # POST to the container changes its last-modified timestamp
        self.assertTrue(
            container.update_metadata({'x-container-meta-japan': 'mitaka'}))
        info = container.info()
        t1 = info['last_modified']
        self.assertNotEqual(t0, t1)
        eventlet.sleep(1)

        # PUT container (overwrite) also changes last-modified
        self.assertTrue(container.create())
        info = container.info()
        t2 = info['last_modified']
        self.assertNotEqual(t1, t2)
        eventlet.sleep(1)

        # PUT object doesn't change container last-modified timestamp
        obj = container.file(Utils.create_name())
        self.assertTrue(
            obj.write(b"aaaaa", hdrs={'Content-Type': 'text/plain'}))
        info = container.info()
        t3 = info['last_modified']
        self.assertEqual(t2, t3)

        # POST object also doesn't change container last-modified timestamp
        self.assertTrue(
            obj.sync_metadata({'us': 'austin'}))
        info = container.info()
        t4 = info['last_modified']
        self.assertEqual(t2, t4)


class TestContainerUTF8(Base2, TestContainer):
    pass


class TestContainerSortingEnv(BaseEnv):
    @classmethod
    def setUp(cls):
        super(TestContainerSortingEnv, cls).setUp()
        cls.container = cls.account.container(Utils.create_name())
        if not cls.container.create():
            raise ResponseError(cls.conn.response)

        cls.file_items = ('a1', 'a2', 'A3', 'b1', 'B2', 'a10', 'b10', 'zz')
        cls.files = list()
        cls.file_size = 128
        for name in cls.file_items:
            file_item = cls.container.file(name)
            file_item.write_random(cls.file_size)
            cls.files.append(file_item.name)


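# The fixture names above deliberately mix cases ('A3', 'B2') and
# multi-digit suffixes ('a10', 'b10') so the listing tests below exercise
# Swift's byte-wise ordering rather than a case-insensitive or "natural"
# numeric sort.
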
class TestContainerSorting(Base):
    env = TestContainerSortingEnv

    def testContainerFileListSortingReversed(self):
        file_list = list(sorted(self.env.file_items))
        file_list.reverse()
        for reverse in ('true', '1', 'yes', 'on', 't', 'y'):
            cont_files = self.env.container.files(parms={'reverse': reverse})
            self.assert_status(200)
            self.assertEqual(file_list, cont_files,
                             'Expected %s but got %s with reverse param %r'
                             % (file_list, cont_files, reverse))

    def testContainerFileSortingByPrefixReversed(self):
        cont_list = sorted(c for c in self.env.file_items if c.startswith('a'))
        cont_list.reverse()
        cont_listing = self.env.container.files(parms={
            'reverse': 'on', 'prefix': 'a'})
        self.assert_status(200)
        self.assertEqual(cont_list, cont_listing)

    def testContainerFileSortingByMarkersExclusiveReversed(self):
        first_item = self.env.file_items[3]  # 'b1' + postfix
        last_item = self.env.file_items[4]  # 'B2' + postfix

        cont_list = sorted(c for c in self.env.file_items
                           if last_item < c < first_item)
        cont_list.reverse()
        cont_listing = self.env.container.files(parms={
            'reverse': 'on', 'marker': first_item, 'end_marker': last_item})
        self.assert_status(200)
        self.assertEqual(cont_list, cont_listing)

    def testContainerFileSortingByMarkersInclusiveReversed(self):
        first_item = self.env.file_items[3]  # 'b1' + postfix
        last_item = self.env.file_items[4]  # 'B2' + postfix

        cont_list = sorted(c for c in self.env.file_items
                           if last_item <= c <= first_item)
        cont_list.reverse()
        cont_listing = self.env.container.files(parms={
            'reverse': 'on', 'marker': first_item + '\x00',
            'end_marker': last_item[:-1] + chr(ord(last_item[-1]) - 1)})
        self.assert_status(200)
        self.assertEqual(cont_list, cont_listing)

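    # A note on the marker arithmetic above: marker and end_marker are
    # exclusive bounds in Swift listings. Appending '\x00' (the smallest
    # byte) to the marker lets the original name fall inside the reversed
    # listing, and decrementing the last byte of end_marker does the same
    # at the other end, turning both exclusive bounds into effectively
    # inclusive ones.
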
    def testContainerFileSortingByReversedMarkersReversed(self):
        cont_listing = self.env.container.files(parms={
            'reverse': 'on', 'marker': 'B', 'end_marker': 'b1'})
        self.assert_status(204)
        self.assertEqual([], cont_listing)

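    # With reverse=on the marker must collate *after* the end_marker for
    # the range to be non-empty; 'B' sorts before 'b1' byte-wise, so the
    # listing above comes back empty with a 204.
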
    def testContainerFileListSorting(self):
        file_list = list(sorted(self.env.file_items))
        cont_files = self.env.container.files()
        self.assert_status(200)
        self.assertEqual(file_list, cont_files)

        # Let's try again, with reverse specifically turned off
        cont_files = self.env.container.files(parms={'reverse': 'off'})
        self.assert_status(200)
        self.assertEqual(file_list, cont_files)

        cont_files = self.env.container.files(parms={'reverse': 'false'})
        self.assert_status(200)
        self.assertEqual(file_list, cont_files)

        cont_files = self.env.container.files(parms={'reverse': 'no'})
        self.assert_status(200)
        self.assertEqual(file_list, cont_files)

        cont_files = self.env.container.files(parms={'reverse': ''})
        self.assert_status(200)
        self.assertEqual(file_list, cont_files)

        # Let's try again, with incorrect reverse values
        cont_files = self.env.container.files(parms={'reverse': 'foo'})
        self.assert_status(200)
        self.assertEqual(file_list, cont_files)

        cont_files = self.env.container.files(parms={'reverse': 'hai'})
        self.assert_status(200)
        self.assertEqual(file_list, cont_files)

        cont_files = self.env.container.files(parms={'reverse': 'o=[]::::>'})
        self.assert_status(200)
        self.assertEqual(file_list, cont_files)


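# As the last three cases above show, unrecognized values for the
# 'reverse' query parameter are simply treated as false rather than
# rejected, so the listing comes back in normal order with a 200.
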
class TestContainerPathsEnv(BaseEnv):
    @classmethod
    def setUp(cls):
        super(TestContainerPathsEnv, cls).setUp()
        cls.file_size = 8

        cls.container = cls.account.container(Utils.create_name())
        if not cls.container.create():
            raise ResponseError(cls.conn.response)

        cls.files = [
            '/file1',
            '/file A',
            '/dir1/',
            '/dir2/',
            '/dir1/file2',
            '/dir1/subdir1/',
            '/dir1/subdir2/',
            '/dir1/subdir1/file2',
            '/dir1/subdir1/file3',
            '/dir1/subdir1/file4',
            '/dir1/subdir1/subsubdir1/',
            '/dir1/subdir1/subsubdir1/file5',
            '/dir1/subdir1/subsubdir1/file6',
            '/dir1/subdir1/subsubdir1/file7',
            '/dir1/subdir1/subsubdir1/file8',
            '/dir1/subdir1/subsubdir2/',
            '/dir1/subdir1/subsubdir2/file9',
            '/dir1/subdir1/subsubdir2/file0',
            'file1',
            'dir1/',
            'dir2/',
            'dir1/file2',
            'dir1/subdir1/',
            'dir1/subdir2/',
            'dir1/subdir1/file2',
            'dir1/subdir1/file3',
            'dir1/subdir1/file4',
            'dir1/subdir1/subsubdir1/',
            'dir1/subdir1/subsubdir1/file5',
            'dir1/subdir1/subsubdir1/file6',
            'dir1/subdir1/subsubdir1/file7',
            'dir1/subdir1/subsubdir1/file8',
            'dir1/subdir1/subsubdir2/',
            'dir1/subdir1/subsubdir2/file9',
            'dir1/subdir1/subsubdir2/file0',
            'dir1/subdir with spaces/',
            'dir1/subdir with spaces/file B',
            'dir1/subdir+with{whatever/',
            'dir1/subdir+with{whatever/file D',
        ]

        stored_files = set()
        for f in cls.files:
            file_item = cls.container.file(f)
            if f.endswith('/'):
                file_item.write(hdrs={'Content-Type': 'application/directory'})
            else:
                file_item.write_random(cls.file_size,
                                       hdrs={'Content-Type':
                                             'application/directory'})
            if normalized_urls:
                nfile = '/'.join(filter(None, f.split('/')))
                if f[-1] == '/':
                    nfile += '/'
                stored_files.add(nfile)
            else:
                stored_files.add(f)
        cls.stored_files = sorted(stored_files)


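# Entries ending in '/' above are written as zero-byte pseudo-directory
# markers with Content-Type: application/directory, which is the marker
# convention the legacy 'path' listing parameter relies on to simulate a
# directory hierarchy.
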
class TestContainerPaths(Base):
    env = TestContainerPathsEnv

    def testTraverseContainer(self):
        found_files = []
        found_dirs = []

        def recurse_path(path, count=0):
            if count > 10:
                raise ValueError('too deep recursion')

            for file_item in self.env.container.files(parms={'path': path}):
                self.assertTrue(file_item.startswith(path))
                if file_item.endswith('/'):
                    recurse_path(file_item, count + 1)
                    found_dirs.append(file_item)
                else:
                    found_files.append(file_item)

        recurse_path('')
        for file_item in self.env.stored_files:
            if file_item.startswith('/'):
                self.assertNotIn(file_item, found_dirs)
                self.assertNotIn(file_item, found_files)
            elif file_item.endswith('/'):
                self.assertIn(file_item, found_dirs)
                self.assertNotIn(file_item, found_files)
            else:
                self.assertIn(file_item, found_files)
                self.assertNotIn(file_item, found_dirs)

        found_files = []
        found_dirs = []
        recurse_path('/')
        for file_item in self.env.stored_files:
            if not file_item.startswith('/'):
                self.assertNotIn(file_item, found_dirs)
                self.assertNotIn(file_item, found_files)
            elif file_item.endswith('/'):
                self.assertIn(file_item, found_dirs)
                self.assertNotIn(file_item, found_files)
            else:
                self.assertIn(file_item, found_files)
                self.assertNotIn(file_item, found_dirs)

    def testContainerListing(self):
        for format_type in (None, 'json', 'xml'):
            files = self.env.container.files(parms={'format': format_type})

            if isinstance(files[0], dict):
                files = [str(x['name']) for x in files]

            self.assertEqual(files, self.env.stored_files)

        for format_type in ('json', 'xml'):
            for file_item in self.env.container.files(parms={'format':
                                                             format_type}):
                self.assertGreaterEqual(int(file_item['bytes']), 0)
                self.assertIn('last_modified', file_item)
                if file_item['name'].endswith('/'):
                    self.assertEqual(file_item['content_type'],
                                     'application/directory')

    def testStructure(self):
        def assert_listing(path, file_list):
            files = self.env.container.files(parms={'path': path})
            self.assertEqual(sorted(file_list, key=locale.strxfrm), files)
        if not normalized_urls:
            assert_listing('/', ['/dir1/', '/dir2/', '/file1', '/file A'])
            assert_listing('/dir1',
                           ['/dir1/file2', '/dir1/subdir1/', '/dir1/subdir2/'])
            assert_listing('/dir1/',
                           ['/dir1/file2', '/dir1/subdir1/', '/dir1/subdir2/'])
            assert_listing('/dir1/subdir1',
                           ['/dir1/subdir1/subsubdir2/', '/dir1/subdir1/file2',
                            '/dir1/subdir1/file3', '/dir1/subdir1/file4',
                            '/dir1/subdir1/subsubdir1/'])
            assert_listing('/dir1/subdir2', [])
            assert_listing('', ['file1', 'dir1/', 'dir2/'])
        else:
            assert_listing('', ['file1', 'dir1/', 'dir2/', 'file A'])
        assert_listing('dir1', ['dir1/file2', 'dir1/subdir1/',
                                'dir1/subdir2/', 'dir1/subdir with spaces/',
                                'dir1/subdir+with{whatever/'])
        assert_listing('dir1/subdir1',
                       ['dir1/subdir1/file4', 'dir1/subdir1/subsubdir2/',
                        'dir1/subdir1/file2', 'dir1/subdir1/file3',
                        'dir1/subdir1/subsubdir1/'])
        assert_listing('dir1/subdir1/subsubdir1',
                       ['dir1/subdir1/subsubdir1/file7',
                        'dir1/subdir1/subsubdir1/file5',
                        'dir1/subdir1/subsubdir1/file8',
                        'dir1/subdir1/subsubdir1/file6'])
        assert_listing('dir1/subdir1/subsubdir1/',
                       ['dir1/subdir1/subsubdir1/file7',
                        'dir1/subdir1/subsubdir1/file5',
                        'dir1/subdir1/subsubdir1/file8',
                        'dir1/subdir1/subsubdir1/file6'])
        assert_listing('dir1/subdir with spaces/',
                       ['dir1/subdir with spaces/file B'])


class TestFileEnv(BaseEnv):
    @classmethod
    def setUp(cls):
        super(TestFileEnv, cls).setUp()
        if not tf.skip2:
            # create a second account and connection
            # for account-to-account copy tests
            config2 = deepcopy(tf.config)
            config2['account'] = tf.config['account2']
            config2['username'] = tf.config['username2']
            config2['password'] = tf.config['password2']
            cls.conn2 = Connection(config2)
            cls.conn2.authenticate()

            cls.account2 = cls.conn2.get_account()
            cls.account2.delete_containers()

        cls.container = cls.account.container(Utils.create_name())
        if not cls.container.create():
            raise ResponseError(cls.conn.response)

        cls.file_size = 128

        # With keystoneauth we need the accounts to have had the project
        # domain id persisted as sysmeta prior to testing ACLs. This may
        # not be the case if, for example, the account was created using
        # a request with reseller_admin role, when project domain id may
        # not have been known. So we ensure that the project domain id is
        # in sysmeta by making a POST to the accounts using an admin role.
        cls.account.update_metadata()
        if not tf.skip2:
            cls.account2.update_metadata()


class TestFileDev(Base):
    env = TestFileEnv


class TestFileDevUTF8(Base2, TestFileDev):
    pass


class TestFile(Base):
    env = TestFileEnv

    def testGetResponseHeaders(self):
        obj_data = b'test_body'

        def do_test(put_hdrs, get_hdrs, expected_hdrs, unexpected_hdrs):
            filename = Utils.create_name()
            file_item = self.env.container.file(filename)
            resp = file_item.write(
                data=obj_data, hdrs=put_hdrs, return_resp=True)

            # put then get an object
            resp.read()
            read_data = file_item.read(hdrs=get_hdrs)
            self.assertEqual(obj_data, read_data)  # sanity check
            resp_headers = file_item.conn.response.getheaders()

            # check the *list* of all header (name, value) pairs rather than
            # constructing a dict in case of repeated names in the list
            errors = []
            for k, v in resp_headers:
                if k.lower() in unexpected_hdrs:
                    errors.append('Found unexpected header %s: %s' % (k, v))
            for k, v in expected_hdrs.items():
                matches = [hdr for hdr in resp_headers if hdr[0].lower() == k]
                if not matches:
                    errors.append('Missing expected header %s' % k)
                for (got_k, got_v) in matches:
                    # The Connection: header is parsed by the cluster's LB and
                    # may be returned in either original lowercase or
                    # camel-cased form.
                    if k == 'connection':
                        got_v = got_v.lower()
                    if got_v != v:
                        errors.append('Expected %s but got %s for %s' %
                                      (v, got_v, k))
            if errors:
                self.fail(
                    'Errors in response headers:\n %s' % '\n '.join(errors))

        put_headers = {'X-Object-Meta-Fruit': 'Banana',
                       'X-Delete-After': '10000',
                       'Content-Type': 'application/test'}
        expected_headers = {'content-length': str(len(obj_data)),
                            'x-object-meta-fruit': 'Banana',
                            'accept-ranges': 'bytes',
                            'content-type': 'application/test',
                            'etag': hashlib.md5(obj_data).hexdigest(),
                            'last-modified': mock.ANY,
                            'date': mock.ANY,
                            'x-delete-at': mock.ANY,
                            'x-trans-id': mock.ANY,
                            'x-openstack-request-id': mock.ANY}
        unexpected_headers = ['connection', 'x-delete-after']
        do_test(put_headers, {}, expected_headers, unexpected_headers)

        get_headers = {'Connection': 'keep-alive'}
        expected_headers['connection'] = 'keep-alive'
        unexpected_headers = ['x-delete-after']
        do_test(put_headers, get_headers, expected_headers, unexpected_headers)

    def testCopy(self):
        # makes sure to test encoded characters
        source_filename = 'dealde%2Fl04 011e%204c8df/flash.png'
        file_item = self.env.container.file(source_filename)

        metadata = {}
        metadata[Utils.create_ascii_name()] = Utils.create_name()
        put_headers = {'Content-Type': 'application/test',
                       'Content-Encoding': 'gzip',
                       'Content-Disposition': 'attachment; filename=myfile'}
        file_item.metadata = metadata
        data = file_item.write_random(hdrs=put_headers)

        # the allowed headers are configurable in object server, so we cannot
        # assert that content-encoding and content-disposition get *copied*
        # unless they were successfully set on the original PUT, so populate
        # expected_headers by making a HEAD on the original object
        file_item.initialize()
        self.assertEqual('application/test', file_item.content_type)
        resp_headers = dict(file_item.conn.response.getheaders())
        expected_headers = {}
        for k, v in put_headers.items():
            if k.lower() in resp_headers:
                expected_headers[k] = v

        dest_cont = self.env.account.container(Utils.create_name())
        self.assertTrue(dest_cont.create())

        # copy both from within and across containers
        for cont in (self.env.container, dest_cont):
            # copy both with and without initial slash
            for prefix in ('', '/'):
                dest_filename = Utils.create_name()

                extra_hdrs = {'X-Object-Meta-Extra': 'fresh'}
                self.assertTrue(file_item.copy(
                    '%s%s' % (prefix, cont), dest_filename, hdrs=extra_hdrs))

                # verify container listing for copy
                listing = cont.files(parms={'format': 'json'})
                for obj in listing:
                    if obj['name'] == dest_filename:
                        break
                else:
                    self.fail('Failed to find %s in listing' % dest_filename)

                self.assertEqual(file_item.size, obj['bytes'])
                self.assertEqual(file_item.etag, obj['hash'])
                self.assertEqual(file_item.content_type, obj['content_type'])

                file_copy = cont.file(dest_filename)

                self.assertEqual(data, file_copy.read())
                self.assertTrue(file_copy.initialize())
                expected_metadata = dict(metadata)
                # new metadata should be merged with existing
                expected_metadata['extra'] = 'fresh'
                self.assertDictEqual(expected_metadata, file_copy.metadata)
                resp_headers = dict(file_copy.conn.response.getheaders())
                for k, v in expected_headers.items():
                    self.assertIn(k.lower(), resp_headers)
                    self.assertEqual(v, resp_headers[k.lower()])

                # repeat copy with updated content-type, content-encoding and
                # content-disposition, which should get updated
                extra_hdrs = {
                    'X-Object-Meta-Extra': 'fresher',
                    'Content-Type': 'application/test-changed',
                    'Content-Encoding': 'not_gzip',
                    'Content-Disposition': 'attachment; filename=notmyfile'}
                self.assertTrue(file_item.copy(
                    '%s%s' % (prefix, cont), dest_filename, hdrs=extra_hdrs))

                self.assertIn(dest_filename, cont.files())

                file_copy = cont.file(dest_filename)

                self.assertEqual(data, file_copy.read())
                self.assertTrue(file_copy.initialize())
                expected_metadata['extra'] = 'fresher'
                self.assertDictEqual(expected_metadata, file_copy.metadata)
                resp_headers = dict(file_copy.conn.response.getheaders())
                # if k is in expected_headers then we can assert its new value
                for k, v in expected_headers.items():
                    v = extra_hdrs.get(k, v)
                    self.assertIn(k.lower(), resp_headers)
                    self.assertEqual(v, resp_headers[k.lower()])

                # verify container listing for copy
                listing = cont.files(parms={'format': 'json'})
                for obj in listing:
                    if obj['name'] == dest_filename:
                        break
                else:
                    self.fail('Failed to find %s in listing' % dest_filename)

                self.assertEqual(file_item.size, obj['bytes'])
                self.assertEqual(file_item.etag, obj['hash'])
                self.assertEqual(
                    'application/test-changed', obj['content_type'])

                # repeat copy with X-Fresh-Metadata header - existing user
                # metadata should not be copied, new completely replaces it.
                extra_hdrs = {'Content-Type': 'application/test-updated',
                              'X-Object-Meta-Extra': 'fresher',
                              'X-Fresh-Metadata': 'true'}
                self.assertTrue(file_item.copy(
                    '%s%s' % (prefix, cont), dest_filename, hdrs=extra_hdrs))

                self.assertIn(dest_filename, cont.files())

                file_copy = cont.file(dest_filename)

                self.assertEqual(data, file_copy.read())
                self.assertTrue(file_copy.initialize())
                self.assertEqual('application/test-updated',
                                 file_copy.content_type)
                expected_metadata = {'extra': 'fresher'}
                self.assertDictEqual(expected_metadata, file_copy.metadata)
                resp_headers = dict(file_copy.conn.response.getheaders())
                for k in ('Content-Disposition', 'Content-Encoding'):
                    self.assertNotIn(k.lower(), resp_headers)

                # verify container listing for copy
                listing = cont.files(parms={'format': 'json'})
                for obj in listing:
                    if obj['name'] == dest_filename:
                        break
                else:
                    self.fail('Failed to find %s in listing' % dest_filename)

                self.assertEqual(file_item.size, obj['bytes'])
                self.assertEqual(file_item.etag, obj['hash'])
                self.assertEqual(
                    'application/test-updated', obj['content_type'])

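    # For orientation (a sketch, not part of the test): file_item.copy()
    # drives Swift's server-side copy, which at the HTTP level is roughly
    #
    #   COPY /v1/<account>/<container>/<object> HTTP/1.1
    #   Destination: <dest_container>/<dest_object>
    #   X-Object-Meta-Extra: fresh
    #
    # Headers sent with the COPY are merged into the destination object's
    # metadata, unless X-Fresh-Metadata: true asks for a clean slate, as
    # exercised above.
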
    def testCopyRange(self):
        # makes sure to test encoded characters
        source_filename = 'dealde%2Fl04 011e%204c8df/flash.png'
        file_item = self.env.container.file(source_filename)

        metadata = {Utils.create_ascii_name(): Utils.create_name()}

        data = file_item.write_random(1024)
        file_item.sync_metadata(metadata)
        file_item.initialize()

        dest_cont = self.env.account.container(Utils.create_name())
        self.assertTrue(dest_cont.create())

        expected_body = data[100:201]
        expected_etag = hashlib.md5(expected_body)
        # copy both from within and across containers
        for cont in (self.env.container, dest_cont):
            # copy both with and without initial slash
            for prefix in ('', '/'):
                dest_filename = Utils.create_name()

                file_item.copy('%s%s' % (prefix, cont), dest_filename,
                               hdrs={'Range': 'bytes=100-200'})
                self.assertEqual(201, file_item.conn.response.status)

                # verify container listing for copy
                listing = cont.files(parms={'format': 'json'})
                for obj in listing:
                    if obj['name'] == dest_filename:
                        break
                else:
                    self.fail('Failed to find %s in listing' % dest_filename)

                self.assertEqual(101, obj['bytes'])
                self.assertEqual(expected_etag.hexdigest(), obj['hash'])
                self.assertEqual(file_item.content_type, obj['content_type'])

                # verify copy object
                copy_file_item = cont.file(dest_filename)
                self.assertEqual(expected_body, copy_file_item.read())
                self.assertTrue(copy_file_item.initialize())
                self.assertEqual(metadata, copy_file_item.metadata)

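    # A ranged copy stores only the selected bytes: 'bytes=100-200' is an
    # inclusive range, so the destination above is a new 101-byte object
    # whose etag is the MD5 of just that slice, not of the source object.
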
    def testCopyAccount(self):
        # makes sure to test encoded characters
        source_filename = 'dealde%2Fl04 011e%204c8df/flash.png'
        file_item = self.env.container.file(source_filename)

        metadata = {Utils.create_ascii_name(): Utils.create_name()}

        data = file_item.write_random()
        file_item.sync_metadata(metadata)

        dest_cont = self.env.account.container(Utils.create_name())
        self.assertTrue(dest_cont.create())

        acct = self.env.conn.account_name
        # copy both from within and across containers
        for cont in (self.env.container, dest_cont):
            # copy both with and without initial slash
            for prefix in ('', '/'):
                dest_filename = Utils.create_name()

                file_item = self.env.container.file(source_filename)
                file_item.copy_account(acct,
                                       '%s%s' % (prefix, cont),
                                       dest_filename)

                self.assertIn(dest_filename, cont.files())

                file_item = cont.file(dest_filename)

                self.assertEqual(data, file_item.read())
                self.assertTrue(file_item.initialize())
                self.assertEqual(metadata, file_item.metadata)

        if not tf.skip2:
            dest_cont = self.env.account2.container(Utils.create_name())
            self.assertTrue(dest_cont.create(hdrs={
                'X-Container-Write': self.env.conn.user_acl
            }))

            acct = self.env.conn2.account_name
            # copy both with and without initial slash
            for prefix in ('', '/'):
                dest_filename = Utils.create_name()

                file_item = self.env.container.file(source_filename)
                file_item.copy_account(acct,
                                       '%s%s' % (prefix, dest_cont),
                                       dest_filename)

                self.assertIn(dest_filename, dest_cont.files())

                file_item = dest_cont.file(dest_filename)

                self.assertEqual(data, file_item.read())
                self.assertTrue(file_item.initialize())
                self.assertEqual(metadata, file_item.metadata)

    def testCopy404s(self):
        source_filename = Utils.create_name()
        file_item = self.env.container.file(source_filename)
        file_item.write_random()

        dest_cont = self.env.account.container(Utils.create_name())
        self.assertTrue(dest_cont.create())

        for prefix in ('', '/'):
            # invalid source container
            source_cont = self.env.account.container(Utils.create_name())
            file_item = source_cont.file(source_filename)
            self.assertRaises(ResponseError, file_item.copy,
                              '%s%s' % (prefix, self.env.container),
                              Utils.create_name())
            self.assert_status(404)

            self.assertRaises(ResponseError, file_item.copy,
                              '%s%s' % (prefix, dest_cont),
                              Utils.create_name())
            self.assert_status(404)

            # invalid source object
            file_item = self.env.container.file(Utils.create_name())
            self.assertRaises(ResponseError, file_item.copy,
                              '%s%s' % (prefix, self.env.container),
                              Utils.create_name())
            self.assert_status(404)

            self.assertRaises(ResponseError, file_item.copy,
                              '%s%s' % (prefix, dest_cont),
                              Utils.create_name())
            self.assert_status(404)

            # invalid destination container
            file_item = self.env.container.file(source_filename)
            self.assertRaises(ResponseError, file_item.copy,
                              '%s%s' % (prefix, Utils.create_name()),
                              Utils.create_name())

    def testCopyAccount404s(self):
        if tf.skip2:
            raise SkipTest('Account2 not set')
        acct = self.env.conn.account_name
        acct2 = self.env.conn2.account_name
        source_filename = Utils.create_name()
        file_item = self.env.container.file(source_filename)
        file_item.write_random()

        dest_cont = self.env.account.container(Utils.create_name())
        self.assertTrue(dest_cont.create(hdrs={
            'X-Container-Read': self.env.conn2.user_acl
        }))
        dest_cont2 = self.env.account2.container(Utils.create_name())
        self.assertTrue(dest_cont2.create(hdrs={
            'X-Container-Write': self.env.conn.user_acl,
            'X-Container-Read': self.env.conn.user_acl
        }))

        for acct, cont in ((acct, dest_cont), (acct2, dest_cont2)):
            for prefix in ('', '/'):
                # invalid source container
                source_cont = self.env.account.container(Utils.create_name())
                file_item = source_cont.file(source_filename)
                self.assertRaises(ResponseError, file_item.copy_account,
                                  acct,
                                  '%s%s' % (prefix, self.env.container),
                                  Utils.create_name())
                # there is no such source container, but the user has
                # permissions to do a GET (done internally via COPY) for
                # objects in their own account.
                self.assert_status(404)

                self.assertRaises(ResponseError, file_item.copy_account,
                                  acct,
                                  '%s%s' % (prefix, cont),
                                  Utils.create_name())
                self.assert_status(404)

                # invalid source object
                file_item = self.env.container.file(Utils.create_name())
                self.assertRaises(ResponseError, file_item.copy_account,
                                  acct,
                                  '%s%s' % (prefix, self.env.container),
                                  Utils.create_name())
                # there is no such source object, but the user has
                # permissions to do a GET (done internally via COPY) for
                # objects in their own account.
                self.assert_status(404)

                self.assertRaises(ResponseError, file_item.copy_account,
                                  acct,
                                  '%s%s' % (prefix, cont),
                                  Utils.create_name())
                self.assert_status(404)

                # invalid destination container
                file_item = self.env.container.file(source_filename)
                self.assertRaises(ResponseError, file_item.copy_account,
                                  acct,
                                  '%s%s' % (prefix, Utils.create_name()),
                                  Utils.create_name())
                if acct == acct2:
                    # there is no such destination container
                    # and the foreign user has no permission to write there
                    self.assert_status(403)
                else:
                    self.assert_status(404)

    def testCopyNoDestinationHeader(self):
        source_filename = Utils.create_name()
        file_item = self.env.container.file(source_filename)
        file_item.write_random()

        file_item = self.env.container.file(source_filename)
        self.assertRaises(ResponseError, file_item.copy, Utils.create_name(),
                          Utils.create_name(),
                          cfg={'no_destination': True})
        self.assert_status(412)

    def testCopyDestinationSlashProblems(self):
        source_filename = Utils.create_name()
        file_item = self.env.container.file(source_filename)
        file_item.write_random()

        # no slash
        self.assertRaises(ResponseError, file_item.copy, Utils.create_name(),
                          Utils.create_name(),
                          cfg={'destination': Utils.create_name()})
        self.assert_status(412)

        # too many slashes
        self.assertRaises(ResponseError, file_item.copy, Utils.create_name(),
                          Utils.create_name(),
                          cfg={'destination': '//%s' % Utils.create_name()})
        self.assert_status(412)

    def testCopyFromHeader(self):
        source_filename = Utils.create_name()
        file_item = self.env.container.file(source_filename)

        metadata = {}
        for i in range(1):
            metadata[Utils.create_ascii_name()] = Utils.create_name()
        file_item.metadata = metadata

        data = file_item.write_random()

        dest_cont = self.env.account.container(Utils.create_name())
        self.assertTrue(dest_cont.create())

        # copy both from within and across containers
        for cont in (self.env.container, dest_cont):
            # copy both with and without initial slash
            for prefix in ('', '/'):
                dest_filename = Utils.create_name()

                file_item = cont.file(dest_filename)
                file_item.write(hdrs={'X-Copy-From': '%s%s/%s' % (
                    prefix, self.env.container.name, source_filename)})

                self.assertIn(dest_filename, cont.files())

                file_item = cont.file(dest_filename)

                self.assertEqual(data, file_item.read())
                self.assertTrue(file_item.initialize())
                self.assertEqual(metadata, file_item.metadata)

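    # X-Copy-From is the PUT-side spelling of a server-side copy: a PUT to
    # the destination path with 'X-Copy-From: <container>/<object>' pulls
    # the body and metadata from the named source, which is why no request
    # body is supplied above.
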
    def testCopyFromAccountHeader(self):
        if tf.skip2:
            raise SkipTest('Account2 not set')
        acct = self.env.conn.account_name
        src_cont = self.env.account.container(Utils.create_name())
        self.assertTrue(src_cont.create(hdrs={
            'X-Container-Read': self.env.conn2.user_acl
        }))
        source_filename = Utils.create_name()
        file_item = src_cont.file(source_filename)

        metadata = {}
        for i in range(1):
            metadata[Utils.create_ascii_name()] = Utils.create_name()
        file_item.metadata = metadata

        data = file_item.write_random()

        dest_cont = self.env.account.container(Utils.create_name())
        self.assertTrue(dest_cont.create())
        dest_cont2 = self.env.account2.container(Utils.create_name())
        self.assertTrue(dest_cont2.create(hdrs={
            'X-Container-Write': self.env.conn.user_acl
        }))

        for cont in (src_cont, dest_cont, dest_cont2):
            # copy both with and without initial slash
            for prefix in ('', '/'):
                dest_filename = Utils.create_name()

                file_item = cont.file(dest_filename)
                file_item.write(hdrs={'X-Copy-From-Account': acct,
                                      'X-Copy-From': '%s%s/%s' % (
                                          prefix,
                                          src_cont.name,
                                          source_filename)})

                self.assertIn(dest_filename, cont.files())

                file_item = cont.file(dest_filename)

                self.assertEqual(data, file_item.read())
                self.assertTrue(file_item.initialize())
                self.assertEqual(metadata, file_item.metadata)

    def testCopyFromHeader404s(self):
        source_filename = Utils.create_name()
        file_item = self.env.container.file(source_filename)
        file_item.write_random()

        for prefix in ('', '/'):
            # invalid source container
            file_item = self.env.container.file(Utils.create_name())
            copy_from = ('%s%s/%s'
                         % (prefix, Utils.create_name(), source_filename))
            self.assertRaises(ResponseError, file_item.write,
                              hdrs={'X-Copy-From': copy_from})
            self.assert_status(404)

            # invalid source object
            copy_from = ('%s%s/%s'
                         % (prefix, self.env.container.name,
                            Utils.create_name()))
            file_item = self.env.container.file(Utils.create_name())
            self.assertRaises(ResponseError, file_item.write,
                              hdrs={'X-Copy-From': copy_from})
            self.assert_status(404)

            # invalid destination container
            dest_cont = self.env.account.container(Utils.create_name())
            file_item = dest_cont.file(Utils.create_name())
            copy_from = ('%s%s/%s'
                         % (prefix, self.env.container.name, source_filename))
            self.assertRaises(ResponseError, file_item.write,
                              hdrs={'X-Copy-From': copy_from})
            self.assert_status(404)

    def testCopyFromAccountHeader404s(self):
        if tf.skip2:
            raise SkipTest('Account2 not set')
        acct = self.env.conn2.account_name
        src_cont = self.env.account2.container(Utils.create_name())
        self.assertTrue(src_cont.create(hdrs={
            'X-Container-Read': self.env.conn.user_acl
        }))
        source_filename = Utils.create_name()
        file_item = src_cont.file(source_filename)
        file_item.write_random()
        dest_cont = self.env.account.container(Utils.create_name())
        self.assertTrue(dest_cont.create())

        for prefix in ('', '/'):
            # invalid source container
            file_item = dest_cont.file(Utils.create_name())
            self.assertRaises(ResponseError, file_item.write,
                              hdrs={'X-Copy-From-Account': acct,
                                    'X-Copy-From': '%s%s/%s' %
                                    (prefix,
                                     Utils.create_name(),
                                     source_filename)})
            self.assert_status(403)

            # invalid source object
            file_item = self.env.container.file(Utils.create_name())
            self.assertRaises(ResponseError, file_item.write,
                              hdrs={'X-Copy-From-Account': acct,
                                    'X-Copy-From': '%s%s/%s' %
                                    (prefix,
                                     src_cont,
                                     Utils.create_name())})
            self.assert_status(404)

            # invalid destination container
            dest_cont = self.env.account.container(Utils.create_name())
            file_item = dest_cont.file(Utils.create_name())
            self.assertRaises(ResponseError, file_item.write,
                              hdrs={'X-Copy-From-Account': acct,
                                    'X-Copy-From': '%s%s/%s' %
                                    (prefix,
                                     src_cont,
                                     source_filename)})
            self.assert_status(404)

    def testCopyFromAccountHeader403s(self):
        if tf.skip2:
            raise SkipTest('Account2 not set')
        acct = self.env.conn2.account_name
        src_cont = self.env.account2.container(Utils.create_name())
        self.assertTrue(src_cont.create())  # Primary user has no access
        source_filename = Utils.create_name()
        file_item = src_cont.file(source_filename)
        file_item.write_random()
        dest_cont = self.env.account.container(Utils.create_name())
        self.assertTrue(dest_cont.create())

        for prefix in ('', '/'):
            # invalid source container
            file_item = dest_cont.file(Utils.create_name())
            self.assertRaises(ResponseError, file_item.write,
                              hdrs={'X-Copy-From-Account': acct,
                                    'X-Copy-From': '%s%s/%s' %
                                    (prefix,
                                     Utils.create_name(),
                                     source_filename)})
            self.assert_status(403)

            # invalid source object
            file_item = self.env.container.file(Utils.create_name())
            self.assertRaises(ResponseError, file_item.write,
                              hdrs={'X-Copy-From-Account': acct,
                                    'X-Copy-From': '%s%s/%s' %
                                    (prefix,
                                     src_cont,
                                     Utils.create_name())})
            self.assert_status(403)

            # invalid destination container
            dest_cont = self.env.account.container(Utils.create_name())
            file_item = dest_cont.file(Utils.create_name())
            self.assertRaises(ResponseError, file_item.write,
                              hdrs={'X-Copy-From-Account': acct,
                                    'X-Copy-From': '%s%s/%s' %
                                    (prefix,
                                     src_cont,
                                     source_filename)})
            self.assert_status(403)

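    # All three cases above fail with 403 rather than 404: the primary user
    # was given no read access to the source container, so every copy
    # attempt is rejected on authorization before any missing container or
    # object could produce a 404.
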
    def testNameLimit(self):
        limit = load_constraint('max_object_name_length')

        for l in (1, 10, limit // 2, limit - 1, limit, limit + 1, limit * 2):
            file_item = self.env.container.file('a' * l)

            if l <= limit:
                self.assertTrue(file_item.write())
                self.assert_status(201)
            else:
                self.assertRaises(ResponseError, file_item.write)
                self.assert_status(400)

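    # load_constraint() reads the limit from the cluster's advertised
    # constraints, so the boundary values probed above adapt to whatever
    # max_object_name_length the deployment actually enforces.
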
    def testQuestionMarkInName(self):
        if Utils.create_name == Utils.create_ascii_name:
            file_name = list(Utils.create_name())
            file_name[random.randint(2, len(file_name) - 2)] = '?'
            file_name = "".join(file_name)
        else:
            file_name = Utils.create_name(6) + '?' + Utils.create_name(6)

        file_item = self.env.container.file(file_name)
        self.assertTrue(file_item.write(cfg={'no_path_quote': True}))
        self.assertNotIn(file_name, self.env.container.files())
        self.assertIn(file_name.split('?')[0], self.env.container.files())

    def testDeleteThen404s(self):
        file_item = self.env.container.file(Utils.create_name())
        self.assertTrue(file_item.write_random())
        self.assert_status(201)

        self.assertTrue(file_item.delete())
        self.assert_status(204)

        file_item.metadata = {Utils.create_ascii_name(): Utils.create_name()}

        for method in (file_item.info,
                       file_item.read,
                       file_item.sync_metadata,
                       file_item.delete):
            self.assertRaises(ResponseError, method)
            self.assert_status(404)

    def testBlankMetadataName(self):
        file_item = self.env.container.file(Utils.create_name())
        file_item.metadata = {'': Utils.create_name()}
        self.assertRaises(ResponseError, file_item.write_random)
        self.assert_status(400)

    def testMetadataNumberLimit(self):
        number_limit = load_constraint('max_meta_count')
        size_limit = load_constraint('max_meta_overall_size')

        for i in (number_limit - 10, number_limit - 1, number_limit,
                  number_limit + 1, number_limit + 10, number_limit + 100):

            j = size_limit // (i * 2)

            metadata = {}
            while len(metadata.keys()) < i:
                key = Utils.create_ascii_name()
                val = Utils.create_name()

                if len(key) > j:
                    key = key[:j]
                # NB: we'll likely write object metadata that's *not* UTF-8
                if six.PY2:
                    val = val[:j]
                else:
                    val = val.encode('utf8')[:j].decode(
                        'utf8', 'surrogateescape')

                metadata[key] = val

            file_item = self.env.container.file(Utils.create_name())
            file_item.metadata = metadata

            if i <= number_limit:
                self.assertTrue(file_item.write())
                self.assert_status(201)
                self.assertTrue(file_item.sync_metadata())
                self.assert_status(202)
            else:
                self.assertRaises(ResponseError, file_item.write)
                self.assert_status(400)
                file_item.metadata = {}
                self.assertTrue(file_item.write())
                self.assert_status(201)
                file_item.metadata = metadata
                self.assertRaises(ResponseError, file_item.sync_metadata)
                self.assert_status(400)

    def testContentTypeGuessing(self):
        file_types = {'wav': 'audio/x-wav', 'txt': 'text/plain',
                      'zip': 'application/zip'}

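        # Each object is PUT with an empty body and no Content-Type header;
        # the server is expected to guess the type from the '.wav', '.txt'
        # or '.zip' extension in the object name.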
        container = self.env.account.container(Utils.create_name())
        self.assertTrue(container.create())

        for i in file_types.keys():
            file_item = container.file(Utils.create_name() + '.' + i)
            file_item.write(b'', cfg={'no_content_type': True})

        file_types_read = {}
        for i in container.files(parms={'format': 'json'}):
            file_types_read[i['name'].split('.')[1]] = i['content_type']

        self.assertEqual(file_types, file_types_read)

    def testRangedGets(self):
        # We set the file_length to a strange multiple here. This is to check
        # that ranges still work in the EC case when the requested range
        # spans EC segment boundaries. The 1 MiB base value is chosen because
        # that's a common EC segment size. The 1.33 multiple is to ensure we
        # aren't aligned on segment boundaries
        file_length = int(1048576 * 1.33)
        range_size = file_length // 10
        file_item = self.env.container.file(Utils.create_name())
        data = file_item.write_random(file_length)

        for i in range(0, file_length, range_size):
            range_string = 'bytes=%d-%d' % (i, i + range_size - 1)
            hdrs = {'Range': range_string}
            self.assertEqual(
                data[i: i + range_size], file_item.read(hdrs=hdrs),
                range_string)

            range_string = 'bytes=-%d' % (i)
            hdrs = {'Range': range_string}
            if i == 0:
                # RFC 2616 14.35.1
                # "If a syntactically valid byte-range-set includes ... at
                # least one suffix-byte-range-spec with a NON-ZERO
                # suffix-length, then the byte-range-set is satisfiable.
                # Otherwise, the byte-range-set is unsatisfiable.
                self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
                self.assert_status(416)
                self.assert_header('content-range', 'bytes */%d' % file_length)
            else:
                self.assertEqual(file_item.read(hdrs=hdrs), data[-i:])
                self.assert_header('content-range', 'bytes %d-%d/%d' % (
                    file_length - i, file_length - 1, file_length))
                self.assert_header('etag', file_item.md5)
                self.assert_header('accept-ranges', 'bytes')

            range_string = 'bytes=%d-' % (i)
            hdrs = {'Range': range_string}
            self.assertEqual(
                file_item.read(hdrs=hdrs), data[i - file_length:],
                range_string)

        range_string = 'bytes=%d-%d' % (file_length + 1000, file_length + 2000)
        hdrs = {'Range': range_string}
        self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
        self.assert_status(416)
        self.assert_header('content-range', 'bytes */%d' % file_length)
        self.assert_header('etag', file_item.md5)
        self.assert_header('accept-ranges', 'bytes')

        range_string = 'bytes=%d-%d' % (file_length - 1000, file_length + 2000)
        hdrs = {'Range': range_string}
        self.assertEqual(file_item.read(hdrs=hdrs), data[-1000:], range_string)

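        # A Range value without the "bytes=" prefix is syntactically invalid,
        # so the server ignores the header and returns the whole object with
        # a plain 200 response.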
        hdrs = {'Range': '0-4'}
        self.assertEqual(file_item.read(hdrs=hdrs), data, '0-4')

        # RFC 2616 14.35.1
        # "If the entity is shorter than the specified suffix-length, the
        # entire entity-body is used."
        range_string = 'bytes=-%d' % (file_length + 10)
        hdrs = {'Range': range_string}
        self.assertEqual(file_item.read(hdrs=hdrs), data, range_string)

    def testMultiRangeGets(self):
        file_length = 10000
        range_size = file_length // 10
        subrange_size = range_size // 10
        file_item = self.env.container.file(Utils.create_name())
        data = file_item.write_random(
            file_length, hdrs={"Content-Type":
                               "lovecraft/rugose; squamous=true"})

        for i in range(0, file_length, range_size):
            range_string = 'bytes=%d-%d,%d-%d,%d-%d' % (
                i, i + subrange_size - 1,
                i + 2 * subrange_size, i + 3 * subrange_size - 1,
                i + 4 * subrange_size, i + 5 * subrange_size - 1)
            hdrs = {'Range': range_string}

            fetched = file_item.read(hdrs=hdrs)
            self.assert_status(206)
            content_type = file_item.content_type
            self.assertTrue(content_type.startswith("multipart/byteranges"))
            self.assertIsNone(file_item.content_range)

            # email.parser.FeedParser wants a message with headers on the
            # front, then two CRLFs, and then a body (like emails have but
            # HTTP response bodies don't). We fake it out by constructing a
            # one-header preamble containing just the Content-Type, then
            # feeding in the response body.
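            # For reference, a multipart/byteranges body looks roughly like:
            #   --<boundary>
            #   Content-Type: lovecraft/rugose; squamous=true
            #   Content-Range: bytes 0-99/10000
            #
            #   <the bytes for that range>
            #   --<boundary>
            #   ...
            #   --<boundary>--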
            parser = FeedParser()
            parser.feed(b"Content-Type: %s\r\n\r\n" % content_type.encode())
            parser.feed(fetched)
            root_message = parser.close()
            self.assertTrue(root_message.is_multipart())

            byteranges = root_message.get_payload()
            self.assertEqual(len(byteranges), 3)

            self.assertEqual(byteranges[0]['Content-Type'],
                             "lovecraft/rugose; squamous=true")
            self.assertEqual(
                byteranges[0]['Content-Range'],
                "bytes %d-%d/%d" % (i, i + subrange_size - 1, file_length))
            self.assertEqual(
                byteranges[0].get_payload(decode=True),
                data[i:(i + subrange_size)])

            self.assertEqual(byteranges[1]['Content-Type'],
                             "lovecraft/rugose; squamous=true")
            self.assertEqual(
                byteranges[1]['Content-Range'],
                "bytes %d-%d/%d" % (i + 2 * subrange_size,
                                    i + 3 * subrange_size - 1, file_length))
            self.assertEqual(
                byteranges[1].get_payload(decode=True),
                data[(i + 2 * subrange_size):(i + 3 * subrange_size)])

            self.assertEqual(byteranges[2]['Content-Type'],
                             "lovecraft/rugose; squamous=true")
            self.assertEqual(
                byteranges[2]['Content-Range'],
                "bytes %d-%d/%d" % (i + 4 * subrange_size,
                                    i + 5 * subrange_size - 1, file_length))
            self.assertEqual(
                byteranges[2].get_payload(decode=True),
                data[(i + 4 * subrange_size):(i + 5 * subrange_size)])

        # The first two ranges are satisfiable but the third is not; the
        # result is a multipart/byteranges response containing only the two
        # satisfiable byteranges.
        range_string = 'bytes=%d-%d,%d-%d,%d-%d' % (
            0, subrange_size - 1,
            2 * subrange_size, 3 * subrange_size - 1,
            file_length, file_length + subrange_size - 1)
        hdrs = {'Range': range_string}
        fetched = file_item.read(hdrs=hdrs)
        self.assert_status(206)
        content_type = file_item.content_type
        self.assertTrue(content_type.startswith("multipart/byteranges"))
        self.assertIsNone(file_item.content_range)

        parser = FeedParser()
        parser.feed(b"Content-Type: %s\r\n\r\n" % content_type.encode())
        parser.feed(fetched)
        root_message = parser.close()

        self.assertTrue(root_message.is_multipart())
        byteranges = root_message.get_payload()
        self.assertEqual(len(byteranges), 2)

        self.assertEqual(byteranges[0]['Content-Type'],
                         "lovecraft/rugose; squamous=true")
        self.assertEqual(
            byteranges[0]['Content-Range'],
            "bytes %d-%d/%d" % (0, subrange_size - 1, file_length))
        self.assertEqual(byteranges[0].get_payload(decode=True),
                         data[:subrange_size])

        self.assertEqual(byteranges[1]['Content-Type'],
                         "lovecraft/rugose; squamous=true")
        self.assertEqual(
            byteranges[1]['Content-Range'],
            "bytes %d-%d/%d" % (2 * subrange_size, 3 * subrange_size - 1,
                                file_length))
        self.assertEqual(
            byteranges[1].get_payload(decode=True),
            data[(2 * subrange_size):(3 * subrange_size)])

        # The first range is satisfiable but the second is not; the
        # result is either a multipart/byteranges response containing one
        # byterange or a normal, non-MIME 206 response.
        range_string = 'bytes=%d-%d,%d-%d' % (
            0, subrange_size - 1,
            file_length, file_length + subrange_size - 1)
        hdrs = {'Range': range_string}
        fetched = file_item.read(hdrs=hdrs)
        self.assert_status(206)
        content_type = file_item.content_type
        if content_type.startswith("multipart/byteranges"):
            self.assertIsNone(file_item.content_range)
            parser = FeedParser()
            parser.feed(b"Content-Type: %s\r\n\r\n" % content_type.encode())
            parser.feed(fetched)
            root_message = parser.close()

            self.assertTrue(root_message.is_multipart())
            byteranges = root_message.get_payload()
            self.assertEqual(len(byteranges), 1)

            self.assertEqual(byteranges[0]['Content-Type'],
                             "lovecraft/rugose; squamous=true")
            self.assertEqual(
                byteranges[0]['Content-Range'],
                "bytes %d-%d/%d" % (0, subrange_size - 1, file_length))
            self.assertEqual(byteranges[0].get_payload(decode=True),
                             data[:subrange_size])
        else:
            self.assertEqual(
                file_item.content_range,
                "bytes %d-%d/%d" % (0, subrange_size - 1, file_length))
            self.assertEqual(content_type, "lovecraft/rugose; squamous=true")
            self.assertEqual(fetched, data[:subrange_size])

        # No byterange is satisfiable, so we get a 416 response.
        range_string = 'bytes=%d-%d,%d-%d' % (
            file_length, file_length + 2,
            file_length + 100, file_length + 102)
        hdrs = {'Range': range_string}

        self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
        self.assert_status(416)
        self.assert_header('content-range', 'bytes */%d' % file_length)

    def testRangedGetsWithLWSinHeader(self):
        file_length = 10000
        file_item = self.env.container.file(Utils.create_name())
        data = file_item.write_random(file_length)

        for r in ('BYTES=0-999', 'bytes = 0-999', 'BYTES = 0 - 999',
                  'bytes = 0 - 999', 'bytes=0 - 999', 'bytes=0-999 '):
            self.assertEqual(file_item.read(hdrs={'Range': r}), data[0:1000])

    def testFileSizeLimit(self):
        limit = load_constraint('max_file_size')
        tsecs = 3

        def timeout(seconds, method, *args, **kwargs):
            try:
                with eventlet.Timeout(seconds):
                    method(*args, **kwargs)
            except eventlet.Timeout:
                return True
            else:
                return False

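        # timeout() returns True if `method` is still blocked after `seconds`
        # have elapsed, and False if it completed in time.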
        # This loop will result in fallocate calls for 4x the limit
        # (minus 111 bytes). With fallocate turned on in the object servers,
        # this may fail if you don't have 4x the limit available on your
        # data drives.

        # Note that this test does not actually send any data to the system.
        # All it does is ensure that a response (success or failure) comes
        # back within 3 seconds. For the successful tests (size smaller
        # than limit), the cluster will log a 499.

        for i in (limit - 100, limit - 10, limit - 1, limit, limit + 1,
                  limit + 10, limit + 100):

            file_item = self.env.container.file(Utils.create_name())

            if i <= limit:
                self.assertTrue(timeout(tsecs, file_item.write,
                                        cfg={'set_content_length': i}))
            else:
                self.assertRaises(ResponseError, timeout, tsecs,
                                  file_item.write,
                                  cfg={'set_content_length': i})

    def testNoContentLengthForPut(self):
        file_item = self.env.container.file(Utils.create_name())
        self.assertRaises(ResponseError, file_item.write, b'testing',
                          cfg={'no_content_length': True})
        self.assert_status(411)

    def testDelete(self):
        file_item = self.env.container.file(Utils.create_name())
        file_item.write_random(self.env.file_size)

        self.assertIn(file_item.name, self.env.container.files())
        self.assertTrue(file_item.delete())
        self.assertNotIn(file_item.name, self.env.container.files())

    def testBadHeaders(self):
        file_length = 100

        # no content type on puts should be ok
        file_item = self.env.container.file(Utils.create_name())
        file_item.write_random(file_length, cfg={'no_content_type': True})
        self.assert_status(201)

        # content length x
        self.assertRaises(ResponseError, file_item.write_random, file_length,
                          hdrs={'Content-Length': 'X'},
                          cfg={'no_content_length': True})
        self.assert_status(400)

        # no content-length
        self.assertRaises(ResponseError, file_item.write_random, file_length,
                          cfg={'no_content_length': True})
        self.assert_status(411)

        self.assertRaises(ResponseError, file_item.write_random, file_length,
                          hdrs={'transfer-encoding': 'gzip,chunked'},
                          cfg={'no_content_length': True})
        self.assert_status(501)

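        # 'chunked' is the only transfer-coding the server implements, so a
        # stacked coding like 'gzip,chunked' is rejected as 501 Not
        # Implemented rather than 400.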
        # bad request types
        # for req in ('LICK', 'GETorHEAD_base', 'container_info',
        #             'best_response'):
        for req in ('LICK', 'GETorHEAD_base'):
            self.env.account.conn.make_request(req)
            self.assert_status(405)

        # bad range headers
        self.assertEqual(
            len(file_item.read(hdrs={'Range': 'parsecs=8-12'})),
            file_length)
        self.assert_status(200)

    def testMetadataLengthLimits(self):
        key_limit = load_constraint('max_meta_name_length')
        value_limit = load_constraint('max_meta_value_length')
        lengths = [[key_limit, value_limit], [key_limit, value_limit + 1],
                   [key_limit + 1, value_limit], [key_limit, 0],
                   [key_limit, value_limit * 10],
                   [key_limit * 10, value_limit]]

        for l in lengths:
            metadata = {'a' * l[0]: 'b' * l[1]}
            file_item = self.env.container.file(Utils.create_name())
            file_item.metadata = metadata

            if l[0] <= key_limit and l[1] <= value_limit:
                self.assertTrue(file_item.write())
                self.assert_status(201)
                self.assertTrue(file_item.sync_metadata())
            else:
                self.assertRaises(ResponseError, file_item.write)
                self.assert_status(400)
                file_item.metadata = {}
                self.assertTrue(file_item.write())
                self.assert_status(201)
                file_item.metadata = metadata
                self.assertRaises(ResponseError, file_item.sync_metadata)
                self.assert_status(400)

    def testEtagWayoff(self):
        file_item = self.env.container.file(Utils.create_name())
        hdrs = {'etag': 'reallylonganddefinitelynotavalidetagvalue'}
        self.assertRaises(ResponseError, file_item.write_random, hdrs=hdrs)
        self.assert_status(422)

    def testFileCreate(self):
        for i in range(10):
            file_item = self.env.container.file(Utils.create_name())
            data = file_item.write_random()
            self.assert_status(201)
            self.assertEqual(data, file_item.read())
            self.assert_status(200)

    def testHead(self):
        file_name = Utils.create_name()
        content_type = Utils.create_name()

        file_item = self.env.container.file(file_name)
        file_item.content_type = content_type
        file_item.write_random(self.env.file_size)

        md5 = file_item.md5

        file_item = self.env.container.file(file_name)
        info = file_item.info()

        self.assert_status(200)
        self.assertEqual(info['content_length'], self.env.file_size)
        self.assertEqual(info['etag'], md5)
        self.assertEqual(info['content_type'], content_type)
        self.assertIn('last_modified', info)

    def testDeleteOfFileThatDoesNotExist(self):
        # in container that exists
        file_item = self.env.container.file(Utils.create_name())
        self.assertRaises(ResponseError, file_item.delete)
        self.assert_status(404)

        # in container that does not exist
        container = self.env.account.container(Utils.create_name())
        file_item = container.file(Utils.create_name())
        self.assertRaises(ResponseError, file_item.delete)
        self.assert_status(404)

    def testHeadOnFileThatDoesNotExist(self):
        # in container that exists
        file_item = self.env.container.file(Utils.create_name())
        self.assertRaises(ResponseError, file_item.info)
        self.assert_status(404)

        # in container that does not exist
        container = self.env.account.container(Utils.create_name())
        file_item = container.file(Utils.create_name())
        self.assertRaises(ResponseError, file_item.info)
        self.assert_status(404)

    def testMetadataOnPost(self):
        file_item = self.env.container.file(Utils.create_name())
        file_item.write_random(self.env.file_size)

        for i in range(10):
            metadata = {}
            for j in range(10):
                metadata[Utils.create_ascii_name()] = Utils.create_name()

            file_item.metadata = metadata
            self.assertTrue(file_item.sync_metadata())
            self.assert_status(202)

            file_item = self.env.container.file(file_item.name)
            self.assertTrue(file_item.initialize())
            self.assert_status(200)
            self.assertEqual(file_item.metadata, metadata)

    def testGetContentType(self):
        file_name = Utils.create_name()
        content_type = Utils.create_name()

        file_item = self.env.container.file(file_name)
        file_item.content_type = content_type
        file_item.write_random()

        file_item = self.env.container.file(file_name)
        file_item.read()

        self.assertEqual(content_type, file_item.content_type)

    def testGetOnFileThatDoesNotExist(self):
        # in container that exists
        file_item = self.env.container.file(Utils.create_name())
        self.assertRaises(ResponseError, file_item.read)
        self.assert_status(404)

        # in container that does not exist
        container = self.env.account.container(Utils.create_name())
        file_item = container.file(Utils.create_name())
        self.assertRaises(ResponseError, file_item.read)
        self.assert_status(404)

    def testPostOnFileThatDoesNotExist(self):
        # in container that exists
        file_item = self.env.container.file(Utils.create_name())
        file_item.metadata['Field'] = 'Value'
        self.assertRaises(ResponseError, file_item.sync_metadata)
        self.assert_status(404)

        # in container that does not exist
        container = self.env.account.container(Utils.create_name())
        file_item = container.file(Utils.create_name())
        file_item.metadata['Field'] = 'Value'
        self.assertRaises(ResponseError, file_item.sync_metadata)
        self.assert_status(404)

    def testMetadataOnPut(self):
        for i in range(10):
            metadata = {}
            for j in range(10):
                metadata[Utils.create_ascii_name()] = Utils.create_name()

            file_item = self.env.container.file(Utils.create_name())
            file_item.metadata = metadata
            file_item.write_random(self.env.file_size)

            file_item = self.env.container.file(file_item.name)
            self.assertTrue(file_item.initialize())
            self.assert_status(200)
            self.assertEqual(file_item.metadata, metadata)

    def testSerialization(self):
        container = self.env.account.container(Utils.create_name())
        self.assertTrue(container.create())

        files = []
        for i in (0, 1, 10, 100, 1000, 10000):
            files.append({'name': Utils.create_name(),
                          'content_type': Utils.create_name(), 'bytes': i})

        write_time = time.time()
        for f in files:
            file_item = container.file(f['name'])
            file_item.content_type = f['content_type']
            file_item.write_random(f['bytes'])

            f['hash'] = file_item.md5
            f['json'] = False
            f['xml'] = False
        write_time = time.time() - write_time

        for format_type in ['json', 'xml']:
            for file_item in container.files(parms={'format': format_type}):
                found = False
                for f in files:
                    if f['name'] != file_item['name']:
                        continue

                    self.assertEqual(file_item['content_type'],
                                     f['content_type'])
                    self.assertEqual(int(file_item['bytes']), f['bytes'])

                    d = datetime.strptime(
                        file_item['last_modified'].split('.')[0],
                        "%Y-%m-%dT%H:%M:%S")
                    lm = time.mktime(d.timetuple())

                    if 'last_modified' in f:
                        self.assertEqual(f['last_modified'], lm)
                    else:
                        f['last_modified'] = lm

                    f[format_type] = True
                    found = True

                self.assertTrue(
                    found, 'Unexpected file %s found in '
                    '%s listing' % (file_item['name'], format_type))

            headers = dict((h.lower(), v)
                           for h, v in self.env.conn.response.getheaders())
            if format_type == 'json':
                self.assertEqual(headers['content-type'],
                                 'application/json; charset=utf-8')
            elif format_type == 'xml':
                self.assertEqual(headers['content-type'],
                                 'application/xml; charset=utf-8')

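        # All the objects were written back-to-back, so the spread of their
        # last-modified times can't exceed the wall-clock time the writes
        # took, plus a second for timestamp granularity.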
        lm_diff = max([f['last_modified'] for f in files]) -\
            min([f['last_modified'] for f in files])
        self.assertLess(lm_diff, write_time + 1,
                        'Diff in last modified times '
                        'should be less than time to write files')

        for f in files:
            for format_type in ['json', 'xml']:
                self.assertTrue(
                    f[format_type], 'File %s not found in %s listing'
                    % (f['name'], format_type))

    def testStackedOverwrite(self):
        file_item = self.env.container.file(Utils.create_name())

        for i in range(1, 11):
            data = file_item.write_random(512)
            file_item.write(data)

        self.assertEqual(file_item.read(), data)

    def testZeroByteFile(self):
        file_item = self.env.container.file(Utils.create_name())

        self.assertTrue(file_item.write(b''))
        self.assertIn(file_item.name, self.env.container.files())
        self.assertEqual(file_item.read(), b'')

    def testEtagResponse(self):
        file_item = self.env.container.file(Utils.create_name())

        data = io.BytesIO(file_item.write_random(512))
        etag = File.compute_md5sum(data)

        headers = dict((h.lower(), v)
                       for h, v in self.env.conn.response.getheaders())
        self.assertIn('etag', headers.keys())

        header_etag = headers['etag'].strip('"')
        self.assertEqual(etag, header_etag)

    def testChunkedPut(self):
        if (tf.web_front_end == 'apache2'):
            raise SkipTest("Chunked PUT cannot be tested with apache2 web "
                           "front end")

        def chunks(s, length=3):
            i, j = 0, length
            while i < len(s):
                yield s[i:j]
                i, j = j, j + length

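        # e.g. chunks(b'abcdefg', 3) yields b'abc', b'def', b'g'; the last
        # chunk may be shorter than `length`.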
        data = File.random_data(10000)
        etag = File.compute_md5sum(data)

        for i in (1, 10, 100, 1000):
            file_item = self.env.container.file(Utils.create_name())

            for j in chunks(data, i):
                file_item.chunked_write(j)

            self.assertTrue(file_item.chunked_write())
            self.assertEqual(data, file_item.read())

            info = file_item.info()
            self.assertEqual(etag, info['etag'])

    def test_POST(self):
        # verify consistency between object and container listing metadata
        file_name = Utils.create_name()
        file_item = self.env.container.file(file_name)
        file_item.content_type = 'text/foobar'
        file_item.write_random(1024)

        # sanity check
        file_item = self.env.container.file(file_name)
        file_item.initialize()
        self.assertEqual('text/foobar', file_item.content_type)
        self.assertEqual(1024, file_item.size)
        etag = file_item.etag

        # check container listing is consistent
        listing = self.env.container.files(parms={'format': 'json'})
        for f_dict in listing:
            if f_dict['name'] == file_name:
                break
        else:
            self.fail('Failed to find file %r in listing' % file_name)
        self.assertEqual(1024, f_dict['bytes'])
        self.assertEqual('text/foobar', f_dict['content_type'])
        self.assertEqual(etag, f_dict['hash'])
        put_last_modified = f_dict['last_modified']

        # now POST updated content-type to each file
        file_item = self.env.container.file(file_name)
        file_item.content_type = 'image/foobarbaz'
        file_item.sync_metadata({'Test': 'blah'})

        # sanity check object metadata
        file_item = self.env.container.file(file_name)
        file_item.initialize()

        self.assertEqual(1024, file_item.size)
        self.assertEqual('image/foobarbaz', file_item.content_type)
        self.assertEqual(etag, file_item.etag)
        self.assertIn('test', file_item.metadata)

        # check for consistency between object and container listing
        listing = self.env.container.files(parms={'format': 'json'})
        for f_dict in listing:
            if f_dict['name'] == file_name:
                break
        else:
            self.fail('Failed to find file %r in listing' % file_name)
        self.assertEqual(1024, f_dict['bytes'])
        self.assertEqual('image/foobarbaz', f_dict['content_type'])
        self.assertLess(put_last_modified, f_dict['last_modified'])
        self.assertEqual(etag, f_dict['hash'])


class TestFileUTF8(Base2, TestFile):
    pass


class TestFileComparisonEnv(BaseEnv):
    @classmethod
    def setUp(cls):
        super(TestFileComparisonEnv, cls).setUp()
        cls.container = cls.account.container(Utils.create_name())

        if not cls.container.create():
            raise ResponseError(cls.conn.response)

        cls.file_count = 20
        cls.file_size = 128
        cls.files = list()
        for x in range(cls.file_count):
            file_item = cls.container.file(Utils.create_name())
            file_item.write_random(cls.file_size)
            cls.files.append(file_item)

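        # The same moment one day in the past, rendered in the three date
        # formats an HTTP/1.1 server must accept: RFC 1123, RFC 850 and
        # ANSI C asctime().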
        cls.time_old_f1 = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
                                        time.gmtime(time.time() - 86400))
        cls.time_old_f2 = time.strftime("%A, %d-%b-%y %H:%M:%S GMT",
                                        time.gmtime(time.time() - 86400))
        cls.time_old_f3 = time.strftime("%a %b %d %H:%M:%S %Y",
                                        time.gmtime(time.time() - 86400))
        cls.time_new = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
                                     time.gmtime(time.time() + 86400))


class TestFileComparison(Base):
    env = TestFileComparisonEnv

    def testIfMatch(self):
        for file_item in self.env.files:
            hdrs = {'If-Match': file_item.md5}
            self.assertTrue(file_item.read(hdrs=hdrs))

            hdrs = {'If-Match': 'bogus'}
            self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
            self.assert_status(412)
            self.assert_header('etag', file_item.md5)

    def testIfMatchMultipleEtags(self):
        for file_item in self.env.files:
            hdrs = {'If-Match': '"bogus1", "%s", "bogus2"' % file_item.md5}
            self.assertTrue(file_item.read(hdrs=hdrs))

            hdrs = {'If-Match': '"bogus1", "bogus2", "bogus3"'}
            self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
            self.assert_status(412)
            self.assert_header('etag', file_item.md5)

    def testIfNoneMatch(self):
        for file_item in self.env.files:
            hdrs = {'If-None-Match': 'bogus'}
            self.assertTrue(file_item.read(hdrs=hdrs))

            hdrs = {'If-None-Match': file_item.md5}
            self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
            self.assert_status(304)
            self.assert_header('etag', file_item.md5)
            self.assert_header('accept-ranges', 'bytes')

    def testIfNoneMatchMultipleEtags(self):
        for file_item in self.env.files:
            hdrs = {'If-None-Match': '"bogus1", "bogus2", "bogus3"'}
            self.assertTrue(file_item.read(hdrs=hdrs))

            hdrs = {'If-None-Match':
                    '"bogus1", "bogus2", "%s"' % file_item.md5}
            self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
            self.assert_status(304)
            self.assert_header('etag', file_item.md5)
            self.assert_header('accept-ranges', 'bytes')

    def testIfModifiedSince(self):
        for file_item in self.env.files:
            hdrs = {'If-Modified-Since': self.env.time_old_f1}
            self.assertTrue(file_item.read(hdrs=hdrs))
            self.assertTrue(file_item.info(hdrs=hdrs))

            hdrs = {'If-Modified-Since': self.env.time_new}
            self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
            self.assert_status(304)
            self.assert_header('etag', file_item.md5)
            self.assert_header('accept-ranges', 'bytes')
            self.assertRaises(ResponseError, file_item.info, hdrs=hdrs)
            self.assert_status(304)
            self.assert_header('etag', file_item.md5)
            self.assert_header('accept-ranges', 'bytes')

    def testIfUnmodifiedSince(self):
        for file_item in self.env.files:
            hdrs = {'If-Unmodified-Since': self.env.time_new}
            self.assertTrue(file_item.read(hdrs=hdrs))
            self.assertTrue(file_item.info(hdrs=hdrs))

            hdrs = {'If-Unmodified-Since': self.env.time_old_f2}
            self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
            self.assert_status(412)
            self.assert_header('etag', file_item.md5)
            self.assertRaises(ResponseError, file_item.info, hdrs=hdrs)
            self.assert_status(412)
            self.assert_header('etag', file_item.md5)

    def testIfMatchAndUnmodified(self):
        for file_item in self.env.files:
            hdrs = {'If-Match': file_item.md5,
                    'If-Unmodified-Since': self.env.time_new}
            self.assertTrue(file_item.read(hdrs=hdrs))

            hdrs = {'If-Match': 'bogus',
                    'If-Unmodified-Since': self.env.time_new}
            self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
            self.assert_status(412)
            self.assert_header('etag', file_item.md5)

            hdrs = {'If-Match': file_item.md5,
                    'If-Unmodified-Since': self.env.time_old_f3}
            self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
            self.assert_status(412)
            self.assert_header('etag', file_item.md5)

    def testLastModified(self):
        file_name = Utils.create_name()
        content_type = Utils.create_name()

        file_item = self.env.container.file(file_name)
        file_item.content_type = content_type
        resp = file_item.write_random_return_resp(self.env.file_size)
        put_last_modified = resp.getheader('last-modified')
        etag = file_item.md5

        file_item = self.env.container.file(file_name)
        info = file_item.info()
        self.assertIn('last_modified', info)
        last_modified = info['last_modified']
        self.assertEqual(put_last_modified, info['last_modified'])

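        # A conditional GET with If-Modified-Since equal to the object's own
        # Last-Modified must come back 304 Not Modified, while
        # If-Unmodified-Since with the same value succeeds.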
        hdrs = {'If-Modified-Since': last_modified}
        self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
        self.assert_status(304)
        self.assert_header('etag', etag)
        self.assert_header('accept-ranges', 'bytes')

        hdrs = {'If-Unmodified-Since': last_modified}
        self.assertTrue(file_item.read(hdrs=hdrs))


class TestFileComparisonUTF8(Base2, TestFileComparison):
    pass


class TestServiceToken(unittest2.TestCase):

    def setUp(self):
        if tf.skip_service_tokens:
            raise SkipTest

        if tf.in_process:
            tf.skip_if_no_xattrs()

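        # Symbolic values used by prepare_request() / do_request() to
        # choose which token is placed in which header.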
        self.SET_TO_USERS_TOKEN = 1
        self.SET_TO_SERVICE_TOKEN = 2

        # keystoneauth and tempauth differ in allowing PUT account.
        # Even if keystoneauth allows it, the proxy-server uses
        # allow_account_management to decide if accounts can be created.
        self.put_account_expect = is_client_error
        if tf.swift_test_auth_version != '1':
            if cluster_info.get('swift').get('allow_account_management'):
                self.put_account_expect = is_success

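    # Walk every verb over account, container and object paths, then
    # issue DELETEs in reverse so objects are removed before containers.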
    def _scenario_generator(self):
        paths = ((None, None), ('c', None), ('c', 'o'))
        for path in paths:
            for method in ('PUT', 'POST', 'HEAD', 'GET', 'OPTIONS'):
                yield method, path[0], path[1]
        for path in reversed(paths):
            yield 'DELETE', path[0], path[1]

    def _assert_is_authed_response(self, method, container, object, resp):
        resp.read()
        expect = is_success
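        # Account DELETE is never allowed; account PUT depends on the
        # auth system and allow_account_management (see setUp).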
        if method == 'DELETE' and not container:
            expect = is_client_error
        if method == 'PUT' and not container:
            expect = self.put_account_expect
        self.assertTrue(expect(resp.status), 'Unexpected %s for %s %s %s'
                        % (resp.status, method, container, object))

    def _assert_not_authed_response(self, method, container, object, resp):
        resp.read()
        expect = is_client_error
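        # OPTIONS is answered without authentication (e.g. for CORS
        # preflight), so expect success even with unacceptable tokens.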
        if method == 'OPTIONS':
            expect = is_success
        self.assertTrue(expect(resp.status), 'Unexpected %s for %s %s %s'
                        % (resp.status, method, container, object))

    def prepare_request(self, method, use_service_account=False,
                        container=None, obj=None, body=None, headers=None,
                        x_auth_token=None,
                        x_service_token=None, dbg=False):
        """
        Set up for making the request.

        When retry() calls the do_request() function, it calls it with the
        test user's token, the parsed path, a connection and (optionally)
        a token from the test service user. We save options here so that
        do_request() can make the appropriate request.

        :param method: The operation (e.g. 'HEAD')
        :param use_service_account: Optional. Set True to change the path to
            be the service account
        :param container: Optional. Adds a container name to the path
        :param obj: Optional. Adds an object name to the path
        :param body: Optional. Adds a body (string) to the request
        :param headers: Optional. Adds additional headers.
        :param x_auth_token: Optional. Default is SET_TO_USERS_TOKEN. One of:
            SET_TO_USERS_TOKEN    Put the test user's token in X-Auth-Token
            SET_TO_SERVICE_TOKEN  Put the service token in X-Auth-Token
        :param x_service_token: Optional. Default is to not set
            X-Service-Token to any value. If specified, one of:
            SET_TO_USERS_TOKEN    Put the test user's token in X-Service-Token
            SET_TO_SERVICE_TOKEN  Put the service token in X-Service-Token
        :param dbg: Optional. Set True to print the request arguments
        """
        self.method = method
        self.use_service_account = use_service_account
        self.container = container
        self.obj = obj
        self.body = body
        self.headers = headers
        if x_auth_token:
            self.x_auth_token = x_auth_token
        else:
            self.x_auth_token = self.SET_TO_USERS_TOKEN
        self.x_service_token = x_service_token
        self.dbg = dbg

    def do_request(self, url, token, parsed, conn, service_token=''):
        if self.use_service_account:
            path = self._service_account(parsed.path)
        else:
            path = parsed.path
        if self.container:
            path += '/%s' % self.container
        if self.obj:
            path += '/%s' % self.obj
        headers = {}
        if self.body:
            headers.update({'Content-Length': len(self.body)})
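        # Place the selected tokens: X-Auth-Token carries the primary
        # credential; X-Service-Token, when set, carries the second
        # credential checked by composite-token auth.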
        if self.x_auth_token == self.SET_TO_USERS_TOKEN:
            headers.update({'X-Auth-Token': token})
        elif self.x_auth_token == self.SET_TO_SERVICE_TOKEN:
            headers.update({'X-Auth-Token': service_token})
        if self.x_service_token == self.SET_TO_USERS_TOKEN:
            headers.update({'X-Service-Token': token})
        elif self.x_service_token == self.SET_TO_SERVICE_TOKEN:
            headers.update({'X-Service-Token': service_token})
        if self.dbg:
            print('DEBUG: conn.request: method:%s path:%s'
                  ' body:%s headers:%s' % (self.method, path, self.body,
                                           headers))
        conn.request(self.method, path, self.body, headers=headers)
        return check_response(conn)

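    # Map the user's account path to the service account path by swapping
    # the reseller prefix, e.g. /v1/AUTH_1234 -> /v1/SERVICE_1234 (the
    # actual prefixes depend on the test configuration).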
    def _service_account(self, path):
        parts = path.split('/', 3)
        account = parts[2]
        try:
            project_id = account[account.index('_') + 1:]
        except ValueError:
            project_id = account
        parts[2] = '%s%s' % (tf.swift_test_service_prefix, project_id)
        return '/'.join(parts)

    def test_user_access_own_auth_account(self):
        # This covers ground tested elsewhere (a user doing HEAD on their
        # own account). However, if this fails, none of the remaining
        # tests will work.
        self.prepare_request('HEAD')
        resp = retry(self.do_request)
        resp.read()
        self.assertIn(resp.status, (200, 204))

    def test_user_cannot_access_service_account(self):
        for method, container, obj in self._scenario_generator():
            self.prepare_request(method, use_service_account=True,
                                 container=container, obj=obj)
            resp = retry(self.do_request)
            self._assert_not_authed_response(method, container, obj, resp)

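    # The service token by itself must not grant access, whether it is
    # sent as X-Auth-Token or doubled into X-Service-Token.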
    def test_service_user_denied_with_x_auth_token(self):
        for method, container, obj in self._scenario_generator():
            self.prepare_request(method, use_service_account=True,
                                 container=container, obj=obj,
                                 x_auth_token=self.SET_TO_SERVICE_TOKEN)
            resp = retry(self.do_request, service_user=5)
            self._assert_not_authed_response(method, container, obj, resp)

    def test_service_user_denied_with_x_service_token(self):
        for method, container, obj in self._scenario_generator():
            self.prepare_request(method, use_service_account=True,
                                 container=container, obj=obj,
                                 x_auth_token=self.SET_TO_SERVICE_TOKEN,
                                 x_service_token=self.SET_TO_SERVICE_TOKEN)
            resp = retry(self.do_request, service_user=5)
            self._assert_not_authed_response(method, container, obj, resp)

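    # Only the combination of the user's token in X-Auth-Token and the
    # service token in X-Service-Token should grant access to the
    # service account.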
    def test_user_plus_service_can_access_service_account(self):
        for method, container, obj in self._scenario_generator():
            self.prepare_request(method, use_service_account=True,
                                 container=container, obj=obj,
                                 x_auth_token=self.SET_TO_USERS_TOKEN,
                                 x_service_token=self.SET_TO_SERVICE_TOKEN)
            resp = retry(self.do_request, service_user=5)
            self._assert_is_authed_response(method, container, obj, resp)


if __name__ == '__main__':
    unittest2.main()