2016-11-17 17:17:00 -05:00
|
|
|
#!/usr/bin/python -u
|
|
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
|
|
# implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
2018-05-24 13:00:58 -07:00
|
|
|
from copy import deepcopy
|
2016-11-17 17:17:00 -05:00
|
|
|
import json
|
|
|
|
import time
|
2020-01-12 03:10:25 -06:00
|
|
|
import unittest
|
2019-08-09 15:59:59 -07:00
|
|
|
import six
|
2018-06-01 15:38:10 -07:00
|
|
|
from six.moves.urllib.parse import quote, unquote
|
2016-11-17 17:17:00 -05:00
|
|
|
|
|
|
|
import test.functional as tf
|
2017-12-15 12:36:47 +00:00
|
|
|
|
|
|
|
from swift.common.utils import MD5_OF_EMPTY_STRING
|
2016-07-25 13:50:24 +01:00
|
|
|
from test.functional.tests import Base, Base2, BaseEnv, Utils
|
2017-12-18 09:33:40 -08:00
|
|
|
from test.functional import cluster_info, SkipTest
|
2016-11-17 17:17:00 -05:00
|
|
|
from test.functional.swift_test_client import Account, Connection, \
|
|
|
|
ResponseError
|
|
|
|
|
|
|
|
|
|
|
|
def setUpModule():
    """Initialize the functional-test package (config, auth) before any test."""
    tf.setup_package()
|
|
|
|
|
|
|
|
|
|
|
|
def tearDownModule():
    """Release functional-test package resources after all tests have run."""
    tf.teardown_package()
|
|
|
|
|
|
|
|
|
2016-07-25 13:50:24 +01:00
|
|
|
class TestObjectVersioningEnv(BaseEnv):
    """Environment for object-versioning tests (X-Versions-Location mode).

    setUp creates a ``<prefix>-versions`` container plus a ``<prefix>-objs``
    container whose ``location_header_key`` header points at it, then probes
    the cluster to learn whether versioning is actually enabled.  When the
    secondary test accounts are configured (``tf.skip2``/``tf.skip3`` are
    false) it also authenticates them for the ACL tests.
    """

    versioning_enabled = None  # tri-state: None initially, then True/False
    # Header that designates the versions container; subclasses override this
    # (e.g. X-History-Location for history mode).
    location_header_key = 'X-Versions-Location'
    account2 = None

    @classmethod
    def setUp(cls):
        super(TestObjectVersioningEnv, cls).setUp()
        if not tf.skip2:
            # Second connection for ACL tests
            # NOTE(review): cls.conn2 is set up again further down with the
            # same config; this first authentication appears redundant.
            config2 = deepcopy(tf.config)
            config2['account'] = tf.config['account2']
            config2['username'] = tf.config['username2']
            config2['password'] = tf.config['password2']
            cls.conn2 = Connection(config2)
            cls.conn2.authenticate()

        if six.PY2:
            # avoid getting a prefix that stops halfway through an encoded
            # character
            prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")
        else:
            prefix = Utils.create_name()[:10]

        # Container that will receive archived versions.
        cls.versions_container = cls.account.container(prefix + "-versions")
        if not cls.versions_container.create():
            raise ResponseError(cls.conn.response)

        # Source container, configured to archive into versions_container.
        cls.container = cls.account.container(prefix + "-objs")
        container_headers = {
            cls.location_header_key: quote(cls.versions_container.name)}
        if not cls.container.create(hdrs=container_headers):
            # A 412 means the cluster rejected the versioning header, i.e.
            # the feature is disabled; record that so tests can skip.
            if cls.conn.response.status == 412:
                cls.versioning_enabled = False
                return
            raise ResponseError(cls.conn.response)

        container_info = cls.container.info()
        # if versioning is off, then cls.location_header_key won't persist
        cls.versioning_enabled = 'versions' in container_info

        if not tf.skip2:
            # setup another account to test ACLs
            config2 = deepcopy(tf.config)
            config2['account'] = tf.config['account2']
            config2['username'] = tf.config['username2']
            config2['password'] = tf.config['password2']
            cls.conn2 = Connection(config2)
            cls.storage_url2, cls.storage_token2 = cls.conn2.authenticate()
            cls.account2 = cls.conn2.get_account()
            cls.account2.delete_containers()

        if not tf.skip3:
            # setup another account with no access to anything to test ACLs
            config3 = deepcopy(tf.config)
            config3['account'] = tf.config['account']
            config3['username'] = tf.config['username3']
            config3['password'] = tf.config['password3']
            cls.conn3 = Connection(config3)
            cls.storage_url3, cls.storage_token3 = cls.conn3.authenticate()
            cls.account3 = cls.conn3.get_account()

    @classmethod
    def tearDown(cls):
        """Remove containers created in both test accounts (best effort)."""
        if cls.account:
            cls.account.delete_containers()
        if cls.account2:
            cls.account2.delete_containers()
|
2016-11-17 17:17:00 -05:00
|
|
|
|
|
|
|
|
2016-07-25 13:50:24 +01:00
|
|
|
class TestCrossPolicyObjectVersioningEnv(BaseEnv):
    """Environment for versioning tests where the source container and the
    versions container live on *different* storage policies.

    Requires a cluster with at least two policies; when only one is
    available the environment claims ``versioning_enabled = True`` so the
    tests skip with the more accurate "multiple policies" reason instead.
    """

    # tri-state: None initially, then True/False
    versioning_enabled = None
    # tri-state as well; stays None until the policy probe below runs
    multiple_policies_enabled = None
    policies = None
    location_header_key = 'X-Versions-Location'
    account2 = None

    @classmethod
    def setUp(cls):
        super(TestCrossPolicyObjectVersioningEnv, cls).setUp()
        if cls.multiple_policies_enabled is None:
            try:
                cls.policies = tf.FunctionalStoragePolicyCollection.from_info()
            except AssertionError:
                # cluster info did not expose usable policies; leave
                # cls.policies as None and fall through to the skip path
                pass

        if cls.policies and len(cls.policies) > 1:
            cls.multiple_policies_enabled = True
        else:
            cls.multiple_policies_enabled = False
            cls.versioning_enabled = True
            # We don't actually know the state of versioning, but without
            # multiple policies the tests should be skipped anyway. Claiming
            # versioning support lets us report the right reason for skipping.
            return

        # Pick two distinct policies: one for the versions container, a
        # different one for the source container.
        policy = cls.policies.select()
        version_policy = cls.policies.exclude(name=policy['name']).select()

        if not tf.skip2:
            # Second connection for ACL tests
            # NOTE(review): conn2 is authenticated again further down with
            # the same config; this first pass appears redundant.
            config2 = deepcopy(tf.config)
            config2['account'] = tf.config['account2']
            config2['username'] = tf.config['username2']
            config2['password'] = tf.config['password2']
            cls.conn2 = Connection(config2)
            cls.conn2.authenticate()

        if six.PY2:
            # avoid getting a prefix that stops halfway through an encoded
            # character
            prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")
        else:
            prefix = Utils.create_name()[:10]

        cls.versions_container = cls.account.container(prefix + "-versions")
        if not cls.versions_container.create(
                {'X-Storage-Policy': policy['name']}):
            raise ResponseError(cls.conn.response)

        cls.container = cls.account.container(prefix + "-objs")
        if not cls.container.create(
                hdrs={cls.location_header_key: cls.versions_container.name,
                      'X-Storage-Policy': version_policy['name']}):
            # 412 means the versioning header was rejected: feature disabled
            if cls.conn.response.status == 412:
                cls.versioning_enabled = False
                return
            raise ResponseError(cls.conn.response)

        container_info = cls.container.info()
        # if versioning is off, then X-Versions-Location won't persist
        cls.versioning_enabled = 'versions' in container_info

        if not tf.skip2:
            # setup another account to test ACLs
            config2 = deepcopy(tf.config)
            config2['account'] = tf.config['account2']
            config2['username'] = tf.config['username2']
            config2['password'] = tf.config['password2']
            cls.conn2 = Connection(config2)
            cls.storage_url2, cls.storage_token2 = cls.conn2.authenticate()
            cls.account2 = cls.conn2.get_account()
            cls.account2.delete_containers()

        if not tf.skip3:
            # setup another account with no access to anything to test ACLs
            config3 = deepcopy(tf.config)
            config3['account'] = tf.config['account']
            config3['username'] = tf.config['username3']
            config3['password'] = tf.config['password3']
            cls.conn3 = Connection(config3)
            cls.storage_url3, cls.storage_token3 = cls.conn3.authenticate()
            cls.account3 = cls.conn3.get_account()

    @classmethod
    def tearDown(cls):
        """Remove containers created in both test accounts (best effort)."""
        if cls.account:
            cls.account.delete_containers()
        if cls.account2:
            cls.account2.delete_containers()
|
2016-11-17 17:17:00 -05:00
|
|
|
|
|
|
|
|
|
|
|
class TestObjectVersioningHistoryModeEnv(TestObjectVersioningEnv):
    """Same environment as TestObjectVersioningEnv, but running in history
    mode: the container is configured via X-History-Location, under which
    deleted originals are also copied into the versions container."""
    location_header_key = 'X-History-Location'
|
|
|
|
|
|
|
|
|
|
|
|
class TestObjectVersioning(Base):
|
|
|
|
env = TestObjectVersioningEnv
|
|
|
|
|
|
|
|
def setUp(self):
|
|
|
|
super(TestObjectVersioning, self).setUp()
|
|
|
|
if self.env.versioning_enabled is False:
|
|
|
|
raise SkipTest("Object versioning not enabled")
|
|
|
|
elif self.env.versioning_enabled is not True:
|
|
|
|
# just some sanity checking
|
|
|
|
raise Exception(
|
|
|
|
"Expected versioning_enabled to be True/False, got %r" %
|
|
|
|
(self.env.versioning_enabled,))
|
|
|
|
|
|
|
|
def _tear_down_files(self):
|
|
|
|
try:
|
|
|
|
# only delete files and not containers
|
|
|
|
# as they were configured in self.env
|
2017-12-12 21:39:54 -08:00
|
|
|
# get rid of any versions so they aren't restored
|
2016-11-17 17:17:00 -05:00
|
|
|
self.env.versions_container.delete_files()
|
2017-12-12 21:39:54 -08:00
|
|
|
# get rid of originals
|
2016-11-17 17:17:00 -05:00
|
|
|
self.env.container.delete_files()
|
2017-12-12 21:39:54 -08:00
|
|
|
# in history mode, deleted originals got copied to versions, so
|
|
|
|
# clear that again
|
2017-12-15 12:36:47 +00:00
|
|
|
self.env.versions_container.delete_files()
|
2016-11-17 17:17:00 -05:00
|
|
|
except ResponseError:
|
|
|
|
pass
|
|
|
|
|
|
|
|
    def tearDown(self):
        """Run the base teardown, then clear files created by this test."""
        super(TestObjectVersioning, self).tearDown()
        self._tear_down_files()
|
|
|
|
|
|
|
|
def test_clear_version_option(self):
|
|
|
|
# sanity
|
2018-05-24 13:00:58 -07:00
|
|
|
header_val = quote(self.env.versions_container.name)
|
|
|
|
self.assertEqual(self.env.container.info()['versions'], header_val)
|
2016-11-17 17:17:00 -05:00
|
|
|
self.env.container.update_metadata(
|
|
|
|
hdrs={self.env.location_header_key: ''})
|
|
|
|
self.assertIsNone(self.env.container.info().get('versions'))
|
|
|
|
|
|
|
|
# set location back to the way it was
|
|
|
|
self.env.container.update_metadata(
|
2018-05-24 13:00:58 -07:00
|
|
|
hdrs={self.env.location_header_key: header_val})
|
|
|
|
self.assertEqual(self.env.container.info()['versions'], header_val)
|
2016-11-17 17:17:00 -05:00
|
|
|
|
2018-03-22 19:26:24 +09:00
|
|
|
    def _test_overwriting_setup(self, obj_name=None):
        """Write a sequence of overwrites (PUT, PUT, POST, PUT, COPY) to a
        versioned object and assert an archive is created for each overwrite
        but not for the POST.

        :param obj_name: optional object name; a random one is created when
                         not given (callers pass URL-encoded-looking names
                         to exercise quoting).
        :returns: tuple of (versioned_obj, expected_headers,
                  expected_content_types) for mode-specific follow-up checks;
                  expected_content_types lists the content type of every
                  write in order, including the current version.
        """
        container = self.env.container
        versions_container = self.env.versions_container
        cont_info = container.info()
        self.assertEqual(cont_info['versions'], quote(versions_container.name))
        expected_content_types = []
        obj_name = obj_name or Utils.create_name()

        # First PUT establishes the object with extra entity headers.
        versioned_obj = container.file(obj_name)
        put_headers = {'Content-Type': 'text/jibberish01',
                       'Content-Encoding': 'gzip',
                       'Content-Disposition': 'attachment; filename=myfile'}
        versioned_obj.write(b"aaaaa", hdrs=put_headers)
        obj_info = versioned_obj.info()
        self.assertEqual('text/jibberish01', obj_info['content_type'])
        expected_content_types.append('text/jibberish01')

        # the allowed headers are configurable in object server, so we cannot
        # assert that content-encoding or content-disposition get *copied* to
        # the object version unless they were set on the original PUT, so
        # populate expected_headers by making a HEAD on the original object
        resp_headers = {
            h.lower(): v for h, v in versioned_obj.conn.response.getheaders()}
        expected_headers = {}
        for k, v in put_headers.items():
            if k.lower() in resp_headers:
                expected_headers[k] = v

        # No archives yet; the second PUT should create the first one.
        self.assertEqual(0, versions_container.info()['object_count'])
        versioned_obj.write(b"bbbbb", hdrs={'Content-Type': 'text/jibberish02',
                                            'X-Object-Meta-Foo': 'Bar'})
        versioned_obj.initialize()
        self.assertEqual(versioned_obj.content_type, 'text/jibberish02')
        expected_content_types.append('text/jibberish02')
        self.assertEqual(versioned_obj.metadata['foo'], 'Bar')

        # the old version got saved off
        self.assertEqual(1, versions_container.info()['object_count'])
        versioned_obj_name = versions_container.files()[0]
        prev_version = versions_container.file(versioned_obj_name)
        prev_version.initialize()
        self.assertEqual(b"aaaaa", prev_version.read())
        self.assertEqual(prev_version.content_type, 'text/jibberish01')

        # the archived copy kept the original entity headers
        resp_headers = {
            h.lower(): v for h, v in prev_version.conn.response.getheaders()}
        for k, v in expected_headers.items():
            self.assertIn(k.lower(), resp_headers)
            self.assertEqual(v, resp_headers[k.lower()])

        # make sure the new obj metadata did not leak to the prev. version
        self.assertNotIn('foo', prev_version.metadata)

        # check that POST does not create a new version
        versioned_obj.sync_metadata(metadata={'fu': 'baz'})
        self.assertEqual(1, versions_container.info()['object_count'])

        # if we overwrite it again, there are two versions
        versioned_obj.write(b"ccccc")
        self.assertEqual(2, versions_container.info()['object_count'])
        expected_content_types.append('text/jibberish02')
        versioned_obj_name = versions_container.files()[1]
        prev_version = versions_container.file(versioned_obj_name)
        prev_version.initialize()
        self.assertEqual(b"bbbbb", prev_version.read())
        self.assertEqual(prev_version.content_type, 'text/jibberish02')
        # POSTed metadata travels with the archived version, PUT-era
        # metadata from the newer write does not
        self.assertNotIn('foo', prev_version.metadata)
        self.assertIn('fu', prev_version.metadata)

        # versioned_obj keeps the newest content
        self.assertEqual(b"ccccc", versioned_obj.read())

        # test copy from a different container
        src_container = self.env.account.container(Utils.create_name())
        self.assertTrue(src_container.create())
        src_name = Utils.create_name()
        src_obj = src_container.file(src_name)
        src_obj.write(b"ddddd", hdrs={'Content-Type': 'text/jibberish04'})
        src_obj.copy(container.name, obj_name)

        self.assertEqual(b"ddddd", versioned_obj.read())
        versioned_obj.initialize()
        self.assertEqual(versioned_obj.content_type, 'text/jibberish04')
        expected_content_types.append('text/jibberish04')

        # make sure versions container has the previous version
        self.assertEqual(3, versions_container.info()['object_count'])
        versioned_obj_name = versions_container.files()[2]
        prev_version = versions_container.file(versioned_obj_name)
        prev_version.initialize()
        self.assertEqual(b"ccccc", prev_version.read())

        # for further use in the mode-specific tests
        return (versioned_obj, expected_headers, expected_content_types)
|
|
|
|
|
|
|
|
    def test_overwriting(self):
        """In stack (X-Versions-Location) mode, each DELETE pops the most
        recent archive back into place; the final DELETE on the original
        content removes the object entirely."""
        versions_container = self.env.versions_container
        versioned_obj, expected_headers, expected_content_types = \
            self._test_overwriting_setup()

        # pop one for the current version
        expected_content_types.pop()
        self.assertEqual(expected_content_types, [
            o['content_type'] for o in versions_container.files(
                parms={'format': 'json'})])

        # test delete: "ddddd" goes away, "ccccc" is restored
        versioned_obj.delete()
        self.assertEqual(b"ccccc", versioned_obj.read())
        expected_content_types.pop()
        self.assertEqual(expected_content_types, [
            o['content_type'] for o in versions_container.files(
                parms={'format': 'json'})])

        # next delete restores "bbbbb"
        versioned_obj.delete()
        self.assertEqual(b"bbbbb", versioned_obj.read())
        expected_content_types.pop()
        self.assertEqual(expected_content_types, [
            o['content_type'] for o in versions_container.files(
                parms={'format': 'json'})])

        # and the next restores the original, leaving no archives
        versioned_obj.delete()
        self.assertEqual(b"aaaaa", versioned_obj.read())
        self.assertEqual(0, versions_container.info()['object_count'])

        # verify that all the original object headers have been copied back
        obj_info = versioned_obj.info()
        self.assertEqual('text/jibberish01', obj_info['content_type'])
        resp_headers = {
            h.lower(): v for h, v in versioned_obj.conn.response.getheaders()}
        for k, v in expected_headers.items():
            self.assertIn(k.lower(), resp_headers)
            self.assertEqual(v, resp_headers[k.lower()])

        # with no archives left, the final delete removes the object
        versioned_obj.delete()
        self.assertRaises(ResponseError, versioned_obj.read)
|
|
|
|
|
2018-03-22 19:26:24 +09:00
|
|
|
    def test_overwriting_with_url_encoded_object_name(self):
        """Same restore-on-delete sequence as test_overwriting, but with an
        object name containing a percent-escape-looking suffix ('%25ff') to
        exercise quoting of version names."""
        versions_container = self.env.versions_container
        obj_name = Utils.create_name() + '%25ff'
        versioned_obj, expected_headers, expected_content_types = \
            self._test_overwriting_setup(obj_name)

        # pop one for the current version
        expected_content_types.pop()
        self.assertEqual(expected_content_types, [
            o['content_type'] for o in versions_container.files(
                parms={'format': 'json'})])

        # test delete: "ddddd" goes away, "ccccc" is restored
        versioned_obj.delete()
        self.assertEqual(b"ccccc", versioned_obj.read())
        expected_content_types.pop()
        self.assertEqual(expected_content_types, [
            o['content_type'] for o in versions_container.files(
                parms={'format': 'json'})])

        # next delete restores "bbbbb"
        versioned_obj.delete()
        self.assertEqual(b"bbbbb", versioned_obj.read())
        expected_content_types.pop()
        self.assertEqual(expected_content_types, [
            o['content_type'] for o in versions_container.files(
                parms={'format': 'json'})])

        # and the next restores the original, leaving no archives
        versioned_obj.delete()
        self.assertEqual(b"aaaaa", versioned_obj.read())
        self.assertEqual(0, versions_container.info()['object_count'])

        # verify that all the original object headers have been copied back
        obj_info = versioned_obj.info()
        self.assertEqual('text/jibberish01', obj_info['content_type'])
        resp_headers = {
            h.lower(): v for h, v in versioned_obj.conn.response.getheaders()}
        for k, v in expected_headers.items():
            self.assertIn(k.lower(), resp_headers)
            self.assertEqual(v, resp_headers[k.lower()])

        # with no archives left, the final delete removes the object
        versioned_obj.delete()
        self.assertRaises(ResponseError, versioned_obj.read)
|
|
|
|
|
2017-03-15 19:30:38 +00:00
|
|
|
def assert_most_recent_version(self, obj_name, content,
|
|
|
|
should_be_dlo=False):
|
2019-08-09 15:59:59 -07:00
|
|
|
name_len = len(obj_name if six.PY2 else obj_name.encode('utf8'))
|
2017-03-15 19:30:38 +00:00
|
|
|
archive_versions = self.env.versions_container.files(parms={
|
2019-08-09 15:59:59 -07:00
|
|
|
'prefix': '%03x%s/' % (name_len, obj_name),
|
2017-03-15 19:30:38 +00:00
|
|
|
'reverse': 'yes'})
|
|
|
|
archive_file = self.env.versions_container.file(archive_versions[0])
|
|
|
|
self.assertEqual(content, archive_file.read())
|
2019-08-09 15:59:59 -07:00
|
|
|
resp_headers = {
|
|
|
|
h.lower(): v for h, v in archive_file.conn.response.getheaders()}
|
2017-03-15 19:30:38 +00:00
|
|
|
if should_be_dlo:
|
|
|
|
self.assertIn('x-object-manifest', resp_headers)
|
|
|
|
else:
|
|
|
|
self.assertNotIn('x-object-manifest', resp_headers)
|
|
|
|
|
|
|
|
    def _test_versioning_dlo_setup(self):
        """Build a versioned object whose history interleaves plain content
        and a DLO manifest.

        Writes three segments (each overwritten once, producing three
        archives), then a normal object, then a DLO manifest over the
        segments, then normal content again.  Asserts the archive count and
        content after each step.

        :returns: (obj_name, man_file) for the mode-specific delete tests.
        """
        if tf.in_process:
            # in-process servers need xattr support on the backing filesystem
            tf.skip_if_no_xattrs()

        container = self.env.container
        versions_container = self.env.versions_container
        obj_name = Utils.create_name()

        # Create segments <name>/1..3 with content "11", "22", "33"; each
        # initial write is overwritten, so each segment leaves one archive.
        for i in ('1', '2', '3'):
            time.sleep(.01)  # guarantee that the timestamp changes
            obj_name_seg = obj_name + '/' + i
            versioned_obj = container.file(obj_name_seg)
            versioned_obj.write(i.encode('ascii'))
            # immediately overwrite
            versioned_obj.write((i + i).encode('ascii'))

        self.assertEqual(3, versions_container.info()['object_count'])

        man_file = container.file(obj_name)

        # write a normal file first
        man_file.write(b'old content')

        # guarantee that the timestamp changes
        time.sleep(.01)

        # overwrite with a dlo manifest
        man_file.write(b'', hdrs={"X-Object-Manifest": "%s/%s/" %
                                  (self.env.container.name, obj_name)})

        # the normal file became the fourth archive; reading the manifest
        # concatenates the segments
        self.assertEqual(4, versions_container.info()['object_count'])
        self.assertEqual(b"112233", man_file.read())
        self.assert_most_recent_version(obj_name, b'old content')

        # overwrite the manifest with a normal file
        man_file.write(b'new content')
        self.assertEqual(5, versions_container.info()['object_count'])

        # new most-recent archive is the dlo
        self.assert_most_recent_version(
            obj_name, b'112233', should_be_dlo=True)

        return obj_name, man_file
|
|
|
|
|
|
|
|
def test_versioning_dlo(self):
|
|
|
|
obj_name, man_file = self._test_versioning_dlo_setup()
|
|
|
|
|
|
|
|
# verify that restore works properly
|
|
|
|
man_file.delete()
|
|
|
|
self.assertEqual(4, self.env.versions_container.info()['object_count'])
|
2019-08-09 15:59:59 -07:00
|
|
|
self.assertEqual(b"112233", man_file.read())
|
|
|
|
resp_headers = {
|
|
|
|
h.lower(): v for h, v in man_file.conn.response.getheaders()}
|
2017-03-15 19:30:38 +00:00
|
|
|
self.assertIn('x-object-manifest', resp_headers)
|
|
|
|
|
2019-08-09 15:59:59 -07:00
|
|
|
self.assert_most_recent_version(obj_name, b'old content')
|
2017-03-15 19:30:38 +00:00
|
|
|
|
|
|
|
man_file.delete()
|
|
|
|
self.assertEqual(3, self.env.versions_container.info()['object_count'])
|
2019-08-09 15:59:59 -07:00
|
|
|
self.assertEqual(b"old content", man_file.read())
|
2016-11-17 17:17:00 -05:00
|
|
|
|
|
|
|
    def test_versioning_container_acl(self):
        """Exercise versioning interactions with container ACLs across three
        accounts: the owner (account1), a writer granted via X-Container-Write
        (account2), and an unrelated user (user3 on account1's credentials
        list) with no access.

        Verifies that a non-owner cannot set the versions location, that
        writes through the ACL still archive versions, and that reads,
        deletes, and copies against the versions container fail for
        non-owners while deletes on the source container succeed for the
        ACL'd writer.
        """
        if tf.skip2:
            raise SkipTest('Account2 not set')
        # create versions container and DO NOT give write access to account2
        versions_container = self.env.account.container(Utils.create_name())
        location_header_val = quote(str(versions_container))
        self.assertTrue(versions_container.create(hdrs={
            'X-Container-Write': ''
        }))

        # check account2 cannot write to versions container
        fail_obj_name = Utils.create_name()
        fail_obj = versions_container.file(fail_obj_name)
        self.assertRaises(ResponseError, fail_obj.write, b"should fail",
                          cfg={'use_token': self.env.storage_token2})

        # create container and give write access to account2
        # don't set X-Versions-Location just yet
        container = self.env.account.container(Utils.create_name())
        self.assertTrue(container.create(hdrs={
            'X-Container-Write': self.env.conn2.user_acl}))

        # check account2 cannot set X-Versions-Location on container
        self.assertRaises(ResponseError, container.update_metadata, hdrs={
            self.env.location_header_key: location_header_val},
            cfg={'use_token': self.env.storage_token2})

        # good! now let admin set the X-Versions-Location
        # p.s.: sticking a 'x-remove' header here to test precedence
        # of both headers. Setting the location should succeed.
        self.assertTrue(container.update_metadata(hdrs={
            'X-Remove-' + self.env.location_header_key[len('X-'):]:
                location_header_val,
            self.env.location_header_key: location_header_val}))

        # write object twice to container and check version
        obj_name = Utils.create_name()
        versioned_obj = container.file(obj_name)
        self.assertTrue(versioned_obj.write(b"never argue with the data",
                        cfg={'use_token': self.env.storage_token2}))
        self.assertEqual(versioned_obj.read(), b"never argue with the data")

        self.assertTrue(
            versioned_obj.write(b"we don't have no beer, just tequila",
                                cfg={'use_token': self.env.storage_token2}))
        self.assertEqual(versioned_obj.read(),
                         b"we don't have no beer, just tequila")
        self.assertEqual(1, versions_container.info()['object_count'])

        # read the original uploaded object
        for filename in versions_container.files():
            backup_file = versions_container.file(filename)
            break
        self.assertEqual(backup_file.read(), b"never argue with the data")

        # user3 (some random user with no access to any of account1)
        # tries to read from versioned container
        self.assertRaises(ResponseError, backup_file.read,
                          cfg={'use_token': self.env.storage_token3})

        # create an object user3 can try to copy
        a2_container = self.env.account2.container(Utils.create_name())
        a2_container.create(
            hdrs={'X-Container-Read': self.env.conn3.user_acl},
            cfg={'use_token': self.env.storage_token2})
        a2_obj = a2_container.file(Utils.create_name())
        self.assertTrue(a2_obj.write(b"unused",
                        cfg={'use_token': self.env.storage_token2}))

        # user3 cannot write, delete, or copy to/from source container either
        number_of_versions = versions_container.info()['object_count']
        self.assertRaises(ResponseError, versioned_obj.write,
                          b"some random user trying to write data",
                          cfg={'use_token': self.env.storage_token3})
        self.assertEqual(number_of_versions,
                         versions_container.info()['object_count'])
        self.assertRaises(ResponseError, versioned_obj.delete,
                          cfg={'use_token': self.env.storage_token3})
        self.assertEqual(number_of_versions,
                         versions_container.info()['object_count'])
        # server-side COPY into the versioned container must also fail,
        # whether spelled as X-Copy-From or as a cross-account COPY
        self.assertRaises(
            ResponseError, versioned_obj.write,
            hdrs={'X-Copy-From': '%s/%s' % (a2_container.name, a2_obj.name),
                  'X-Copy-From-Account': self.env.conn2.account_name},
            cfg={'use_token': self.env.storage_token3})
        self.assertEqual(number_of_versions,
                         versions_container.info()['object_count'])
        self.assertRaises(
            ResponseError, a2_obj.copy_account,
            self.env.conn.account_name, container.name, obj_name,
            cfg={'use_token': self.env.storage_token3})
        self.assertEqual(number_of_versions,
                         versions_container.info()['object_count'])

        # user2 can't read or delete from versions-location
        self.assertRaises(ResponseError, backup_file.read,
                          cfg={'use_token': self.env.storage_token2})
        self.assertRaises(ResponseError, backup_file.delete,
                          cfg={'use_token': self.env.storage_token2})

        # but is able to delete from the source container
        # this could be a helpful scenario for dev ops that want to setup
        # just one container to hold object versions of multiple containers
        # and each one of those containers are owned by different users
        self.assertTrue(versioned_obj.delete(
            cfg={'use_token': self.env.storage_token2}))

        # tear-down since we create these containers here
        # and not in self.env
        a2_container.delete_recursive()
        versions_container.delete_recursive()
        container.delete_recursive()
|
|
|
|
|
2017-03-15 21:43:18 +00:00
|
|
|
def _test_versioning_check_acl_setup(self):
|
2016-11-17 17:17:00 -05:00
|
|
|
container = self.env.container
|
|
|
|
versions_container = self.env.versions_container
|
|
|
|
versions_container.create(hdrs={'X-Container-Read': '.r:*,.rlistings'})
|
|
|
|
|
|
|
|
obj_name = Utils.create_name()
|
|
|
|
versioned_obj = container.file(obj_name)
|
2019-08-09 15:59:59 -07:00
|
|
|
versioned_obj.write(b"aaaaa")
|
|
|
|
self.assertEqual(b"aaaaa", versioned_obj.read())
|
2016-11-17 17:17:00 -05:00
|
|
|
|
2019-08-09 15:59:59 -07:00
|
|
|
versioned_obj.write(b"bbbbb")
|
|
|
|
self.assertEqual(b"bbbbb", versioned_obj.read())
|
2016-11-17 17:17:00 -05:00
|
|
|
|
|
|
|
# Use token from second account and try to delete the object
|
|
|
|
org_token = self.env.account.conn.storage_token
|
|
|
|
self.env.account.conn.storage_token = self.env.conn2.storage_token
|
|
|
|
try:
|
|
|
|
with self.assertRaises(ResponseError) as cm:
|
|
|
|
versioned_obj.delete()
|
|
|
|
self.assertEqual(403, cm.exception.status)
|
|
|
|
finally:
|
|
|
|
self.env.account.conn.storage_token = org_token
|
|
|
|
|
|
|
|
# Verify with token from first account
|
2019-08-09 15:59:59 -07:00
|
|
|
self.assertEqual(b"bbbbb", versioned_obj.read())
|
2017-03-15 21:43:18 +00:00
|
|
|
return versioned_obj
|
2016-11-17 17:17:00 -05:00
|
|
|
|
2017-03-15 21:43:18 +00:00
|
|
|
def test_versioning_check_acl(self):
|
2018-02-20 14:15:31 -08:00
|
|
|
if tf.skip2:
|
|
|
|
raise SkipTest('Account2 not set')
|
2017-03-15 21:43:18 +00:00
|
|
|
versioned_obj = self._test_versioning_check_acl_setup()
|
2016-11-17 17:17:00 -05:00
|
|
|
versioned_obj.delete()
|
2019-08-09 15:59:59 -07:00
|
|
|
self.assertEqual(b"aaaaa", versioned_obj.read())
|
2016-11-17 17:17:00 -05:00
|
|
|
|
2017-12-15 12:36:47 +00:00
|
|
|
    def _check_overwriting_symlink(self):
        """Overwrite a symlink with another symlink and verify that the
        old symlink (not its target's data) is what gets versioned.

        Returns (symlink, tgt_a) so callers can assert mode-specific
        delete behavior.
        """
        # assertions common to x-versions-location and x-history-location modes
        container = self.env.container
        versions_container = self.env.versions_container

        tgt_a_name = Utils.create_name()
        tgt_b_name = Utils.create_name()

        tgt_a = container.file(tgt_a_name)
        tgt_a.write(b"aaaaa")

        tgt_b = container.file(tgt_b_name)
        tgt_b.write(b"bbbbb")

        symlink_name = Utils.create_name()
        sym_tgt_header = quote(unquote('%s/%s' % (container.name, tgt_a_name)))
        sym_headers_a = {'X-Symlink-Target': sym_tgt_header}
        symlink = container.file(symlink_name)
        # a symlink is a zero-byte object carrying an X-Symlink-Target header;
        # reading it follows the link to the target's data
        symlink.write(b"", hdrs=sym_headers_a)
        self.assertEqual(b"aaaaa", symlink.read())

        # re-point the symlink at target b; this overwrite should version
        # the old symlink object
        sym_headers_b = {'X-Symlink-Target': '%s/%s' % (container.name,
                                                        tgt_b_name)}
        symlink.write(b"", hdrs=sym_headers_b)
        self.assertEqual(b"bbbbb", symlink.read())

        # the old version got saved off
        self.assertEqual(1, versions_container.info()['object_count'])
        versioned_obj_name = versions_container.files()[0]
        prev_version = versions_container.file(versioned_obj_name)
        prev_version_info = prev_version.info(parms={'symlink': 'get'})
        # the saved version is still a symlink to tgt_a
        self.assertEqual(b"aaaaa", prev_version.read())
        symlink_etag = prev_version_info['etag']
        # strip RFC-style quoting if the cluster quotes etags
        if symlink_etag.startswith('"') and symlink_etag.endswith('"') and \
                symlink_etag[1:-1]:
            symlink_etag = symlink_etag[1:-1]
        # ?symlink=get reports the symlink object itself: empty body etag
        self.assertEqual(MD5_OF_EMPTY_STRING, symlink_etag)
        self.assertEqual(sym_tgt_header,
                         prev_version_info['x_symlink_target'])
        return symlink, tgt_a
|
|
|
|
|
|
|
|
def test_overwriting_symlink(self):
|
2017-12-25 09:13:17 +00:00
|
|
|
if 'symlink' not in cluster_info:
|
|
|
|
raise SkipTest("Symlinks not enabled")
|
|
|
|
|
2017-12-15 12:36:47 +00:00
|
|
|
symlink, target = self._check_overwriting_symlink()
|
|
|
|
# test delete
|
|
|
|
symlink.delete()
|
|
|
|
sym_info = symlink.info(parms={'symlink': 'get'})
|
2019-08-09 15:59:59 -07:00
|
|
|
self.assertEqual(b"aaaaa", symlink.read())
|
2019-11-19 21:25:45 -08:00
|
|
|
if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
|
|
|
|
self.assertEqual('"%s"' % MD5_OF_EMPTY_STRING, sym_info['etag'])
|
|
|
|
else:
|
|
|
|
self.assertEqual(MD5_OF_EMPTY_STRING, sym_info['etag'])
|
2018-06-01 15:38:10 -07:00
|
|
|
self.assertEqual(
|
|
|
|
quote(unquote('%s/%s' % (self.env.container.name, target.name))),
|
|
|
|
sym_info['x_symlink_target'])
|
2017-12-15 12:36:47 +00:00
|
|
|
|
2017-12-12 21:39:54 -08:00
|
|
|
def _setup_symlink(self):
|
|
|
|
target = self.env.container.file('target-object')
|
2019-08-09 15:59:59 -07:00
|
|
|
target.write(b'target object data')
|
2017-12-12 21:39:54 -08:00
|
|
|
symlink = self.env.container.file('symlink')
|
2019-08-09 15:59:59 -07:00
|
|
|
symlink.write(b'', hdrs={
|
2017-12-12 21:39:54 -08:00
|
|
|
'Content-Type': 'application/symlink',
|
|
|
|
'X-Symlink-Target': '%s/%s' % (
|
|
|
|
self.env.container.name, target.name)})
|
|
|
|
return symlink, target
|
|
|
|
|
|
|
|
def _assert_symlink(self, symlink, target):
|
2019-08-09 15:59:59 -07:00
|
|
|
self.assertEqual(b'target object data', symlink.read())
|
2017-12-12 21:39:54 -08:00
|
|
|
self.assertEqual(target.info(), symlink.info())
|
|
|
|
self.assertEqual('application/symlink',
|
|
|
|
symlink.info(parms={
|
|
|
|
'symlink': 'get'})['content_type'])
|
|
|
|
|
|
|
|
    def _check_copy_destination_restore_symlink(self):
        """Overwrite a symlink with a plain object, then restore the
        symlink by copying the saved version back with ?symlink=get.

        Returns (symlink, target) for mode-specific delete assertions.
        """
        # assertions common to x-versions-location and x-history-location modes
        symlink, target = self._setup_symlink()
        symlink.write(b'this is not a symlink')
        # the symlink is versioned
        version_container_files = self.env.versions_container.files(
            parms={'format': 'json'})
        self.assertEqual(1, len(version_container_files))
        versioned_obj_info = version_container_files[0]
        self.assertEqual('application/symlink',
                         versioned_obj_info['content_type'])
        versioned_obj = self.env.versions_container.file(
            versioned_obj_info['name'])
        # the symlink is still a symlink
        self._assert_symlink(versioned_obj, target)
        # test manual restore (this creates a new backup of the overwrite)
        versioned_obj.copy(self.env.container.name, symlink.name,
                           parms={'symlink': 'get'})
        self._assert_symlink(symlink, target)
        # symlink overwritten by write then copy -> 2 versions
        self.assertEqual(2, self.env.versions_container.info()['object_count'])
        return symlink, target
|
|
|
|
|
|
|
|
def test_copy_destination_restore_symlink(self):
|
2017-12-25 09:13:17 +00:00
|
|
|
if 'symlink' not in cluster_info:
|
|
|
|
raise SkipTest("Symlinks not enabled")
|
|
|
|
|
2017-12-12 21:39:54 -08:00
|
|
|
symlink, target = self._check_copy_destination_restore_symlink()
|
|
|
|
# and versioned writes restore
|
|
|
|
symlink.delete()
|
|
|
|
self.assertEqual(1, self.env.versions_container.info()['object_count'])
|
2019-08-09 15:59:59 -07:00
|
|
|
self.assertEqual(b'this is not a symlink', symlink.read())
|
2017-12-12 21:39:54 -08:00
|
|
|
symlink.delete()
|
|
|
|
self.assertEqual(0, self.env.versions_container.info()['object_count'])
|
|
|
|
self._assert_symlink(symlink, target)
|
|
|
|
|
|
|
|
    def test_put_x_copy_from_restore_symlink(self):
        """Restore an overwritten symlink via PUT + X-Copy-From (rather
        than the COPY verb) and verify it is a symlink again."""
        if 'symlink' not in cluster_info:
            raise SkipTest("Symlinks not enabled")

        symlink, target = self._setup_symlink()
        symlink.write(b'this is not a symlink')
        version_container_files = self.env.versions_container.files()
        self.assertEqual(1, len(version_container_files))
        versioned_obj = self.env.versions_container.file(
            version_container_files[0])
        # PUT with X-Copy-From; no_content_type lets the copied symlink's
        # own content type survive.
        # NOTE(review): '%s' on self.env.versions_container relies on the
        # container object's string conversion yielding its name — confirm
        # against swift_test_client.
        symlink.write(parms={'symlink': 'get'}, cfg={
            'no_content_type': True}, hdrs={
                'X-Copy-From': '%s/%s' % (
                    self.env.versions_container, versioned_obj.name)})
        self._assert_symlink(symlink, target)
|
|
|
|
|
2016-11-17 17:17:00 -05:00
|
|
|
|
|
|
|
class TestObjectVersioningUTF8(Base2, TestObjectVersioning):
    """Re-run TestObjectVersioning through Base2 (presumably with
    non-ASCII names — confirm Base2's behavior in tests.py)."""

    def tearDown(self):
        # these tests create extra containers outside self.env, so clean
        # them up before the base-class teardown
        self._tear_down_files()
        super(TestObjectVersioningUTF8, self).tearDown()
|
|
|
|
|
|
|
|
|
|
|
|
class TestCrossPolicyObjectVersioning(TestObjectVersioning):
    """Run the versioning tests with the versions container on a
    different storage policy than the primary container."""
    env = TestCrossPolicyObjectVersioningEnv

    def setUp(self):
        super(TestCrossPolicyObjectVersioning, self).setUp()
        if self.env.multiple_policies_enabled is False:
            raise SkipTest('Cross policy test requires multiple policies')
        elif self.env.multiple_policies_enabled is not True:
            # just some sanity checking: env setup should have resolved
            # this tri-state flag to True or False.
            # BUG FIX: the message previously interpolated
            # self.env.versioning_enabled, which made the diagnostic
            # misleading when this ever fired.
            raise Exception("Expected multiple_policies_enabled "
                            "to be True/False, got %r" % (
                                self.env.multiple_policies_enabled,))
|
|
|
|
|
|
|
|
|
|
|
|
class TestObjectVersioningHistoryMode(TestObjectVersioning):
    """Versioning tests for x-history-location mode, where DELETE writes
    a delete marker into the versions container instead of restoring the
    previous version."""
    env = TestObjectVersioningHistoryModeEnv

    # These overridden tests include assertions about deleting versioned
    # objects whose behavior differs from default object versioning using
    # x-versions-location.

    def test_overwriting(self):
        versions_container = self.env.versions_container
        versioned_obj, expected_headers, expected_content_types = \
            self._test_overwriting_setup()

        # test delete
        # at first, delete will succeed with 204
        versioned_obj.delete()
        expected_content_types.append(
            'application/x-deleted;swift_versions_deleted=1')
        # after that, any time the delete doesn't restore the old version
        # and we will get 404 NotFound
        for x in range(3):
            with self.assertRaises(ResponseError) as cm:
                versioned_obj.delete()
            self.assertEqual(404, cm.exception.status)
            # each failed delete still records a delete marker
            expected_content_types.append(
                'application/x-deleted;swift_versions_deleted=1')
        # finally, we have 4 versioned items and 4 delete markers total in
        # the versions container
        self.assertEqual(8, versions_container.info()['object_count'])
        self.assertEqual(expected_content_types, [
            o['content_type'] for o in versions_container.files(
                parms={'format': 'json'})])

        # update versioned_obj
        versioned_obj.write(b"eeee", hdrs={'Content-Type': 'text/thanksgiving',
                                           'X-Object-Meta-Bar': 'foo'})
        # verify the PUT object is kept successfully
        obj_info = versioned_obj.info()
        self.assertEqual('text/thanksgiving', obj_info['content_type'])

        # we still have delete-marker there
        self.assertEqual(8, versions_container.info()['object_count'])

        # update versioned_obj
        versioned_obj.write(b"ffff", hdrs={'Content-Type': 'text/teriyaki',
                                           'X-Object-Meta-Food': 'chickin'})
        # verify the PUT object is kept successfully
        obj_info = versioned_obj.info()
        self.assertEqual('text/teriyaki', obj_info['content_type'])

        # new obj will be inserted after delete-marker there
        self.assertEqual(9, versions_container.info()['object_count'])

        # in history mode a delete never restores: read 404s, and the
        # deleted version plus a new marker land in versions_container
        versioned_obj.delete()
        with self.assertRaises(ResponseError) as cm:
            versioned_obj.read()
        self.assertEqual(404, cm.exception.status)
        self.assertEqual(11, versions_container.info()['object_count'])

    def test_overwriting_with_url_encoded_object_name(self):
        # same flow as test_overwriting, but with '%25ff' in the object
        # name to exercise URL-encoding in version paths
        versions_container = self.env.versions_container
        obj_name = Utils.create_name() + '%25ff'
        versioned_obj, expected_headers, expected_content_types = \
            self._test_overwriting_setup(obj_name)

        # test delete
        # at first, delete will succeed with 204
        versioned_obj.delete()
        expected_content_types.append(
            'application/x-deleted;swift_versions_deleted=1')
        # after that, any time the delete doesn't restore the old version
        # and we will get 404 NotFound
        for x in range(3):
            with self.assertRaises(ResponseError) as cm:
                versioned_obj.delete()
            self.assertEqual(404, cm.exception.status)
            expected_content_types.append(
                'application/x-deleted;swift_versions_deleted=1')
        # finally, we have 4 versioned items and 4 delete markers total in
        # the versions container
        self.assertEqual(8, versions_container.info()['object_count'])
        self.assertEqual(expected_content_types, [
            o['content_type'] for o in versions_container.files(
                parms={'format': 'json'})])

        # update versioned_obj
        versioned_obj.write(b"eeee", hdrs={'Content-Type': 'text/thanksgiving',
                                           'X-Object-Meta-Bar': 'foo'})
        # verify the PUT object is kept successfully
        obj_info = versioned_obj.info()
        self.assertEqual('text/thanksgiving', obj_info['content_type'])

        # we still have delete-marker there
        self.assertEqual(8, versions_container.info()['object_count'])

        # update versioned_obj
        versioned_obj.write(b"ffff", hdrs={'Content-Type': 'text/teriyaki',
                                           'X-Object-Meta-Food': 'chickin'})
        # verify the PUT object is kept successfully
        obj_info = versioned_obj.info()
        self.assertEqual('text/teriyaki', obj_info['content_type'])

        # new obj will be inserted after delete-marker there
        self.assertEqual(9, versions_container.info()['object_count'])

        versioned_obj.delete()
        with self.assertRaises(ResponseError) as cm:
            versioned_obj.read()
        self.assertEqual(404, cm.exception.status)

    def test_versioning_dlo(self):
        obj_name, man_file = \
            self._test_versioning_dlo_setup()

        # history mode: deleting the manifest does not restore; a marker
        # and the deleted manifest are recorded instead
        man_file.delete()
        with self.assertRaises(ResponseError) as cm:
            man_file.read()
        self.assertEqual(404, cm.exception.status)
        self.assertEqual(7, self.env.versions_container.info()['object_count'])

        expected = [b'old content', b'112233', b'new content', b'']

        # version prefixes are '<3-hex-len><name>/'; length is of the
        # utf8-encoded name on py3
        name_len = len(obj_name if six.PY2 else obj_name.encode('utf8'))
        bodies = [
            self.env.versions_container.file(f).read()
            for f in self.env.versions_container.files(parms={
                'prefix': '%03x%s/' % (name_len, obj_name)})]
        self.assertEqual(expected, bodies)

    def test_versioning_check_acl(self):
        if tf.skip2:
            raise SkipTest('Account2 not set')
        versioned_obj = self._test_versioning_check_acl_setup()
        # history mode: owner DELETE leaves a marker, object reads 404
        versioned_obj.delete()
        with self.assertRaises(ResponseError) as cm:
            versioned_obj.read()
        self.assertEqual(404, cm.exception.status)

        # we have 3 objects in the versions_container, 'aaaaa', 'bbbbb'
        # and delete-marker with empty content
        self.assertEqual(3, self.env.versions_container.info()['object_count'])
        files = self.env.versions_container.files()
        for actual, expected in zip(files, [b'aaaaa', b'bbbbb', b'']):
            prev_version = self.env.versions_container.file(actual)
            self.assertEqual(expected, prev_version.read())

    def test_overwriting_symlink(self):
        if 'symlink' not in cluster_info:
            raise SkipTest("Symlinks not enabled")

        symlink, target = self._check_overwriting_symlink()
        # test delete: in history mode nothing is restored
        symlink.delete()
        with self.assertRaises(ResponseError) as cm:
            symlink.read()
        self.assertEqual(404, cm.exception.status)

    def test_copy_destination_restore_symlink(self):
        if 'symlink' not in cluster_info:
            raise SkipTest("Symlinks not enabled")

        symlink, target = self._check_copy_destination_restore_symlink()
        symlink.delete()
        with self.assertRaises(ResponseError) as cm:
            symlink.read()
        self.assertEqual(404, cm.exception.status)
        # 2 versions plus delete marker and deleted version
        self.assertEqual(4, self.env.versions_container.info()['object_count'])
|
|
|
|
|
2016-11-17 17:17:00 -05:00
|
|
|
|
2019-09-23 16:21:36 -07:00
|
|
|
class TestObjectVersioningHistoryModeUTF8(
        Base2, TestObjectVersioningHistoryMode):
    """Base2 variant of the history-mode tests (presumably non-ASCII
    names — see Base2 in tests.py)."""
    pass
|
|
|
|
|
|
|
|
|
2020-01-12 03:10:25 -06:00
|
|
|
class TestSloWithVersioning(unittest.TestCase):
    """Verify that SLO manifests are versioned as manifests: an
    overwritten or deleted manifest's saved version must still expand to
    the full large object."""

    def setUp(self):
        if 'slo' not in cluster_info:
            raise SkipTest("SLO not enabled")
        if tf.in_process:
            tf.skip_if_no_xattrs()

        self.conn = Connection(tf.config)
        self.conn.authenticate()
        self.account = Account(
            self.conn, tf.config.get('account', tf.config['username']))
        self.account.delete_containers()

        # create a container with versioning
        self.versions_container = self.account.container(Utils.create_name())
        self.container = self.account.container(Utils.create_name())
        self.segments_container = self.account.container(Utils.create_name())
        if not self.container.create(
                hdrs={'X-Versions-Location': self.versions_container.name}):
            # 412 means the cluster refused the versioning header
            if self.conn.response.status == 412:
                raise SkipTest("Object versioning not enabled")
            else:
                raise ResponseError(self.conn.response)
        if 'versions' not in self.container.info():
            raise SkipTest("Object versioning not enabled")

        for cont in (self.versions_container, self.segments_container):
            if not cont.create():
                raise ResponseError(self.conn.response)

        # create some segments: 1 MiB each of 'a's and 'b's
        self.seg_info = {}
        for letter, size in (('a', 1024 * 1024),
                             ('b', 1024 * 1024)):
            seg_name = letter
            file_item = self.segments_container.file(seg_name)
            file_item.write((letter * size).encode('ascii'))
            self.seg_info[seg_name] = {
                'size_bytes': size,
                'etag': file_item.md5,
                'path': '/%s/%s' % (self.segments_container.name, seg_name)}

    def _create_manifest(self, seg_name):
        # create a manifest in the versioning container
        file_item = self.container.file("my-slo-manifest")
        file_item.write(
            json.dumps([self.seg_info[seg_name]]).encode('ascii'),
            parms={'multipart-manifest': 'put'})
        return file_item

    def _assert_is_manifest(self, file_item, seg_name):
        """Assert *file_item* is an SLO manifest referencing segment
        *seg_name*."""
        manifest_body = file_item.read(parms={'multipart-manifest': 'get'})
        resp_headers = {
            h.lower(): v for h, v in file_item.conn.response.getheaders()}
        self.assertIn('x-static-large-object', resp_headers)
        self.assertEqual('application/json; charset=utf-8',
                         file_item.content_type)
        try:
            manifest = json.loads(manifest_body)
        except ValueError:
            self.fail("GET with multipart-manifest=get got invalid json")

        self.assertEqual(1, len(manifest))
        # client-side keys map to different names in the stored manifest
        key_map = {'etag': 'hash', 'size_bytes': 'bytes'}
        for k_client, k_slo in key_map.items():
            self.assertEqual(self.seg_info[seg_name][k_client],
                             manifest[0][k_slo])
        if six.PY2:
            self.assertEqual(self.seg_info[seg_name]['path'].decode('utf8'),
                             manifest[0]['name'])
        else:
            self.assertEqual(self.seg_info[seg_name]['path'],
                             manifest[0]['name'])

    def _assert_is_object(self, file_item, seg_data):
        """Assert the expanded large object is 1 MiB of *seg_data*."""
        file_contents = file_item.read()
        self.assertEqual(1024 * 1024, len(file_contents))
        self.assertEqual(seg_data, file_contents[:1])
        self.assertEqual(seg_data, file_contents[-1:])

    def tearDown(self):
        # remove versioning to allow simple container delete
        self.container.update_metadata(hdrs={'X-Versions-Location': ''})
        self.account.delete_containers()

    def test_slo_manifest_version(self):
        file_item = self._create_manifest('a')
        # sanity check: read the manifest, then the large object
        self._assert_is_manifest(file_item, 'a')
        self._assert_is_object(file_item, b'a')

        # upload new manifest
        file_item = self._create_manifest('b')
        # sanity check: read the manifest, then the large object
        self._assert_is_manifest(file_item, 'b')
        self._assert_is_object(file_item, b'b')

        versions_list = self.versions_container.files()
        self.assertEqual(1, len(versions_list))
        version_file = self.versions_container.file(versions_list[0])
        # check the version is still a manifest
        self._assert_is_manifest(version_file, 'a')
        self._assert_is_object(version_file, b'a')

        # delete the newest manifest
        file_item.delete()

        # expect the original manifest file to be restored
        self._assert_is_manifest(file_item, 'a')
        self._assert_is_object(file_item, b'a')

    def test_slo_manifest_version_size(self):
        file_item = self._create_manifest('a')
        # sanity check: read the manifest, then the large object
        self._assert_is_manifest(file_item, 'a')
        self._assert_is_object(file_item, b'a')

        # original manifest size (the listing 'bytes' should reflect the
        # manifest, and must survive the versioning round-trip)
        primary_list = self.container.files(parms={'format': 'json'})
        self.assertEqual(1, len(primary_list))
        org_size = primary_list[0]['bytes']

        # upload new manifest
        file_item = self._create_manifest('b')
        # sanity check: read the manifest, then the large object
        self._assert_is_manifest(file_item, 'b')
        self._assert_is_object(file_item, b'b')

        versions_list = self.versions_container.files(parms={'format': 'json'})
        self.assertEqual(1, len(versions_list))
        version_file = self.versions_container.file(versions_list[0]['name'])
        version_file_size = versions_list[0]['bytes']
        # check the version is still a manifest
        self._assert_is_manifest(version_file, 'a')
        self._assert_is_object(version_file, b'a')

        # check the version size is correct
        self.assertEqual(version_file_size, org_size)

        # delete the newest manifest
        file_item.delete()

        # expect the original manifest file to be restored
        self._assert_is_manifest(file_item, 'a')
        self._assert_is_object(file_item, b'a')

        primary_list = self.container.files(parms={'format': 'json'})
        self.assertEqual(1, len(primary_list))
        primary_file_size = primary_list[0]['bytes']
        # expect the original manifest file size to be the same
        self.assertEqual(primary_file_size, org_size)
|
2019-08-23 19:14:37 +02:00
|
|
|
|
|
|
|
|
2019-09-23 16:21:36 -07:00
|
|
|
class TestSloWithVersioningUTF8(Base2, TestSloWithVersioning):
    """Base2 variant of the SLO-with-versioning tests (presumably
    non-ASCII names — see Base2 in tests.py)."""
    pass
|
|
|
|
|
|
|
|
|
2019-08-23 19:14:37 +02:00
|
|
|
class TestObjectVersioningChangingMode(Base):
    """Exercise flipping a container between history mode
    (X-History-Location) and stack mode (X-Versions-Location) while
    history-mode delete markers remain in the versions container."""
    env = TestObjectVersioningHistoryModeEnv

    def test_delete_while_changing_mode(self):
        container = self.env.container
        versions_container = self.env.versions_container
        cont_info = container.info()
        self.assertEqual(cont_info['versions'], quote(versions_container.name))

        obj_name = Utils.create_name()
        versioned_obj = container.file(obj_name)

        versioned_obj.write(
            b"version1", hdrs={'Content-Type': 'text/jibberish01'})
        versioned_obj.write(
            b"version2", hdrs={'Content-Type': 'text/jibberish01'})

        # sanity, version1 object should have moved to versions_container
        self.assertEqual(1, versions_container.info()['object_count'])

        versioned_obj.delete()

        # version2 and the delete marker should have put in versions_container
        self.assertEqual(3, versions_container.info()['object_count'])
        delete_marker_name = versions_container.files()[2]
        delete_marker = versions_container.file(delete_marker_name)
        delete_marker.initialize()
        self.assertEqual(
            delete_marker.content_type,
            'application/x-deleted;swift_versions_deleted=1')

        # change to stack mode
        hdrs = {'X-Versions-Location': versions_container.name}
        container.update_metadata(hdrs=hdrs)

        # stack-mode DELETE pops entries off the versions stack; the
        # first pop consumes the delete marker and restores version2
        versioned_obj.delete()

        # version2 object should have been moved in container
        self.assertEqual(b"version2", versioned_obj.read())

        # and there's only one version1 is left in versions_container
        self.assertEqual(1, versions_container.info()['object_count'])
        versioned_obj_name = versions_container.files()[0]
        prev_version = versions_container.file(versioned_obj_name)
        prev_version.initialize()
        self.assertEqual(b"version1", prev_version.read())
        self.assertEqual(prev_version.content_type, 'text/jibberish01')

        # reset and test double delete
        # change back to history mode
        hdrs = {'X-History-Location': versions_container.name}
        container.update_metadata(hdrs=hdrs)

        # double delete, second DELETE returns a 404 as expected
        versioned_obj.delete()
        with self.assertRaises(ResponseError) as cm:
            versioned_obj.delete()
        self.assertEqual(404, cm.exception.status)

        # There should now be 4 objects total in versions_container
        # 2 are delete markers
        self.assertEqual(4, versions_container.info()['object_count'])

        # change to stack mode
        hdrs = {'X-Versions-Location': versions_container.name}
        container.update_metadata(hdrs=hdrs)

        # a delete, just deletes one delete marker, it doesn't yet pop
        # version2 back in the container
        # This DELETE doesn't return a 404!
        versioned_obj.delete()
        self.assertEqual(3, versions_container.info()['object_count'])
        self.assertEqual(0, container.info()['object_count'])

        # neither does this one!
        versioned_obj.delete()

        # version2 object should have been moved in container
        self.assertEqual(b"version2", versioned_obj.read())

        # and there's only one version1 is left in versions_container
        self.assertEqual(1, versions_container.info()['object_count'])
|
2019-09-23 16:21:36 -07:00
|
|
|
|
|
|
|
|
|
|
|
class TestObjectVersioningChangingModeUTF8(
        Base2, TestObjectVersioningChangingMode):
    """Base2 variant of the mode-changing tests (presumably non-ASCII
    names — see Base2 in tests.py)."""
    pass
|