From 04ff0927eeda0cc3ae2409c6fe6c5a9b6e39fdd2 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Mon, 24 Aug 2015 15:41:23 -0700 Subject: [PATCH 001/141] Make the object auditor's run-once mode run once. If you invoked the object auditor with --once, it would run the full-audit checker(s) once, but it would run the ZBF checker over and over until the full-audit checkers were done. Now it runs the ZBF and full-audit checkers once each. Change-Id: Ieeaa6fba4184a069756ee150727f24df7833697a --- swift/obj/auditor.py | 17 +++-- test/unit/obj/test_auditor.py | 117 +++++++++++++++++++++++++++++++++- 2 files changed, 128 insertions(+), 6 deletions(-) diff --git a/swift/obj/auditor.py b/swift/obj/auditor.py index 4875bb2520..4bd0c1eb47 100644 --- a/swift/obj/auditor.py +++ b/swift/obj/auditor.py @@ -268,6 +268,7 @@ class ObjectAuditor(Daemon): """Parallel audit loop""" self.clear_recon_cache('ALL') self.clear_recon_cache('ZBF') + once = kwargs.get('mode') == 'once' kwargs['device_dirs'] = override_devices if parent: kwargs['zero_byte_fps'] = zbo_fps @@ -294,13 +295,18 @@ class ObjectAuditor(Daemon): if len(pids) == parallel_proc: pid = os.wait()[0] pids.remove(pid) - # ZBF scanner must be restarted as soon as it finishes - if self.conf_zero_byte_fps and pid == zbf_pid: + + if self.conf_zero_byte_fps and pid == zbf_pid and once: + # If we're only running one pass and the ZBF scanner + # finished, don't bother restarting it. + zbf_pid = -100 + elif self.conf_zero_byte_fps and pid == zbf_pid: + # When we're running forever, the ZBF scanner must + # be restarted as soon as it finishes. 
kwargs['device_dirs'] = override_devices # sleep between ZBF scanner forks self._sleep() - zbf_pid = self.fork_child(zero_byte_fps=True, - **kwargs) + zbf_pid = self.fork_child(zero_byte_fps=True, **kwargs) pids.append(zbf_pid) else: kwargs['device_dirs'] = [device_list.pop()] @@ -308,8 +314,9 @@ class ObjectAuditor(Daemon): while pids: pid = os.wait()[0] # ZBF scanner must be restarted as soon as it finishes + # unless we're in run-once mode if self.conf_zero_byte_fps and pid == zbf_pid and \ - len(pids) > 1: + len(pids) > 1 and not once: kwargs['device_dirs'] = override_devices # sleep between ZBF scanner forks self._sleep() diff --git a/test/unit/obj/test_auditor.py b/test/unit/obj/test_auditor.py index 2429879e5d..61aa4d2f57 100644 --- a/test/unit/obj/test_auditor.py +++ b/test/unit/obj/test_auditor.py @@ -34,6 +34,19 @@ _mocked_policies = [StoragePolicy(0, 'zero', False), StoragePolicy(1, 'one', True)] +def works_only_once(callable_thing, exception): + called = [False] + + def only_once(*a, **kw): + if called[0]: + raise exception + else: + called[0] = True + return callable_thing(*a, **kw) + + return only_once + + @patch_policies(_mocked_policies) class TestAuditor(unittest.TestCase): @@ -548,6 +561,9 @@ class TestAuditor(unittest.TestCase): loop_error = Bogus('exception') + class LetMeOut(BaseException): + pass + class ObjectAuditorMock(object): check_args = () check_kwargs = {} @@ -644,11 +660,13 @@ class TestAuditor(unittest.TestCase): self.assertEqual(mocker.wait_called, 1) my_auditor._sleep = mocker.mock_sleep_continue + my_auditor.audit_loop = works_only_once(my_auditor.audit_loop, + LetMeOut()) my_auditor.concurrency = 2 mocker.fork_called = 0 mocker.wait_called = 0 - my_auditor.run_once() + self.assertRaises(LetMeOut, my_auditor.run_forever) # Fork is called no. of devices + (no. 
of devices)/2 + 1 times # since zbf process is forked (no.of devices)/2 + 1 times no_devices = len(os.listdir(self.devices)) @@ -661,5 +679,102 @@ class TestAuditor(unittest.TestCase): os.fork = was_fork os.wait = was_wait + def test_run_audit_once(self): + my_auditor = auditor.ObjectAuditor(dict(devices=self.devices, + mount_check='false', + zero_byte_files_per_second=89, + concurrency=1)) + + forked_pids = [] + next_zbf_pid = [2] + next_normal_pid = [1001] + outstanding_pids = [[]] + + def fake_fork_child(**kwargs): + if len(forked_pids) > 10: + # something's gone horribly wrong + raise BaseException("forking too much") + + # ZBF pids are all smaller than the normal-audit pids; this way + # we can return them first. + # + # Also, ZBF pids are even and normal-audit pids are odd; this is + # so humans seeing this test fail can better tell what's happening. + if kwargs.get('zero_byte_fps'): + pid = next_zbf_pid[0] + next_zbf_pid[0] += 2 + else: + pid = next_normal_pid[0] + next_normal_pid[0] += 2 + outstanding_pids[0].append(pid) + forked_pids.append(pid) + return pid + + def fake_os_wait(): + # Smallest pid first; that's ZBF if we have one, else normal + outstanding_pids[0].sort() + pid = outstanding_pids[0].pop(0) + return (pid, 0) # (pid, status) + + with mock.patch("swift.obj.auditor.os.wait", fake_os_wait), \ + mock.patch.object(my_auditor, 'fork_child', fake_fork_child), \ + mock.patch.object(my_auditor, '_sleep', lambda *a: None): + my_auditor.run_once() + + self.assertEqual(sorted(forked_pids), [2, 1001]) + + def test_run_parallel_audit_once(self): + my_auditor = auditor.ObjectAuditor( + dict(devices=self.devices, mount_check='false', + zero_byte_files_per_second=89, concurrency=2)) + + # ZBF pids are smaller than the normal-audit pids; this way we can + # return them first from our mocked os.wait(). + # + # Also, ZBF pids are even and normal-audit pids are odd; this is so + # humans seeing this test fail can better tell what's happening. 
+ forked_pids = [] + next_zbf_pid = [2] + next_normal_pid = [1001] + outstanding_pids = [[]] + + def fake_fork_child(**kwargs): + if len(forked_pids) > 10: + # something's gone horribly wrong; try not to hang the test + # run because of it + raise BaseException("forking too much") + + if kwargs.get('zero_byte_fps'): + pid = next_zbf_pid[0] + next_zbf_pid[0] += 2 + else: + pid = next_normal_pid[0] + next_normal_pid[0] += 2 + outstanding_pids[0].append(pid) + forked_pids.append(pid) + return pid + + def fake_os_wait(): + if not outstanding_pids[0]: + raise BaseException("nobody waiting") + + # ZBF auditor finishes first + outstanding_pids[0].sort() + pid = outstanding_pids[0].pop(0) + return (pid, 0) # (pid, status) + + # make sure we've got enough devs that the ZBF auditor can finish + # before all the normal auditors have been started + mkdirs(os.path.join(self.devices, 'sdc')) + mkdirs(os.path.join(self.devices, 'sdd')) + + with mock.patch("swift.obj.auditor.os.wait", fake_os_wait), \ + mock.patch.object(my_auditor, 'fork_child', fake_fork_child), \ + mock.patch.object(my_auditor, '_sleep', lambda *a: None): + my_auditor.run_once() + + self.assertEqual(sorted(forked_pids), [2, 1001, 1003, 1005, 1007]) + + if __name__ == '__main__': unittest.main() From 432e280aef1ba08bbb8dc239260604024325c9f6 Mon Sep 17 00:00:00 2001 From: Timur Alperovich Date: Wed, 15 Jul 2015 14:22:45 -0700 Subject: [PATCH 002/141] Correctly handle keys starting with the delimiter. When processing keys where the names start with the delimiter character, swift should list only the delimiter character. To get the list of nested keys, the caller should also supply the prefix which is equal to the delimiter. Added a functional test and unit tests to verify this behavior. 
Fixes Bug: 1475018 Change-Id: I27701a31bfa22842c272b7781738e8c546b82cbc --- swift/container/backend.py | 2 +- test/functional/tests.py | 14 ++++++++++++++ test/unit/container/test_backend.py | 3 +++ test/unit/container/test_server.py | 24 ++++++++++++++++++++++++ 4 files changed, 42 insertions(+), 1 deletion(-) diff --git a/swift/container/backend.py b/swift/container/backend.py index 15155e252b..c3aac55723 100644 --- a/swift/container/backend.py +++ b/swift/container/backend.py @@ -674,7 +674,7 @@ class ContainerBroker(DatabaseBroker): marker = name[:end] + chr(ord(delimiter) + 1) curs.close() break - elif end > 0: + elif end >= 0: if reverse: end_marker = name[:end + 1] else: diff --git a/test/functional/tests.py b/test/functional/tests.py index fcc239c4c4..ad5437ad6d 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -610,6 +610,20 @@ class TestContainer(Base): 'reverse': 'yes'}) self.assertEqual(results, ['baza', 'bar']) + def testLeadingDelimiter(self): + cont = self.env.account.container(Utils.create_name()) + self.assertTrue(cont.create()) + + delimiter = '/' + files = ['test', delimiter.join(['', 'test', 'bar']), + delimiter.join(['', 'test', 'bar', 'foo'])] + for f in files: + file_item = cont.file(f) + self.assertTrue(file_item.write_random()) + + results = cont.files(parms={'delimiter': delimiter}) + self.assertEqual(results, [delimiter, 'test']) + def testCreate(self): cont = self.env.account.container(Utils.create_name()) self.assertTrue(cont.create()) diff --git a/test/unit/container/test_backend.py b/test/unit/container/test_backend.py index 721f0f9094..ab53ebf65c 100644 --- a/test/unit/container/test_backend.py +++ b/test/unit/container/test_backend.py @@ -1187,6 +1187,9 @@ class TestContainerBroker(unittest.TestCase): listing = broker.list_objects_iter(100, None, None, '/pets/fish/', '/') self.assertEqual([row[0] for row in listing], ['/pets/fish/a', '/pets/fish/b']) + listing = broker.list_objects_iter(100, None, None, None, 
'/') +        self.assertEqual([row[0] for row in listing], +                         ['/'])      def test_list_objects_iter_order_and_reverse(self):         # Test ContainerBroker.list_objects_iter diff --git a/test/unit/container/test_server.py b/test/unit/container/test_server.py index fb414207d5..17a8ed085a 100644 --- a/test/unit/container/test_server.py +++ b/test/unit/container/test_server.py @@ -2093,6 +2093,30 @@ class TestContainerController(unittest.TestCase):                           {"subdir": "US-TX-"},                           {"subdir": "US-UT-"}]) +    def test_GET_leading_delimiter(self): +        req = Request.blank( +            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', +                                    'HTTP_X_TIMESTAMP': '0'}) +        resp = req.get_response(self.controller) +        for i in ('US-TX-A', 'US-TX-B', '-UK', '-CH'): +            req = Request.blank( +                '/sda1/p/a/c/%s' % i, +                environ={ +                    'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1', +                    'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x', +                    'HTTP_X_SIZE': 0}) +            self._update_object_put_headers(req) +            resp = req.get_response(self.controller) +            self.assertEqual(resp.status_int, 201) +        req = Request.blank( +            '/sda1/p/a/c?delimiter=-&format=json', +            environ={'REQUEST_METHOD': 'GET'}) +        resp = req.get_response(self.controller) +        self.assertEqual( +            json.loads(resp.body), +            [{"subdir": "-"}, +             {"subdir": "US-"}]) +     def test_GET_delimiter_xml(self):         req = Request.blank(             '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', From ceaaedbe4057542e374200673dd323ba64bb42eb Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 11 Mar 2015 11:22:53 +0000 Subject: [PATCH 003/141] Make functional tests tolerate pre-existing versioned container test/functional/tests.py:TestAccount.testAccountHead relies on the account having a known number of containers. The test setup attempts to delete all existing containers but this fails if a container has versions (and so is not emptied, and cannot be deleted). The test then fails because the expected number of containers does not match the actual.
'bin/resetswift' before running tests will obviously clear all state but is not always convenient. This change removes any x-versions-location header before deleting containers during test setUp. Steps to recreate the pre-condition for failure on master: (based on original work by clayg) swift post target -r '.r:*, .rlistings' swift post source -H 'x-versions-location: target' for i in {1..4}; do echo "junk${i}" > junk swift upload source junk done Co-Authored-By: Clay Gerrard Change-Id: I3efb6c20dc1fb3e979087e8a93d04ba7e346b5b6 --- test/functional/swift_test_client.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/functional/swift_test_client.py b/test/functional/swift_test_client.py index 7d08e1f2ee..3c9bb0b5e2 100644 --- a/test/functional/swift_test_client.py +++ b/test/functional/swift_test_client.py @@ -460,6 +460,7 @@ class Account(Base): def delete_containers(self): for c in listing_items(self.containers): cont = self.container(c) + cont.update_metadata(hdrs={'x-versions-location': ''}) if not cont.delete_recursive(): return False From 335d58611d0daf87783e565b521974b456f5c636 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?= Date: Tue, 15 Dec 2015 22:42:18 +0100 Subject: [PATCH 004/141] Keystone middleware deprecated option is_admin removed It has been deprecated from Swift 1.8.0 (Grizzly) Change-Id: Id6bc10c3e84262c0a9e6160a76af03c0ad363e9c --- doc/manpages/proxy-server.conf.5 | 5 ----- etc/proxy-server.conf-sample | 6 ------ swift/common/middleware/keystoneauth.py | 20 +++++------------- .../common/middleware/test_keystoneauth.py | 21 +++++++------------ 4 files changed, 13 insertions(+), 39 deletions(-) diff --git a/doc/manpages/proxy-server.conf.5 b/doc/manpages/proxy-server.conf.5 index 24e41aef84..07539ff7b2 100644 --- a/doc/manpages/proxy-server.conf.5 +++ b/doc/manpages/proxy-server.conf.5 @@ -330,11 +330,6 @@ This allows middleware higher in the WSGI pipeline to override auth processing, useful for middleware such as tempurl 
and formpost. If you know you're not going to use such middleware and you want a bit of extra security, you can set this to false. -.IP \fBis_admin\fR -[DEPRECATED] If is_admin is true, a user whose username is the same as the project name -and who has any role on the project will have access rights elevated to be -the same as if the user had an operator role. Note that the condition -compares names rather than UUIDs. This option is deprecated. .IP \fBservice_roles\fR If the service_roles parameter is present, an X-Service-Token must be present in the request that when validated, grants at least one role listed diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index 59c5cc02aa..0e7de10daa 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -337,12 +337,6 @@ user_test5_tester5 = testing5 service # you can set this to false. # allow_overrides = true # -# If is_admin is true, a user whose username is the same as the project name -# and who has any role on the project will have access rights elevated to be -# the same as if the user had an operator role. Note that the condition -# compares names rather than UUIDs. This option is deprecated. -# is_admin = false -# # If the service_roles parameter is present, an X-Service-Token must be # present in the request that when validated, grants at least one role listed # in the parameter. The X-Service-Token may be scoped to any project. diff --git a/swift/common/middleware/keystoneauth.py b/swift/common/middleware/keystoneauth.py index a00701c39b..651aeacfbb 100644 --- a/swift/common/middleware/keystoneauth.py +++ b/swift/common/middleware/keystoneauth.py @@ -75,12 +75,6 @@ class KeystoneAuth(object): id.. For example, if the project id is ``1234``, the path is ``/v1/AUTH_1234``. 
- If the ``is_admin`` option is ``true``, a user whose username is the same - as the project name and who has any role on the project will have access - rights elevated to be the same as if the user had one of the - ``operator_roles``. Note that the condition compares names rather than - UUIDs. This option is deprecated. It is ``false`` by default. - If you need to have a different reseller_prefix to be able to mix different auth servers you can configure the option ``reseller_prefix`` in your keystoneauth entry like this:: @@ -188,7 +182,11 @@ class KeystoneAuth(object): self.reseller_admin_role = conf.get('reseller_admin_role', 'ResellerAdmin').lower() config_is_admin = conf.get('is_admin', "false").lower() - self.is_admin = swift_utils.config_true_value(config_is_admin) + if swift_utils.config_true_value(config_is_admin): + self.logger.warning("The 'is_admin' option for keystoneauth is no " + "longer supported. Remove the 'is_admin' " + "option from your keystoneauth config") + config_overrides = conf.get('allow_overrides', 't').lower() self.allow_overrides = swift_utils.config_true_value(config_overrides) self.default_domain_id = conf.get('default_domain_id', 'default') @@ -484,14 +482,6 @@ class KeystoneAuth(object): req.environ['swift_owner'] = True return - # If user is of the same name of the tenant then make owner of it. 
- if self.is_admin and user_name == tenant_name: - self.logger.warning("the is_admin feature has been deprecated " - "and will be removed in the future " - "update your config file") - req.environ['swift_owner'] = True - return - if acl_authorized is not None: return self.denied_response(req) diff --git a/test/unit/common/middleware/test_keystoneauth.py b/test/unit/common/middleware/test_keystoneauth.py index a81565119d..81b27fad12 100644 --- a/test/unit/common/middleware/test_keystoneauth.py +++ b/test/unit/common/middleware/test_keystoneauth.py @@ -647,21 +647,16 @@ class TestAuthorize(BaseTestAuthorize): req = self._check_authenticate(identity=identity) self.assertTrue(req.environ.get('swift_owner')) - def _check_authorize_for_tenant_owner_match(self, exception=None): + def test_authorize_fails_same_user_and_tenant(self): + # Historically the is_admin option allowed access when user_name + # matched tenant_name, but it is no longer supported. This test is a + # sanity check that the option no longer works. 
+ self.test_auth.is_admin = True identity = self._get_identity(user_name='same_name', tenant_name='same_name') - req = self._check_authenticate(identity=identity, exception=exception) - expected = bool(exception is None) - self.assertEqual(bool(req.environ.get('swift_owner')), expected) - - def test_authorize_succeeds_as_owner_for_tenant_owner_match(self): - self.test_auth.is_admin = True - self._check_authorize_for_tenant_owner_match() - - def test_authorize_fails_as_owner_for_tenant_owner_match(self): - self.test_auth.is_admin = False - self._check_authorize_for_tenant_owner_match( - exception=HTTP_FORBIDDEN) + req = self._check_authenticate(identity=identity, + exception=HTTP_FORBIDDEN) + self.assertFalse(bool(req.environ.get('swift_owner'))) def test_authorize_succeeds_for_container_sync(self): env = {'swift_sync_key': 'foo', 'REMOTE_ADDR': '127.0.0.1'} From 7aaca029175661d45d05d41cce367c0ae19cab41 Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Thu, 11 Feb 2016 13:18:07 +0100 Subject: [PATCH 005/141] Add comments to role-based access control functional tests Change-Id: Ic18c8a2abc33e2f175f948991af0189935b8702f --- test/functional/test_access_control.py | 219 +++++++++++++++++++++++++ 1 file changed, 219 insertions(+) diff --git a/test/functional/test_access_control.py b/test/functional/test_access_control.py index 7b6a68bd1b..78a4384416 100644 --- a/test/functional/test_access_control.py +++ b/test/functional/test_access_control.py @@ -91,40 +91,55 @@ TEST_CASE_FORMAT = ( # A scenario of put for account, container and object with # several roles. 
RBAC_PUT = [ + # PUT container in own account: ok ('PUT', None, None, 'UUID', None, None, None, 'tester', 'tester', None, 201), ('PUT', None, None, 'UUID', None, None, None, 'tester', 'tester', 'tester', 201), + + # PUT container in other users account: not allowed for role admin ('PUT', None, None, 'UUID', None, None, None, 'tester2', 'tester', None, 403), ('PUT', None, None, 'UUID', None, None, None, 'tester4', 'tester', None, 403), + + # PUT container in other users account: not allowed for role _member_ ('PUT', None, None, 'UUID', None, None, None, 'tester3', 'tester3', None, 403), ('PUT', None, None, 'UUID', None, None, None, 'tester2', 'tester3', None, 403), ('PUT', None, None, 'UUID', None, None, None, 'tester4', 'tester3', None, 403), + + # PUT container in other users account: allowed for role ResellerAdmin ('PUT', None, None, 'UUID', None, None, None, 'tester6', 'tester6', None, 201), ('PUT', None, None, 'UUID', None, None, None, 'tester2', 'tester6', None, 201), ('PUT', None, None, 'UUID', None, None, None, 'tester4', 'tester6', None, 201), + + # PUT object in own account: ok ('PUT', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester', None, 201), ('PUT', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester', 'tester', 201), + + # PUT object in other users account: not allowed for role admin ('PUT', None, None, 'UUID', 'UUID', None, None, 'tester2', 'tester', None, 403), ('PUT', None, None, 'UUID', 'UUID', None, None, 'tester4', 'tester', None, 403), + + # PUT object in other users account: not allowed for role _member_ ('PUT', None, None, 'UUID', 'UUID', None, None, 'tester3', 'tester3', None, 403), ('PUT', None, None, 'UUID', 'UUID', None, None, 'tester2', 'tester3', None, 403), ('PUT', None, None, 'UUID', 'UUID', None, None, 'tester4', 'tester3', None, 403), + + # PUT object in other users account: allowed for role ResellerAdmin ('PUT', None, None, 'UUID', 'UUID', None, None, 'tester6', 'tester6', None, 201), ('PUT', None, None, 'UUID', 
'UUID', None, @@ -135,8 +150,11 @@ RBAC_PUT = [ RBAC_PUT_WITH_SERVICE_PREFIX = [ + # PUT container in own account: ok ('PUT', None, None, 'UUID', None, None, None, 'tester', 'tester', 'tester5', 201), + + # PUT container in other users account: not allowed for role service ('PUT', None, None, 'UUID', None, None, None, 'tester', 'tester3', 'tester5', 403), ('PUT', None, None, 'UUID', None, None, @@ -147,8 +165,12 @@ RBAC_PUT_WITH_SERVICE_PREFIX = [ None, 'tester2', 'tester5', None, 403), ('PUT', None, None, 'UUID', None, None, None, 'tester4', 'tester5', None, 403), + + # PUT object in own account: ok ('PUT', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester', 'tester5', 201), + + # PUT object in other users account: not allowed for role service ('PUT', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester3', 'tester5', 403), ('PUT', None, None, 'UUID', 'UUID', None, @@ -159,8 +181,14 @@ RBAC_PUT_WITH_SERVICE_PREFIX = [ None, 'tester2', 'tester5', None, 403), ('PUT', None, None, 'UUID', 'UUID', None, None, 'tester4', 'tester5', None, 403), + + # All following actions are using SERVICE prefix + + # PUT container in own account: ok ('PUT', None, None, 'UUID', None, None, 'SERVICE', 'tester', 'tester', 'tester5', 201), + + # PUT container fails if wrong user, or only one token sent ('PUT', None, None, 'UUID', None, None, 'SERVICE', 'tester', 'tester3', 'tester5', 403), ('PUT', None, None, 'UUID', None, None, @@ -169,8 +197,12 @@ RBAC_PUT_WITH_SERVICE_PREFIX = [ 'SERVICE', 'tester', 'tester', 'tester', 403), ('PUT', None, None, 'UUID', None, None, 'SERVICE', 'tester', None, 'tester5', 401), + + # PUT object in own account: ok ('PUT', None, None, 'UUID', 'UUID', None, 'SERVICE', 'tester', 'tester', 'tester5', 201), + + # PUT object fails if wrong user, or only one token sent ('PUT', None, None, 'UUID', 'UUID', None, 'SERVICE', 'tester', 'tester3', 'tester5', 403), ('PUT', None, None, 'UUID', 'UUID', None, @@ -185,40 +217,55 @@ RBAC_PUT_WITH_SERVICE_PREFIX = [ 
# A scenario of delete for account, container and object with # several roles. RBAC_DELETE = [ + # DELETE container in own account: ok ('DELETE', None, None, 'UUID', None, None, None, 'tester', 'tester', None, 204), ('DELETE', None, None, 'UUID', None, None, None, 'tester', 'tester', 'tester', 204), + + # DELETE container in other users account: not allowed for role admin ('DELETE', None, None, 'UUID', None, None, None, 'tester2', 'tester', None, 403), ('DELETE', None, None, 'UUID', None, None, None, 'tester4', 'tester', None, 403), + + # DELETE container in other users account: not allowed for role _member_ ('DELETE', None, None, 'UUID', None, None, None, 'tester3', 'tester3', None, 403), ('DELETE', None, None, 'UUID', None, None, None, 'tester2', 'tester3', None, 403), ('DELETE', None, None, 'UUID', None, None, None, 'tester4', 'tester3', None, 403), + + # DELETE container in other users account: allowed for role ResellerAdmin ('DELETE', None, None, 'UUID', None, None, None, 'tester6', 'tester6', None, 204), ('DELETE', None, None, 'UUID', None, None, None, 'tester2', 'tester6', None, 204), ('DELETE', None, None, 'UUID', None, None, None, 'tester4', 'tester6', None, 204), + + # DELETE object in own account: ok ('DELETE', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester', None, 204), ('DELETE', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester', 'tester', 204), + + # DELETE object in other users account: not allowed for role admin ('DELETE', None, None, 'UUID', 'UUID', None, None, 'tester2', 'tester', None, 403), ('DELETE', None, None, 'UUID', 'UUID', None, None, 'tester4', 'tester', None, 403), + + # DELETE object in other users account: not allowed for role _member_ ('DELETE', None, None, 'UUID', 'UUID', None, None, 'tester3', 'tester3', None, 403), ('DELETE', None, None, 'UUID', 'UUID', None, None, 'tester2', 'tester3', None, 403), ('DELETE', None, None, 'UUID', 'UUID', None, None, 'tester4', 'tester3', None, 403), + + # DELETE object in other 
users account: allowed for role ResellerAdmin ('DELETE', None, None, 'UUID', 'UUID', None, None, 'tester6', 'tester6', None, 204), ('DELETE', None, None, 'UUID', 'UUID', None, @@ -229,8 +276,11 @@ RBAC_DELETE = [ RBAC_DELETE_WITH_SERVICE_PREFIX = [ + # DELETE container in own account: ok ('DELETE', None, None, 'UUID', None, None, None, 'tester', 'tester', 'tester5', 204), + + # DELETE container in other users account: not allowed for role service ('DELETE', None, None, 'UUID', None, None, None, 'tester', 'tester3', 'tester5', 403), ('DELETE', None, None, 'UUID', None, None, @@ -241,8 +291,12 @@ RBAC_DELETE_WITH_SERVICE_PREFIX = [ None, 'tester2', 'tester5', None, 403), ('DELETE', None, None, 'UUID', None, None, None, 'tester4', 'tester5', None, 403), + + # DELETE object in own account: ok ('DELETE', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester', 'tester5', 204), + + # DELETE object in other users account: not allowed for role service ('DELETE', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester3', 'tester5', 403), ('DELETE', None, None, 'UUID', 'UUID', None, @@ -253,8 +307,14 @@ RBAC_DELETE_WITH_SERVICE_PREFIX = [ None, 'tester2', 'tester5', None, 403), ('DELETE', None, None, 'UUID', 'UUID', None, None, 'tester4', 'tester5', None, 403), + + # All following actions are using SERVICE prefix + + # DELETE container in own account: ok ('DELETE', None, None, 'UUID', None, None, 'SERVICE', 'tester', 'tester', 'tester5', 204), + + # DELETE container fails if wrong user, or only one token sent ('DELETE', None, None, 'UUID', None, None, 'SERVICE', 'tester', 'tester3', 'tester5', 403), ('DELETE', None, None, 'UUID', None, None, @@ -263,8 +323,12 @@ RBAC_DELETE_WITH_SERVICE_PREFIX = [ 'SERVICE', 'tester', 'tester', 'tester', 403), ('DELETE', None, None, 'UUID', None, None, 'SERVICE', 'tester', None, 'tester5', 401), + + # DELETE object in own account: ok ('DELETE', None, None, 'UUID', 'UUID', None, 'SERVICE', 'tester', 'tester', 'tester5', 204), + + # 
DELETE object fails if wrong user, or only one token sent ('DELETE', None, None, 'UUID', 'UUID', None, 'SERVICE', 'tester', 'tester3', 'tester5', 403), ('DELETE', None, None, 'UUID', 'UUID', None, @@ -279,60 +343,83 @@ RBAC_DELETE_WITH_SERVICE_PREFIX = [ # A scenario of get for account, container and object with # several roles. RBAC_GET = [ + # GET own account: ok ('GET', None, None, None, None, None, None, 'tester', 'tester', None, 200), ('GET', None, None, None, None, None, None, 'tester', 'tester', 'tester', 200), + + # GET other users account: not allowed for role admin ('GET', None, None, None, None, None, None, 'tester2', 'tester', None, 403), ('GET', None, None, None, None, None, None, 'tester4', 'tester', None, 403), + + # GET other users account: not allowed for role _member_ ('GET', None, None, None, None, None, None, 'tester3', 'tester3', None, 403), ('GET', None, None, None, None, None, None, 'tester2', 'tester3', None, 403), ('GET', None, None, None, None, None, None, 'tester4', 'tester3', None, 403), + + # GET other users account: allowed for role ResellerAdmin ('GET', None, None, None, None, None, None, 'tester6', 'tester6', None, 200), ('GET', None, None, None, None, None, None, 'tester2', 'tester6', None, 200), ('GET', None, None, None, None, None, None, 'tester4', 'tester6', None, 200), + + # GET container in own account: ok ('GET', None, None, 'UUID', None, None, None, 'tester', 'tester', None, 200), ('GET', None, None, 'UUID', None, None, None, 'tester', 'tester', 'tester', 200), + + # GET container in other users account: not allowed for role admin ('GET', None, None, 'UUID', None, None, None, 'tester2', 'tester', None, 403), ('GET', None, None, 'UUID', None, None, None, 'tester4', 'tester', None, 403), + + # GET container in other users account: not allowed for role _member_ ('GET', None, None, 'UUID', None, None, None, 'tester3', 'tester3', None, 403), ('GET', None, None, 'UUID', None, None, None, 'tester2', 'tester3', None, 403), ('GET', 
None, None, 'UUID', None, None, None, 'tester4', 'tester3', None, 403), + + # GET container in other users account: allowed for role ResellerAdmin ('GET', None, None, 'UUID', None, None, None, 'tester6', 'tester6', None, 200), ('GET', None, None, 'UUID', None, None, None, 'tester2', 'tester6', None, 200), ('GET', None, None, 'UUID', None, None, None, 'tester4', 'tester6', None, 200), + + # GET object in own account: ok ('GET', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester', None, 200), ('GET', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester', 'tester', 200), + + # GET object in other users account: not allowed for role admin ('GET', None, None, 'UUID', 'UUID', None, None, 'tester2', 'tester', None, 403), ('GET', None, None, 'UUID', 'UUID', None, None, 'tester4', 'tester', None, 403), + + # GET object in other users account: not allowed for role _member_ ('GET', None, None, 'UUID', 'UUID', None, None, 'tester3', 'tester3', None, 403), ('GET', None, None, 'UUID', 'UUID', None, None, 'tester2', 'tester3', None, 403), ('GET', None, None, 'UUID', 'UUID', None, None, 'tester4', 'tester3', None, 403), + + # GET object in other users account: allowed for role ResellerAdmin ('GET', None, None, 'UUID', 'UUID', None, None, 'tester6', 'tester6', None, 200), ('GET', None, None, 'UUID', 'UUID', None, @@ -343,8 +430,11 @@ RBAC_GET = [ RBAC_GET_WITH_SERVICE_PREFIX = [ + # GET own account: ok ('GET', None, None, None, None, None, None, 'tester', 'tester', 'tester5', 200), + + # GET other account: not allowed for role service ('GET', None, None, None, None, None, None, 'tester', 'tester3', 'tester5', 403), ('GET', None, None, None, None, None, @@ -355,8 +445,12 @@ RBAC_GET_WITH_SERVICE_PREFIX = [ None, 'tester2', 'tester5', None, 403), ('GET', None, None, None, None, None, None, 'tester4', 'tester5', None, 403), + + # GET container in own account: ok ('GET', None, None, 'UUID', None, None, None, 'tester', 'tester', 'tester5', 200), + + # GET container in other 
users account: not allowed for role service ('GET', None, None, 'UUID', None, None, None, 'tester', 'tester3', 'tester5', 403), ('GET', None, None, 'UUID', None, None, @@ -367,8 +461,12 @@ RBAC_GET_WITH_SERVICE_PREFIX = [ None, 'tester2', 'tester5', None, 403), ('GET', None, None, 'UUID', None, None, None, 'tester4', 'tester5', None, 403), + + # GET object in own account: ok ('GET', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester', 'tester5', 200), + + # GET object fails if wrong user, or only one token sent ('GET', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester3', 'tester5', 403), ('GET', None, None, 'UUID', 'UUID', None, @@ -379,8 +477,14 @@ RBAC_GET_WITH_SERVICE_PREFIX = [ None, 'tester2', 'tester5', None, 403), ('GET', None, None, 'UUID', 'UUID', None, None, 'tester4', 'tester5', None, 403), + + # All following actions are using SERVICE prefix + + # GET own account: ok ('GET', None, None, None, None, None, 'SERVICE', 'tester', 'tester', 'tester5', 200), + + # GET other account: not allowed for role service ('GET', None, None, None, None, None, 'SERVICE', 'tester', 'tester3', 'tester5', 403), ('GET', None, None, None, None, None, @@ -389,8 +493,12 @@ RBAC_GET_WITH_SERVICE_PREFIX = [ 'SERVICE', 'tester', 'tester', 'tester', 403), ('GET', None, None, None, None, None, 'SERVICE', 'tester', None, 'tester5', 401), + + # GET container in own account: ok ('GET', None, None, 'UUID', None, None, 'SERVICE', 'tester', 'tester', 'tester5', 200), + + # GET container fails if wrong user, or only one token sent ('GET', None, None, 'UUID', None, None, 'SERVICE', 'tester', 'tester3', 'tester5', 403), ('GET', None, None, 'UUID', None, None, @@ -399,8 +507,12 @@ RBAC_GET_WITH_SERVICE_PREFIX = [ 'SERVICE', 'tester', 'tester', 'tester', 403), ('GET', None, None, 'UUID', None, None, 'SERVICE', 'tester', None, 'tester5', 401), + + # GET object in own account: ok ('GET', None, None, 'UUID', 'UUID', None, 'SERVICE', 'tester', 'tester', 'tester5', 200), + + # GET 
object fails if wrong user, or only one token sent ('GET', None, None, 'UUID', 'UUID', None, 'SERVICE', 'tester', 'tester3', 'tester5', 403), ('GET', None, None, 'UUID', 'UUID', None, @@ -415,60 +527,84 @@ RBAC_GET_WITH_SERVICE_PREFIX = [ # A scenario of head for account, container and object with # several roles. RBAC_HEAD = [ + # HEAD own account: ok ('HEAD', None, None, None, None, None, None, 'tester', 'tester', None, 204), ('HEAD', None, None, None, None, None, None, 'tester', 'tester', 'tester', 204), + + # HEAD other users account: not allowed for role admin ('HEAD', None, None, None, None, None, None, 'tester2', 'tester', None, 403), ('HEAD', None, None, None, None, None, None, 'tester4', 'tester', None, 403), + + # HEAD other users account: not allowed for role _member_ ('HEAD', None, None, None, None, None, None, 'tester3', 'tester3', None, 403), ('HEAD', None, None, None, None, None, None, 'tester2', 'tester3', None, 403), ('HEAD', None, None, None, None, None, None, 'tester4', 'tester3', None, 403), + + # HEAD other users account: allowed for role ResellerAdmin ('HEAD', None, None, None, None, None, None, 'tester6', 'tester6', None, 204), ('HEAD', None, None, None, None, None, None, 'tester2', 'tester6', None, 204), ('HEAD', None, None, None, None, None, None, 'tester4', 'tester6', None, 204), + + # HEAD container in own account: ok ('HEAD', None, None, 'UUID', None, None, None, 'tester', 'tester', None, 204), ('HEAD', None, None, 'UUID', None, None, None, 'tester', 'tester', 'tester', 204), + + # HEAD container in other users account: not allowed for role admin ('HEAD', None, None, 'UUID', None, None, None, 'tester2', 'tester', None, 403), ('HEAD', None, None, 'UUID', None, None, None, 'tester4', 'tester', None, 403), + + # HEAD container in other users account: not allowed for role _member_ ('HEAD', None, None, 'UUID', None, None, None, 'tester3', 'tester3', None, 403), ('HEAD', None, None, 'UUID', None, None, None, 'tester2', 'tester3', None, 403), 
('HEAD', None, None, 'UUID', None, None, None, 'tester4', 'tester3', None, 403), + + # HEAD container in other users account: allowed for role ResellerAdmin ('HEAD', None, None, 'UUID', None, None, None, 'tester6', 'tester6', None, 204), ('HEAD', None, None, 'UUID', None, None, None, 'tester2', 'tester6', None, 204), ('HEAD', None, None, 'UUID', None, None, None, 'tester4', 'tester6', None, 204), + + + # HEAD object in own account: ok ('HEAD', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester', None, 200), ('HEAD', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester', 'tester', 200), + + # HEAD object in other users account: not allowed for role admin ('HEAD', None, None, 'UUID', 'UUID', None, None, 'tester2', 'tester', None, 403), ('HEAD', None, None, 'UUID', 'UUID', None, None, 'tester4', 'tester', None, 403), + + # HEAD object in other users account: not allowed for role _member_ ('HEAD', None, None, 'UUID', 'UUID', None, None, 'tester3', 'tester3', None, 403), ('HEAD', None, None, 'UUID', 'UUID', None, None, 'tester2', 'tester3', None, 403), ('HEAD', None, None, 'UUID', 'UUID', None, None, 'tester4', 'tester3', None, 403), + + # HEAD object in other users account: allowed for role ResellerAdmin ('HEAD', None, None, 'UUID', 'UUID', None, None, 'tester6', 'tester6', None, 200), ('HEAD', None, None, 'UUID', 'UUID', None, @@ -479,8 +615,11 @@ RBAC_HEAD = [ RBAC_HEAD_WITH_SERVICE_PREFIX = [ + # HEAD own account: ok ('HEAD', None, None, None, None, None, None, 'tester', 'tester', 'tester5', 204), + + # HEAD other account: not allowed for role service ('HEAD', None, None, None, None, None, None, 'tester', 'tester3', 'tester5', 403), ('HEAD', None, None, None, None, None, @@ -491,8 +630,12 @@ RBAC_HEAD_WITH_SERVICE_PREFIX = [ None, 'tester2', 'tester5', None, 403), ('HEAD', None, None, None, None, None, None, 'tester4', 'tester5', None, 403), + + # HEAD container in own account: ok ('HEAD', None, None, 'UUID', None, None, None, 'tester', 'tester', 
'tester5', 204), + + # HEAD container in other users account: not allowed for role service ('HEAD', None, None, 'UUID', None, None, None, 'tester', 'tester3', 'tester5', 403), ('HEAD', None, None, 'UUID', None, None, @@ -503,8 +646,12 @@ RBAC_HEAD_WITH_SERVICE_PREFIX = [ None, 'tester2', 'tester5', None, 403), ('HEAD', None, None, 'UUID', None, None, None, 'tester4', 'tester5', None, 403), + + # HEAD object in own account: ok ('HEAD', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester', 'tester5', 200), + + # HEAD object fails if wrong user, or only one token sent ('HEAD', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester3', 'tester5', 403), ('HEAD', None, None, 'UUID', 'UUID', None, @@ -515,8 +662,14 @@ RBAC_HEAD_WITH_SERVICE_PREFIX = [ None, 'tester2', 'tester5', None, 403), ('HEAD', None, None, 'UUID', 'UUID', None, None, 'tester4', 'tester5', None, 403), + + # All following actions are using SERVICE prefix + + # HEAD own account: ok ('HEAD', None, None, None, None, None, 'SERVICE', 'tester', 'tester', 'tester5', 204), + + # HEAD other account: not allowed for role service ('HEAD', None, None, None, None, None, 'SERVICE', 'tester', 'tester3', 'tester5', 403), ('HEAD', None, None, None, None, None, @@ -525,8 +678,12 @@ RBAC_HEAD_WITH_SERVICE_PREFIX = [ 'SERVICE', 'tester', 'tester', 'tester', 403), ('HEAD', None, None, None, None, None, 'SERVICE', 'tester', None, 'tester5', 401), + + # HEAD container in own account: ok ('HEAD', None, None, 'UUID', None, None, 'SERVICE', 'tester', 'tester', 'tester5', 204), + + # HEAD container in other users account: not allowed for role service ('HEAD', None, None, 'UUID', None, None, 'SERVICE', 'tester', 'tester3', 'tester5', 403), ('HEAD', None, None, 'UUID', None, None, @@ -535,8 +692,12 @@ RBAC_HEAD_WITH_SERVICE_PREFIX = [ 'SERVICE', 'tester', 'tester', 'tester', 403), ('HEAD', None, None, 'UUID', None, None, 'SERVICE', 'tester', None, 'tester5', 401), + + # HEAD object in own account: ok ('HEAD', None, None, 
'UUID', 'UUID', None, 'SERVICE', 'tester', 'tester', 'tester5', 200), + + # HEAD object fails if wrong user, or only one token sent ('HEAD', None, None, 'UUID', 'UUID', None, 'SERVICE', 'tester', 'tester3', 'tester5', 403), ('HEAD', None, None, 'UUID', 'UUID', None, @@ -551,60 +712,83 @@ RBAC_HEAD_WITH_SERVICE_PREFIX = [ # A scenario of post for account, container and object with # several roles. RBAC_POST = [ + # POST own account: ok ('POST', None, None, None, None, None, None, 'tester', 'tester', None, 204), ('POST', None, None, None, None, None, None, 'tester', 'tester', 'tester', 204), + + # POST other users account: not allowed for role admin ('POST', None, None, None, None, None, None, 'tester2', 'tester', None, 403), ('POST', None, None, None, None, None, None, 'tester4', 'tester', None, 403), + + # POST other users account: not allowed for role _member_ ('POST', None, None, None, None, None, None, 'tester3', 'tester3', None, 403), ('POST', None, None, None, None, None, None, 'tester2', 'tester3', None, 403), ('POST', None, None, None, None, None, None, 'tester4', 'tester3', None, 403), + + # POST other users account: allowed for role ResellerAdmin ('POST', None, None, None, None, None, None, 'tester6', 'tester6', None, 204), ('POST', None, None, None, None, None, None, 'tester2', 'tester6', None, 204), ('POST', None, None, None, None, None, None, 'tester4', 'tester6', None, 204), + + # POST container in own account: ok ('POST', None, None, 'UUID', None, None, None, 'tester', 'tester', None, 204), ('POST', None, None, 'UUID', None, None, None, 'tester', 'tester', 'tester', 204), + + # POST container in other users account: not allowed for role admin ('POST', None, None, 'UUID', None, None, None, 'tester2', 'tester', None, 403), ('POST', None, None, 'UUID', None, None, None, 'tester4', 'tester', None, 403), + + # POST container in other users account: not allowed for role _member_ ('POST', None, None, 'UUID', None, None, None, 'tester3', 'tester3', None, 
403), ('POST', None, None, 'UUID', None, None, None, 'tester2', 'tester3', None, 403), ('POST', None, None, 'UUID', None, None, None, 'tester4', 'tester3', None, 403), + + # POST container in other users account: allowed for role ResellerAdmin ('POST', None, None, 'UUID', None, None, None, 'tester6', 'tester6', None, 204), ('POST', None, None, 'UUID', None, None, None, 'tester2', 'tester6', None, 204), ('POST', None, None, 'UUID', None, None, None, 'tester4', 'tester6', None, 204), + + # POST object in own account: ok ('POST', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester', None, 202), ('POST', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester', 'tester', 202), + + # POST object in other users account: not allowed for role admin ('POST', None, None, 'UUID', 'UUID', None, None, 'tester2', 'tester', None, 403), ('POST', None, None, 'UUID', 'UUID', None, None, 'tester4', 'tester', None, 403), + + # POST object in other users account: not allowed for role _member_ ('POST', None, None, 'UUID', 'UUID', None, None, 'tester3', 'tester3', None, 403), ('POST', None, None, 'UUID', 'UUID', None, None, 'tester2', 'tester3', None, 403), ('POST', None, None, 'UUID', 'UUID', None, None, 'tester4', 'tester3', None, 403), + + # POST object in other users account: allowed for role ResellerAdmin ('POST', None, None, 'UUID', 'UUID', None, None, 'tester6', 'tester6', None, 202), ('POST', None, None, 'UUID', 'UUID', None, @@ -615,8 +799,11 @@ RBAC_POST = [ RBAC_POST_WITH_SERVICE_PREFIX = [ + # POST own account: ok ('POST', None, None, None, None, None, None, 'tester', 'tester', 'tester5', 204), + + # POST other account: not allowed for role service ('POST', None, None, None, None, None, None, 'tester', 'tester3', 'tester5', 403), ('POST', None, None, None, None, None, @@ -627,8 +814,12 @@ RBAC_POST_WITH_SERVICE_PREFIX = [ None, 'tester2', 'tester5', None, 403), ('POST', None, None, None, None, None, None, 'tester4', 'tester5', None, 403), + + # POST container in own account: ok ('POST', None, 
None, 'UUID', None, None, None, 'tester', 'tester', 'tester5', 204), + + # POST container in other users account: not allowed for role service ('POST', None, None, 'UUID', None, None, None, 'tester', 'tester3', 'tester5', 403), ('POST', None, None, 'UUID', None, None, @@ -639,8 +830,12 @@ RBAC_POST_WITH_SERVICE_PREFIX = [ None, 'tester2', 'tester5', None, 403), ('POST', None, None, 'UUID', None, None, None, 'tester4', 'tester5', None, 403), + + # POST object in own account: ok ('POST', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester', 'tester5', 202), + + # POST object fails if wrong user, or only one token sent ('POST', None, None, 'UUID', 'UUID', None, None, 'tester', 'tester3', 'tester5', 403), ('POST', None, None, 'UUID', 'UUID', None, @@ -651,8 +846,14 @@ RBAC_POST_WITH_SERVICE_PREFIX = [ None, 'tester2', 'tester5', None, 403), ('POST', None, None, 'UUID', 'UUID', None, None, 'tester4', 'tester5', None, 403), + + # All following actions are using SERVICE prefix + + # POST own account: ok ('POST', None, None, None, None, None, 'SERVICE', 'tester', 'tester', 'tester5', 204), + + # POST other account: not allowed for role service ('POST', None, None, None, None, None, 'SERVICE', 'tester', 'tester3', 'tester5', 403), ('POST', None, None, None, None, None, @@ -661,8 +862,12 @@ RBAC_POST_WITH_SERVICE_PREFIX = [ 'SERVICE', 'tester', 'tester', 'tester', 403), ('POST', None, None, None, None, None, 'SERVICE', 'tester', None, 'tester5', 401), + + # POST container in own account: ok ('POST', None, None, 'UUID', None, None, 'SERVICE', 'tester', 'tester', 'tester5', 204), + + # POST container in other users account: not allowed for role service ('POST', None, None, 'UUID', None, None, 'SERVICE', 'tester', 'tester3', 'tester5', 403), ('POST', None, None, 'UUID', None, None, @@ -671,8 +876,12 @@ RBAC_POST_WITH_SERVICE_PREFIX = [ 'SERVICE', 'tester', 'tester', 'tester', 403), ('POST', None, None, 'UUID', None, None, 'SERVICE', 'tester', None, 'tester5', 401), + + # 
POST object in own account: ok ('POST', None, None, 'UUID', 'UUID', None, 'SERVICE', 'tester', 'tester', 'tester5', 202), + + # POST object fails if wrong user, or only one token sent ('POST', None, None, 'UUID', 'UUID', None, 'SERVICE', 'tester', 'tester3', 'tester5', 403), ('POST', None, None, 'UUID', 'UUID', None, @@ -687,6 +896,8 @@ RBAC_POST_WITH_SERVICE_PREFIX = [ # A scenario of options for account, container and object with # several roles. RBAC_OPTIONS = [ + # OPTIONS request is always ok + ('OPTIONS', None, None, None, None, None, None, 'tester', 'tester', None, 200), ('OPTIONS', None, None, None, None, None, @@ -786,11 +997,15 @@ RBAC_OPTIONS = [ None, 'UUID', None, {"X-Container-Meta-Access-Control-Allow-Origin": "*"}, None, 'tester', 'tester', None, 200), + + # Not OK for container: wrong origin ('OPTIONS', {"Origin": "http://localhost", "Access-Control-Request-Method": "GET"}, None, 'UUID', None, {"X-Container-Meta-Access-Control-Allow-Origin": "http://invalid.com"}, None, 'tester', 'tester', None, 401), + + # Not OK for object: missing X-Container-Meta-Access-Control-Allow-Origin ('OPTIONS', {"Origin": "http://localhost", "Access-Control-Request-Method": "GET"}, None, 'UUID', 'UUID', None, None, 'tester', 'tester', None, 401), @@ -799,6 +1014,8 @@ RBAC_OPTIONS = [ None, 'UUID', 'UUID', {"X-Container-Meta-Access-Control-Allow-Origin": "*"}, None, 'tester', None, None, 200), + + # Not OK for object: wrong origin ('OPTIONS', {"Origin": "http://localhost", "Access-Control-Request-Method": "GET"}, None, 'UUID', 'UUID', @@ -808,6 +1025,8 @@ RBAC_OPTIONS = [ RBAC_OPTIONS_WITH_SERVICE_PREFIX = [ + # OPTIONS request is always ok + ('OPTIONS', None, None, None, None, None, None, 'tester', 'tester', 'tester5', 200), ('OPTIONS', None, None, None, None, None, From b5a243e75a8033988063d2c1c90ac373bc0050d2 Mon Sep 17 00:00:00 2001 From: Janie Richling Date: Fri, 19 Feb 2016 18:07:18 -0600 Subject: [PATCH 006/141] Set backend content length for fallocate - EC Policy 
Currently, the ECObjectController removes the 'content-length' header. This part is ok, except that value is being used to set 'X-Backend-Obj-Content-Length', so it is always 0. This leads to not calling fallocate (details on bug) on a PUT since the size is 0. This change makes use of some numbers returned from the EC Driver get_segment_info method in order to calculate the expected on-disk size that should be allocated. The EC controller will now set the 'X-Backend-Obj-Content-Length' value appropriately. Co-Authored-By: Kota Tsuyuzaki Co-Authored-By: John Dickinson Co-Authored-By: Tim Burke Change-Id: Ifd16c1438539e6fd9bb2dbcd053d11bea2e09fee Fixes: bug 1532008 --- swift/proxy/controllers/obj.py | 38 +++++++++++++++++++++---- test/unit/proxy/controllers/test_obj.py | 12 ++++++++ 2 files changed, 44 insertions(+), 6 deletions(-) diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index f3c13d589f..9b5756725f 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -1769,7 +1769,7 @@ class ECPutter(object): @classmethod def connect(cls, node, part, path, headers, conn_timeout, node_timeout, - chunked=False): + chunked=False, expected_frag_archive_size=None): """ Connect to a backend node and send the headers. @@ -1791,9 +1791,10 @@ class ECPutter(object): # we must use chunked encoding. headers['Transfer-Encoding'] = 'chunked' headers['Expect'] = '100-continue' - if 'Content-Length' in headers: - headers['X-Backend-Obj-Content-Length'] = \ - headers.pop('Content-Length') + + # make sure this isn't there + headers.pop('Content-Length') + headers['X-Backend-Obj-Content-Length'] = expected_frag_archive_size headers['X-Backend-Obj-Multipart-Mime-Boundary'] = mime_boundary @@ -2105,16 +2106,41 @@ class ECObjectController(BaseObjectController): # the object server will get different bytes, so these # values do not apply (Content-Length might, in general, but # in the specific case of replication vs. EC, it doesn't). 
- headers.pop('Content-Length', None) + client_cl = headers.pop('Content-Length', None) headers.pop('Etag', None) + expected_frag_size = None + if client_cl: + policy_index = int(headers.get('X-Backend-Storage-Policy-Index')) + policy = POLICIES.get_by_index(policy_index) + # TODO: PyECLib <= 1.2.0 looks to return the segment info + # different from the input for aligned data efficiency but + # Swift never does. So calculate the fragment length Swift + # will actually send to object server by making two different + # get_segment_info calls (until PyECLib fixed). + # policy.fragment_size makes the call using segment size, + # and the next call is to get info for the last segment + + # get number of fragments except the tail - use truncation // + num_fragments = int(client_cl) // policy.ec_segment_size + expected_frag_size = policy.fragment_size * num_fragments + + # calculate the tail fragment_size by hand and add it to + # expected_frag_size + last_segment_size = int(client_cl) % policy.ec_segment_size + if last_segment_size: + last_info = policy.pyeclib_driver.get_segment_info( + last_segment_size, policy.ec_segment_size) + expected_frag_size += last_info['fragment_size'] + self.app.logger.thread_locals = logger_thread_locals for node in node_iter: try: putter = ECPutter.connect( node, part, path, headers, conn_timeout=self.app.conn_timeout, - node_timeout=self.app.node_timeout) + node_timeout=self.app.node_timeout, + expected_frag_archive_size=expected_frag_size) self.app.set_node_timing(node, putter.connect_duration) return putter except InsufficientStorage: diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index c39cd19502..ea4f6d1509 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -1483,6 +1483,8 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): conn_id = kwargs['connection_id'] put_requests[conn_id]['boundary'] = headers[ 
'X-Backend-Obj-Multipart-Mime-Boundary'] + put_requests[conn_id]['backend-content-length'] = headers[ + 'X-Backend-Obj-Content-Length'] with set_http_connect(*codes, expect_headers=expect_headers, give_send=capture_body, @@ -1496,6 +1498,9 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): self.assertTrue(info['boundary'] is not None, "didn't get boundary for conn %r" % ( connection_id,)) + self.assertTrue(size > int(info['backend-content-length']) > 0, + "invalid backend-content-length for conn %r" % ( + connection_id,)) # email.parser.FeedParser doesn't know how to take a multipart # message and boundary together and parse it; it only knows how @@ -1517,6 +1522,13 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): self.assertEqual(obj_part['X-Document'], 'object body') frag_archives.append(obj_part.get_payload()) + # assert length was correct for this connection + self.assertEqual(int(info['backend-content-length']), + len(frag_archives[-1])) + # assert length was the same for all connections + self.assertEqual(int(info['backend-content-length']), + len(frag_archives[0])) + # validate some footer metadata self.assertEqual(footer_part['X-Document'], 'object metadata') footer_metadata = json.loads(footer_part.get_payload()) From 75ab3cb78862647644af7eaf5e0fb590e73ae341 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 14 Oct 2015 15:49:35 +0100 Subject: [PATCH 007/141] Stop staticweb revealing container existence to unauth'd requests When a container has `X-Container-Meta-Web-Listings: false` then staticweb will return a 404 in response to a GET or HEAD on the container, regardless of whether the request is auth'd. That provides a way to probe for container existence. It should return a 401 if the request is not auth'd. This patch adds a call to swift.authorize before returning the 404. 
Closes-Bug: 1506116 Change-Id: I382323b49dc8f6d67bf4494db7084a860a10db59 --- swift/common/middleware/staticweb.py | 18 ++++++-- test/unit/common/middleware/test_staticweb.py | 43 ++++++++++++++++++- 2 files changed, 56 insertions(+), 5 deletions(-) diff --git a/swift/common/middleware/staticweb.py b/swift/common/middleware/staticweb.py index e7552e4f54..7d647c561b 100644 --- a/swift/common/middleware/staticweb.py +++ b/swift/common/middleware/staticweb.py @@ -131,7 +131,8 @@ from swift.common.utils import human_readable, split_path, config_true_value, \ quote, register_swift_info, get_logger from swift.common.wsgi import make_env, WSGIContext from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND -from swift.common.swob import Response, HTTPMovedPermanently, HTTPNotFound +from swift.common.swob import Response, HTTPMovedPermanently, HTTPNotFound, \ + Request from swift.proxy.controllers.base import get_container_info @@ -196,10 +197,12 @@ class _StaticWebContext(WSGIContext): self._error, self._listings, self._listings_css and self._dir_type. :param env: The WSGI environment dict. + :return container_info: The container_info dict. """ self._index = self._error = self._listings = self._listings_css = \ self._dir_type = None - container_info = get_container_info(env, self.app, swift_source='SW') + container_info = get_container_info( + env, self.app, swift_source='SW') if is_success(container_info['status']): meta = container_info.get('meta', {}) self._index = meta.get('web-index', '').strip() @@ -208,6 +211,7 @@ class _StaticWebContext(WSGIContext): self._listings_label = meta.get('web-listings-label', '').strip() self._listings_css = meta.get('web-listings-css', '').strip() self._dir_type = meta.get('web-directory-type', '').strip() + return container_info def _listing(self, env, start_response, prefix=None): """ @@ -356,7 +360,15 @@ class _StaticWebContext(WSGIContext): :param env: The original WSGI environment dict. 
:param start_response: The original WSGI start_response hook. """ - self._get_container_info(env) + container_info = self._get_container_info(env) + req = Request(env) + req.acl = container_info['read_acl'] + # we checked earlier that swift.authorize is set in env + aresp = env['swift.authorize'](req) + if aresp: + resp = aresp(env, self._start_response) + return self._error_response(resp, env, start_response) + if not self._listings and not self._index: if config_true_value(env.get('HTTP_X_WEB_MODE', 'f')): return HTTPNotFound()(env, start_response) diff --git a/test/unit/common/middleware/test_staticweb.py b/test/unit/common/middleware/test_staticweb.py index 3ab73bf8af..0db4a37842 100644 --- a/test/unit/common/middleware/test_staticweb.py +++ b/test/unit/common/middleware/test_staticweb.py @@ -41,7 +41,8 @@ meta_map = { 'web-error': 'error.html'}}, 'c6b': {'meta': {'web-listings': 't', 'web-listings-label': 'foo'}}, - 'c7': {'meta': {'web-listings': 'f'}}, + 'c7': {'meta': {'web-listings': 'f', + 'web-error': 'error.html'}}, 'c8': {'meta': {'web-error': 'error.html', 'web-listings': 't', 'web-listings-css': @@ -202,6 +203,16 @@ class FakeApp(object): '''.strip())(env, start_response) elif env['PATH_INFO'] in ('/v1/a/c7', '/v1/a/c7/'): return self.listing(env, start_response) + elif env['PATH_INFO'] == '/v1/a/c7/404error.html': + return Response(status='404 Not Found')(env, start_response) + elif env['PATH_INFO'] == '/v1/a/c7/401error.html': + return Response(status='200 Ok', body=''' + + +

Hey, you're not authorized to see this!

+ + + '''.strip())(env, start_response) elif env['PATH_INFO'] in ('/v1/a/c8', '/v1/a/c8/'): return self.listing(env, start_response) elif env['PATH_INFO'] == '/v1/a/c8/subdir/': @@ -663,9 +674,37 @@ class TestStaticWeb(unittest.TestCase): self.assertIn(label, resp.body) def test_container7listing(self): + # container7 has web-listings = f, web-error=error.html resp = Request.blank('/v1/a/c7/').get_response(self.test_staticweb) self.assertEqual(resp.status_int, 404) - self.assertTrue('Web Listing Disabled' in resp.body) + self.assertIn("Web Listing Disabled", resp.body) + + # expect 301 if auth'd but no trailing '/' + resp = Request.blank('/v1/a/c7').get_response(self.test_staticweb) + self.assertEqual(resp.status_int, 301) + + # expect default 401 if request is not auth'd and no trailing '/' + test_staticweb = FakeAuthFilter( + staticweb.filter_factory({})(self.app), deny_listing=True, + deny_objects=True) + resp = Request.blank('/v1/a/c7').get_response(test_staticweb) + self.assertEqual(resp.status_int, 401) + self.assertNotIn("Hey, you're not authorized to see this!", resp.body) + + # expect custom 401 if request is not auth'd for listing + test_staticweb = FakeAuthFilter( + staticweb.filter_factory({})(self.app), deny_listing=True) + resp = Request.blank('/v1/a/c7/').get_response(test_staticweb) + self.assertEqual(resp.status_int, 401) + self.assertIn("Hey, you're not authorized to see this!", resp.body) + + # expect default 401 if request is not auth'd for listing or object GET + test_staticweb = FakeAuthFilter( + staticweb.filter_factory({})(self.app), deny_listing=True, + deny_objects=True) + resp = Request.blank('/v1/a/c7/').get_response(test_staticweb) + self.assertEqual(resp.status_int, 401) + self.assertNotIn("Hey, you're not authorized to see this!", resp.body) def test_container8listingcss(self): resp = Request.blank( From 2d73dd9e660b50ef04a20012963a15376983923f Mon Sep 17 00:00:00 2001 From: Cheng Li Date: Sat, 27 Feb 2016 23:26:47 +0800 Subject: 
[PATCH 008/141] keep etag of target source instead of possible_source In proxy/controllers/base.py : ResumingGetter._get_source_and_node self.used_source_etag should be etag of target source instead of possible_source. Change-Id: Id6c33e79f65cb4ad9e3249957dd57dc2da8cad98 Closes-bug: #1550734 --- swift/proxy/controllers/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index 99b1551b27..ae4371b905 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -1055,7 +1055,7 @@ class ResumingGetter(object): self.used_nodes.append(node) src_headers = dict( (k.lower(), v) for k, v in - possible_source.getheaders()) + source.getheaders()) # Save off the source etag so that, if we lose the connection # and have to resume from a different node, we can be sure that From 0628f1268c804aa555ff3873d11831266c1b1cb1 Mon Sep 17 00:00:00 2001 From: Bill Huber Date: Fri, 23 Oct 2015 15:50:21 -0500 Subject: [PATCH 009/141] Add unit tests for swift.account.reaper This patch adds more unit tests to diminish missing pieces of the coverage in the account_reaper unit test. 
Change-Id: Ib9e875ddd1334a4a67037dcfbd42d3b008ccd4e7 --- test/unit/account/test_reaper.py | 58 ++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/test/unit/account/test_reaper.py b/test/unit/account/test_reaper.py index 84194cfcb0..b3bc74309a 100644 --- a/test/unit/account/test_reaper.py +++ b/test/unit/account/test_reaper.py @@ -23,6 +23,7 @@ import unittest from logging import DEBUG from mock import patch, call, DEFAULT import six +import eventlet from swift.account import reaper from swift.account.backend import DATADIR @@ -162,6 +163,8 @@ class TestReaper(unittest.TestCase): if self.amount_fail < self.max_fail: self.amount_fail += 1 raise self.myexp + if self.reap_obj_timeout: + raise eventlet.Timeout() def fake_direct_delete_container(self, *args, **kwargs): if self.amount_delete_fail < self.max_delete_fail: @@ -171,6 +174,8 @@ class TestReaper(unittest.TestCase): def fake_direct_get_container(self, *args, **kwargs): if self.get_fail: raise self.myexp + if self.timeout: + raise eventlet.Timeout() objects = [{'name': 'o1'}, {'name': 'o2'}, {'name': six.text_type('o3')}, @@ -319,6 +324,7 @@ class TestReaper(unittest.TestCase): r = self.init_reaper({}, fakelogger=True) self.amount_fail = 0 self.max_fail = 1 + self.reap_obj_timeout = False policy = random.choice(list(POLICIES)) with patch('swift.account.reaper.direct_delete_object', self.fake_direct_delete_object): @@ -347,6 +353,18 @@ class TestReaper(unittest.TestCase): self.assertEqual(r.stats_objects_remaining, 1) self.assertEqual(r.stats_objects_possibly_remaining, 1) + def test_reap_object_timeout(self): + r = self.init_reaper({}, fakelogger=True) + self.amount_fail = 1 + self.max_fail = 0 + self.reap_obj_timeout = True + with patch('swift.account.reaper.direct_delete_object', + self.fake_direct_delete_object): + r.reap_object('a', 'c', 'partition', cont_nodes, 'o', 1) + self.assertEqual(r.stats_objects_remaining, 4) + self.assertTrue(r.logger.get_lines_for_level( + 
'error')[-1].startswith('Timeout Exception')) + def test_reap_object_non_exist_policy_index(self): r = self.init_reaper({}, fakelogger=True) r.reap_object('a', 'c', 'partition', cont_nodes, 'o', 2) @@ -434,6 +452,7 @@ class TestReaper(unittest.TestCase): def test_reap_container_partial_fail(self): r = self.init_reaper({}, fakelogger=True) self.get_fail = False + self.timeout = False self.reap_obj_fail = False self.amount_delete_fail = 0 self.max_delete_fail = 2 @@ -452,6 +471,7 @@ class TestReaper(unittest.TestCase): def test_reap_container_full_fail(self): r = self.init_reaper({}, fakelogger=True) self.get_fail = False + self.timeout = False self.reap_obj_fail = False self.amount_delete_fail = 0 self.max_delete_fail = 3 @@ -467,6 +487,25 @@ class TestReaper(unittest.TestCase): self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 3) self.assertEqual(r.stats_containers_remaining, 1) + def test_reap_container_get_object_timeout(self): + r = self.init_reaper({}, fakelogger=True) + self.get_fail = False + self.timeout = True + self.reap_obj_fail = False + self.amount_delete_fail = 0 + self.max_delete_fail = 0 + with patch('swift.account.reaper.direct_get_container', + self.fake_direct_get_container), \ + patch('swift.account.reaper.direct_delete_container', + self.fake_direct_delete_container), \ + patch('swift.account.reaper.AccountReaper.get_container_ring', + self.fake_container_ring), \ + patch('swift.account.reaper.AccountReaper.reap_object', + self.fake_reap_object): + r.reap_container('a', 'partition', acc_nodes, 'c') + self.assertTrue(r.logger.get_lines_for_level( + 'error')[-1].startswith('Timeout Exception')) + @patch('swift.account.reaper.Ring', lambda *args, **kwargs: unit.FakeRing()) def test_reap_container_non_exist_policy_index(self): @@ -500,6 +539,8 @@ class TestReaper(unittest.TestCase): self.r.stats_objects_remaining = 1 self.r.stats_containers_possibly_remaining = 1 self.r.stats_objects_possibly_remaining = 1 + 
self.r.stats_return_codes[2] = \ + self.r.stats_return_codes.get(2, 0) + 1 def test_reap_account(self): containers = ('c1', 'c2', 'c3', '') @@ -527,6 +568,16 @@ class TestReaper(unittest.TestCase): self.assertTrue(stat_line.find('1 objects remaining')) self.assertTrue(stat_line.find('1 containers possibly remaining')) self.assertTrue(stat_line.find('1 objects possibly remaining')) + self.assertTrue(stat_line.find('return codes: 2 2xxs')) + + @patch('swift.account.reaper.Ring', + lambda *args, **kwargs: unit.FakeRing()) + def test_basic_reap_account(self): + self.r = reaper.AccountReaper({}) + self.r.account_ring = None + self.r.get_account_ring() + self.assertEqual(self.r.account_ring.replica_count, 3) + self.assertEqual(len(self.r.account_ring.devs), 3) def test_reap_account_no_container(self): broker = FakeAccountBroker(tuple()) @@ -674,6 +725,13 @@ class TestReaper(unittest.TestCase): r.run_once() self.assertFalse(foo.called) + with patch('swift.account.reaper.AccountReaper.reap_device') as foo: + r.logger = unit.debug_logger('test-reaper') + r.devices = 'thisdeviceisbad' + r.run_once() + self.assertTrue(r.logger.get_lines_for_level( + 'error')[-1].startswith('Exception in top-level account reaper')) + def test_run_forever(self): def fake_sleep(val): self.val = val From fe70898daced5f5fd698dd27098451439fdd8d08 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Sun, 28 Feb 2016 01:18:07 +0000 Subject: [PATCH 010/141] Require account/container metadata be UTF-8 Otherwise, we get a UnicodeDecodeError when we call json.dumps() Change-Id: Ie200d029e1fd7f0ff0956c8ced98207e11ef9080 --- swift/common/constraints.py | 8 +++++++- swift/common/db.py | 11 +++++++--- test/unit/account/test_server.py | 23 +++++++++++++++++++++ test/unit/common/test_constraints.py | 18 ++++++++++++++--- test/unit/common/test_db.py | 12 +++++++++++ test/unit/container/test_server.py | 30 ++++++++++++++++++++++++++++ 6 files changed, 95 insertions(+), 7 deletions(-) diff --git 
a/swift/common/constraints.py b/swift/common/constraints.py index 451e7458bf..abfab4bb9e 100644 --- a/swift/common/constraints.py +++ b/swift/common/constraints.py @@ -129,7 +129,8 @@ def check_metadata(req, target_type): which type the target storage for the metadata is :returns: HTTPBadRequest with bad metadata otherwise None """ - prefix = 'x-%s-meta-' % target_type.lower() + target_type = target_type.lower() + prefix = 'x-%s-meta-' % target_type meta_count = 0 meta_size = 0 for key, value in req.headers.items(): @@ -145,6 +146,11 @@ def check_metadata(req, target_type): if not key: return HTTPBadRequest(body='Metadata name cannot be empty', request=req, content_type='text/plain') + bad_key = not check_utf8(key) + bad_value = value and not check_utf8(value) + if target_type in ('account', 'container') and (bad_key or bad_value): + return HTTPBadRequest(body='Metadata must be valid UTF-8', + request=req, content_type='text/plain') meta_count += 1 meta_size += len(key) + len(value) if len(key) > MAX_META_NAME_LENGTH: diff --git a/swift/common/db.py b/swift/common/db.py index cead803375..092018d6b3 100644 --- a/swift/common/db.py +++ b/swift/common/db.py @@ -32,7 +32,8 @@ from tempfile import mkstemp from eventlet import sleep, Timeout import sqlite3 -from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE +from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE, \ + check_utf8 from swift.common.utils import Timestamp, renamer, \ mkdirs, lock_parent_directory, fallocate from swift.common.exceptions import LockTimeout @@ -729,11 +730,11 @@ class DatabaseBroker(object): @staticmethod def validate_metadata(metadata): """ - Validates that metadata_falls within acceptable limits. + Validates that metadata falls within acceptable limits. 
:param metadata: to be validated :raises: HTTPBadRequest if MAX_META_COUNT or MAX_META_OVERALL_SIZE - is exceeded + is exceeded, or if metadata contains non-UTF-8 data """ meta_count = 0 meta_size = 0 @@ -747,6 +748,10 @@ class DatabaseBroker(object): key = key[len(prefix):] meta_count = meta_count + 1 meta_size = meta_size + len(key) + len(value) + bad_key = key and not check_utf8(key) + bad_value = value and not check_utf8(value) + if bad_key or bad_value: + raise HTTPBadRequest('Metadata must be valid UTF-8') if meta_count > MAX_META_COUNT: raise HTTPBadRequest('Too many metadata items; max %d' % MAX_META_COUNT) diff --git a/test/unit/account/test_server.py b/test/unit/account/test_server.py index db4f09cc1f..d2e7c087ef 100644 --- a/test/unit/account/test_server.py +++ b/test/unit/account/test_server.py @@ -383,6 +383,29 @@ class TestAccountController(unittest.TestCase): self.assertEqual(resp.body, 'Recently deleted') self.assertEqual(resp.headers['X-Account-Status'], 'Deleted') + def test_PUT_non_utf8_metadata(self): + # Set metadata header + req = Request.blank( + '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': normalize_timestamp(1), + 'X-Account-Meta-Test': b'\xff'}) + resp = req.get_response(self.controller) + self.assertEqual(resp.status_int, 400) + # Set sysmeta header + req = Request.blank( + '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': normalize_timestamp(1), + 'X-Account-Sysmeta-Access-Control': b'\xff'}) + resp = req.get_response(self.controller) + self.assertEqual(resp.status_int, 400) + # Send other + req = Request.blank( + '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': normalize_timestamp(1), + 'X-Will-Not-Be-Saved': b'\xff'}) + resp = req.get_response(self.controller) + self.assertEqual(resp.status_int, 202) + def test_PUT_GET_metadata(self): # Set metadata header req = Request.blank( diff --git a/test/unit/common/test_constraints.py 
b/test/unit/common/test_constraints.py index b250907527..2f7fb85d9b 100644 --- a/test/unit/common/test_constraints.py +++ b/test/unit/common/test_constraints.py @@ -22,7 +22,7 @@ from six.moves import range from test import safe_repr from test.unit import MockTrue -from swift.common.swob import HTTPBadRequest, Request, HTTPException +from swift.common.swob import Request, HTTPException from swift.common.http import HTTP_REQUEST_ENTITY_TOO_LARGE, \ HTTP_BAD_REQUEST, HTTP_LENGTH_REQUIRED, HTTP_NOT_IMPLEMENTED from swift.common import constraints, utils @@ -49,8 +49,20 @@ class TestConstraints(unittest.TestCase): def test_check_metadata_empty_name(self): headers = {'X-Object-Meta-': 'Value'} - self.assertTrue(constraints.check_metadata(Request.blank( - '/', headers=headers), 'object'), HTTPBadRequest) + self.assertEqual(constraints.check_metadata(Request.blank( + '/', headers=headers), 'object').status_int, HTTP_BAD_REQUEST) + + def test_check_metadata_non_utf8(self): + headers = {'X-Account-Meta-Foo': b'\xff'} + self.assertEqual(constraints.check_metadata(Request.blank( + '/', headers=headers), 'account').status_int, HTTP_BAD_REQUEST) + headers = {b'X-Container-Meta-\xff': 'foo'} + self.assertEqual(constraints.check_metadata(Request.blank( + '/', headers=headers), 'container').status_int, HTTP_BAD_REQUEST) + # Object's OK; its metadata isn't serialized as JSON + headers = {'X-Object-Meta-Foo': b'\xff'} + self.assertIsNone(constraints.check_metadata(Request.blank( + '/', headers=headers), 'object')) def test_check_metadata_name_length(self): name = 'a' * constraints.MAX_META_NAME_LENGTH diff --git a/test/unit/common/test_db.py b/test/unit/common/test_db.py index 925e71438a..7e7660e77c 100644 --- a/test/unit/common/test_db.py +++ b/test/unit/common/test_db.py @@ -1147,6 +1147,18 @@ class TestDatabaseBroker(unittest.TestCase): except HTTPException: self.fail('Unexpected HTTPException') + def test_metadata_raises_exception_on_non_utf8(self): + def try_validate(metadata): 
+ try: + DatabaseBroker.validate_metadata(metadata) + except HTTPException as e: + self.assertEqual(str(e), '400 Bad Request') + else: + self.fail('HTTPException not raised') + ts = normalize_timestamp(1) + try_validate({'X-Account-Meta-Foo': (b'\xff', ts)}) + try_validate({b'X-Container-Meta-\xff': ('bar', ts)}) + def test_metadata_raises_exception_over_max_count(self): metadata = {} for c in range(MAX_META_COUNT + 1): diff --git a/test/unit/container/test_server.py b/test/unit/container/test_server.py index 22e0f00c41..4646b87fae 100644 --- a/test/unit/container/test_server.py +++ b/test/unit/container/test_server.py @@ -632,6 +632,36 @@ class TestContainerController(unittest.TestCase): self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'], str(non_default_policy.idx)) + def test_PUT_non_utf8_metadata(self): + # Set metadata header + req = Request.blank( + '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': Timestamp(1).internal, + 'X-Container-Meta-Test': b'\xff'}) + resp = req.get_response(self.controller) + self.assertEqual(resp.status_int, 400) + # Set sysmeta header + req = Request.blank( + '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': Timestamp(1).internal, + 'X-Container-Sysmeta-Test': b'\xff'}) + resp = req.get_response(self.controller) + self.assertEqual(resp.status_int, 400) + # Set ACL + req = Request.blank( + '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': Timestamp(1).internal, + 'X-Container-Read': b'\xff'}) + resp = req.get_response(self.controller) + self.assertEqual(resp.status_int, 400) + # Send other + req = Request.blank( + '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': Timestamp(1).internal, + 'X-Will-Not-Be-Saved': b'\xff'}) + resp = req.get_response(self.controller) + self.assertEqual(resp.status_int, 202) + def test_PUT_GET_metadata(self): # Set metadata header req = Request.blank( From 
6d8be59fce17008369656628615efcd2677dfe36 Mon Sep 17 00:00:00 2001 From: Kota Tsuyuzaki Date: Mon, 22 Feb 2016 22:41:51 -0800 Subject: [PATCH 011/141] Fix ringbuilder tests Some of tests in test/unit/cli/test_ringbuilder doesn't assert the exit code and unfortunately some of these passed even if the statement fails for the assertion actually. This patch enables to assert the exit code from ringbuider and fixes some code/test bugs I noticed. Change-Id: I18fa675ba8a90678e2b5ccb5f90eafab01d22787 --- swift/cli/ringbuilder.py | 2 +- test/unit/cli/test_ringbuilder.py | 520 +++++++++++------------------- 2 files changed, 191 insertions(+), 331 deletions(-) diff --git a/swift/cli/ringbuilder.py b/swift/cli/ringbuilder.py index 076f1975f8..a7a005ef9a 100644 --- a/swift/cli/ringbuilder.py +++ b/swift/cli/ringbuilder.py @@ -1183,7 +1183,7 @@ swift-ring-builder set_overload [%] def main(arguments=None): global argv, backup_dir, builder, builder_file, ring_file - if arguments: + if arguments is not None: argv = arguments else: argv = sys_argv diff --git a/test/unit/cli/test_ringbuilder.py b/test/unit/cli/test_ringbuilder.py index 8856e039f1..fd80b1ab53 100644 --- a/test/unit/cli/test_ringbuilder.py +++ b/test/unit/cli/test_ringbuilder.py @@ -24,6 +24,7 @@ import uuid import shlex from swift.cli import ringbuilder +from swift.cli.ringbuilder import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR from swift.common import exceptions from swift.common.ring import RingBuilder @@ -132,6 +133,11 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): }) ring.save(self.tmpfile) + def assertSystemExit(self, return_code, func, *argv): + with self.assertRaises(SystemExit) as cm: + func(*argv) + self.assertEqual(return_code, cm.exception.code) + def test_parse_search_values_old_format(self): # Test old format argv = ["d0r0z0-127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data"] @@ -170,12 +176,8 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): def 
test_parse_search_values_number_of_arguments(self): # Test Number of arguments abnormal argv = ["--region", "2", "test"] - err = None - try: - ringbuilder._parse_search_values(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit( + EXIT_ERROR, ringbuilder._parse_search_values, argv) def test_find_parts(self): rb = RingBuilder(8, 3, 0) @@ -214,82 +216,52 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): def test_parse_list_parts_values_number_of_arguments(self): # Test Number of arguments abnormal argv = ["--region", "2", "test"] - err = None - try: - ringbuilder._parse_list_parts_values(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit( + EXIT_ERROR, ringbuilder._parse_list_parts_values, argv) def test_parse_add_values_number_of_arguments(self): # Test Number of arguments abnormal argv = ["--region", "2", "test"] - err = None - try: - ringbuilder._parse_add_values(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit( + EXIT_ERROR, ringbuilder._parse_add_values, argv) def test_set_weight_values_no_devices(self): # Test no devices - err = None - try: - ringbuilder._set_weight_values([], 100) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + # _set_weight_values doesn't take argv-like arguments + self.assertSystemExit( + EXIT_ERROR, ringbuilder._set_weight_values, [], 100) def test_parse_set_weight_values_number_of_arguments(self): # Test Number of arguments abnormal argv = ["r1", "100", "r2"] - err = None - try: - ringbuilder._parse_set_weight_values(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit( + EXIT_ERROR, ringbuilder._parse_set_weight_values, argv) argv = ["--region", "2"] - err = None - try: - ringbuilder._parse_set_weight_values(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit( + 
EXIT_ERROR, ringbuilder._parse_set_weight_values, argv) def test_set_info_values_no_devices(self): # Test no devices - err = None - try: - ringbuilder._set_info_values([], 100) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + # _set_info_values doesn't take argv-like arguments + self.assertSystemExit( + EXIT_ERROR, ringbuilder._set_info_values, [], 100) def test_parse_set_info_values_number_of_arguments(self): # Test Number of arguments abnormal argv = ["r1", "127.0.0.1", "r2"] - err = None - try: - ringbuilder._parse_set_info_values(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit( + EXIT_ERROR, ringbuilder._parse_set_info_values, argv) def test_parse_remove_values_number_of_arguments(self): # Test Number of arguments abnormal argv = ["--region", "2", "test"] - err = None - try: - ringbuilder._parse_remove_values(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit( + EXIT_ERROR, ringbuilder._parse_remove_values, argv) def test_create_ring(self): argv = ["", self.tmpfile, "create", "6", "3.14159265359", "1"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) self.assertEqual(ring.part_power, 6) self.assertEqual(ring.replicas, 3.14159265359) @@ -298,14 +270,14 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): def test_create_ring_number_of_arguments(self): # Test missing arguments argv = ["", self.tmpfile, "create"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_add_device_ipv4_old_format(self): self.create_sample_ring() # Test ipv4(old format) argv = ["", self.tmpfile, "add", "r2z3-127.0.0.1:6000/sda3_some meta data", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # 
Check that device was created with given data ring = RingBuilder.load(self.tmpfile) @@ -326,7 +298,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): argv = ["", self.tmpfile, "add", "r1z1-127.0.0.1:6000/sda9", "3.14159265359", "r1z1-127.0.0.1:6000/sda9", "2"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_add_device_ipv6_old_format(self): self.create_sample_ring() @@ -336,7 +308,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "r2z3-2001:0000:1234:0000:0000:C1C0:ABCD:0876:6000" "R2::10:7000/sda3_some meta data", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Check that device was created with given data ring = RingBuilder.load(self.tmpfile) @@ -366,7 +338,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "6000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Check that device was created with given data ring = RingBuilder.load(self.tmpfile) @@ -396,7 +368,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Check that device was created with given data ring = RingBuilder.load(self.tmpfile) @@ -426,7 +398,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Check that device was created with 
given data ring = RingBuilder.load(self.tmpfile) @@ -447,23 +419,13 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): def test_add_device_number_of_arguments(self): # Test Number of arguments abnormal argv = ["", self.tmpfile, "add"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_add_device_already_exists(self): # Test Add a device that already exists argv = ["", self.tmpfile, "add", "r0z0-127.0.0.1:6000/sda1_some meta data", "100"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_add_device_old_missing_region(self): self.create_sample_ring() @@ -481,7 +443,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): for search_value in self.search_values: self.create_sample_ring() argv = ["", self.tmpfile, "remove", search_value] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) # Check that weight was set to 0 @@ -513,7 +475,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # Test ipv4(old format) argv = ["", self.tmpfile, "remove", "d0r0z0-127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) # Check that weight was set to 0 @@ -552,13 +514,13 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Test ipv6(old format) argv = ["", self.tmpfile, "remove", 
"d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6000" "R[2::10]:7000/sda3_some meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) # Check that second device in ring is not affected @@ -601,7 +563,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-ip", "127.0.0.1", "--replication-port", "6000", "--device", "sda1", "--meta", "some meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) # Check that weight was set to 0 @@ -639,7 +601,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "9000", "--device", "sda30", "--meta", "other meta data", "--weight", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Test ipv6(new format) argv = \ @@ -650,7 +612,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-ip", "[3::10]", "--replication-port", "9000", "--device", "sda30", "--meta", "other meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) # Check that second device in ring is not affected @@ -694,7 +656,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Test domain name argv = \ @@ -705,7 +667,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data"] - self.assertRaises(SystemExit, 
ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) # Check that second device in ring is not affected @@ -741,24 +703,14 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.create_sample_ring() # Test Number of arguments abnormal argv = ["", self.tmpfile, "remove"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_remove_device_no_matching(self): self.create_sample_ring() # Test No matching devices argv = ["", self.tmpfile, "remove", "--ip", "unknown"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_set_weight(self): for search_value in self.search_values: @@ -766,7 +718,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): argv = ["", self.tmpfile, "set_weight", search_value, "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) # Check that weight was changed @@ -787,7 +739,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): argv = ["", self.tmpfile, "set_weight", "d0r0z0-127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) # Check that weight was changed @@ -814,13 +766,13 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "100"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Test ipv6(old format) argv = ["", 
self.tmpfile, "set_weight", "d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6000" "R[2::10]:7000/sda3_some meta data", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) # Check that second device in ring is not affected @@ -850,7 +802,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-ip", "127.0.0.1", "--replication-port", "6000", "--device", "sda1", "--meta", "some meta data", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) # Check that weight was changed @@ -877,7 +829,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "100"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Test ipv6(new format) argv = \ @@ -888,7 +840,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-ip", "[2::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) # Check that second device in ring is not affected @@ -919,7 +871,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "100"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Test domain name argv = \ @@ -930,7 +882,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", 
"--meta", "some meta data", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) # Check that second device in ring is not affected @@ -953,24 +905,14 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.create_sample_ring() # Test Number of arguments abnormal argv = ["", self.tmpfile, "set_weight"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_set_weight_no_matching(self): self.create_sample_ring() # Test No matching devices argv = ["", self.tmpfile, "set_weight", "--ip", "unknown"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_set_info(self): for search_value in self.search_values: @@ -978,7 +920,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.create_sample_ring() argv = ["", self.tmpfile, "set_info", search_value, "127.0.1.1:8000/sda1_other meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Check that device was created with given data ring = RingBuilder.load(self.tmpfile) @@ -1005,7 +947,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): argv = ["", self.tmpfile, "set_info", "d0r0z0-127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data", "127.0.1.1:8000R127.0.1.1:8000/sda10_other meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Check that device was created with given data ring = RingBuilder.load(self.tmpfile) @@ -1040,7 +982,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "7000", "--device", "sda3", "--meta", 
"some meta data", "--weight", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Test ipv6(old format) argv = ["", self.tmpfile, "set_info", @@ -1048,7 +990,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "R[2::10]:7000/sda3_some meta data", "[3001:0000:1234:0000:0000:C1C0:ABCD:0876]:8000" "R[3::10]:8000/sda30_other meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) # Check that second device in ring is not affected @@ -1096,7 +1038,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--change-replication-ip", "127.0.2.1", "--change-replication-port", "9000", "--change-device", "sda100", "--change-meta", "other meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Check that device was created with given data ring = RingBuilder.load(self.tmpfile) @@ -1131,7 +1073,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Test ipv6(new format) argv = \ @@ -1147,7 +1089,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--change-replication-ip", "[4::10]", "--change-replication-port", "9000", "--change-device", "sda300", "--change-meta", "other meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) # Check that second device in ring is not affected @@ -1192,7 +1134,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "7000", "--device", "sda3", 
"--meta", "some meta data", "--weight", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Test domain name argv = \ @@ -1208,7 +1150,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--change-replication-ip", "r.test2.com", "--change-replication-port", "9000", "--change-device", "sda300", "--change-meta", "other meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) # Check that second device in ring is not affected @@ -1244,24 +1186,14 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.create_sample_ring() # Test Number of arguments abnormal argv = ["", self.tmpfile, "set_info"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_set_info_no_matching(self): self.create_sample_ring() # Test No matching devices argv = ["", self.tmpfile, "set_info", "--ip", "unknown"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_set_info_already_exists(self): self.create_sample_ring() @@ -1279,17 +1211,12 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--change-replication-ip", "127.0.0.2", "--change-replication-port", "6001", "--change-device", "sda2", "--change-meta", ""] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_set_min_part_hours(self): self.create_sample_ring() argv = ["", self.tmpfile, "set_min_part_hours", "24"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, 
ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) self.assertEqual(ring.min_part_hours, 24) @@ -1297,38 +1224,33 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.create_sample_ring() # Test Number of arguments abnormal argv = ["", self.tmpfile, "set_min_part_hours"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_set_replicas(self): self.create_sample_ring() argv = ["", self.tmpfile, "set_replicas", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) self.assertEqual(ring.replicas, 3.14159265359) def test_set_overload(self): self.create_sample_ring() argv = ["", self.tmpfile, "set_overload", "0.19878"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) self.assertEqual(ring.overload, 0.19878) def test_set_overload_negative(self): self.create_sample_ring() argv = ["", self.tmpfile, "set_overload", "-0.19878"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) self.assertEqual(ring.overload, 0.0) def test_set_overload_non_numeric(self): self.create_sample_ring() argv = ["", self.tmpfile, "set_overload", "swedish fish"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) self.assertEqual(ring.overload, 0.0) @@ -1372,38 +1294,23 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.create_sample_ring() # Test missing arguments argv = ["", self.tmpfile, "set_overload"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_ERROR, 
ringbuilder.main, argv) def test_set_replicas_number_of_arguments(self): self.create_sample_ring() # Test Number of arguments abnormal argv = ["", self.tmpfile, "set_replicas"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_set_replicas_invalid_value(self): self.create_sample_ring() # Test not a valid number argv = ["", self.tmpfile, "set_replicas", "test"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) # Test new replicas is 0 argv = ["", self.tmpfile, "set_replicas", "0"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_validate(self): self.create_sample_ring() @@ -1411,17 +1318,12 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ring.rebalance() ring.save(self.tmpfile) argv = ["", self.tmpfile, "validate"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_validate_empty_file(self): open(self.tmpfile, 'a').close argv = ["", self.tmpfile, "validate"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_validate_corrupted_file(self): self.create_sample_ring() @@ -1434,53 +1336,33 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # corrupt the file with open(self.tmpfile, 'wb') as f: f.write(os.urandom(1024)) - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_validate_non_existent_file(self): rand_file = '%s/%s' 
% ('/tmp', str(uuid.uuid4())) argv = ["", rand_file, "validate"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_validate_non_accessible_file(self): with mock.patch.object( RingBuilder, 'load', mock.Mock(side_effect=exceptions.PermissionError)): argv = ["", self.tmpfile, "validate"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_validate_generic_error(self): with mock.patch.object( RingBuilder, 'load', mock.Mock( side_effect=IOError('Generic error occurred'))): argv = ["", self.tmpfile, "validate"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_search_device_ipv4_old_format(self): self.create_sample_ring() # Test ipv4(old format) argv = ["", self.tmpfile, "search", "d0r0z0-127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_search_device_ipv6_old_format(self): self.create_sample_ring() @@ -1494,13 +1376,18 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) + + # write ring file + ring = RingBuilder.load(self.tmpfile) + ring.rebalance() + ring.save(self.tmpfile) # Test ipv6(old format) argv = ["", self.tmpfile, "search", - "d2r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6000" + "d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6000" "R[2::10]:7000/sda3_some meta data"] - self.assertRaises(SystemExit, 
ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_search_device_ipv4_new_format(self): self.create_sample_ring() @@ -1513,7 +1400,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-ip", "127.0.0.1", "--replication-port", "6000", "--device", "sda1", "--meta", "some meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_search_device_ipv6_new_format(self): self.create_sample_ring() @@ -1527,18 +1414,23 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) + + # write ring file + ring = RingBuilder.load(self.tmpfile) + ring.rebalance() + ring.save(self.tmpfile) # Test ipv6(new format) argv = \ ["", self.tmpfile, "search", - "--id", "2", "--region", "2", "--zone", "3", + "--id", "4", "--region", "2", "--zone", "3", "--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]", "--port", "6000", "--replication-ip", "[2::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_search_device_domain_new_format(self): self.create_sample_ring() @@ -1552,48 +1444,45 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) + # write ring file + ring = RingBuilder.load(self.tmpfile) + ring.rebalance() + ring.save(self.tmpfile) # Test domain name argv = \ ["", self.tmpfile, "search", - "--id", "2", "--region", 
"2", "--zone", "3", + "--id", "4", "--region", "2", "--zone", "3", "--ip", "test.test.com", "--port", "6000", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_search_device_number_of_arguments(self): self.create_sample_ring() # Test Number of arguments abnormal argv = ["", self.tmpfile, "search"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_search_device_no_matching(self): self.create_sample_ring() # Test No matching devices argv = ["", self.tmpfile, "search", "--ip", "unknown"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_list_parts_ipv4_old_format(self): self.create_sample_ring() + ring = RingBuilder.load(self.tmpfile) + ring.rebalance() + ring.save(self.tmpfile) # Test ipv4(old format) argv = ["", self.tmpfile, "list_parts", "d0r0z0-127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_list_parts_ipv6_old_format(self): self.create_sample_ring() @@ -1607,16 +1496,24 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) + + # write ring file + ring = RingBuilder.load(self.tmpfile) + ring.rebalance() + ring.save(self.tmpfile) # Test ipv6(old format) argv = ["", self.tmpfile, "list_parts", - "d2r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6000" + 
"d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6000" "R[2::10]:7000/sda3_some meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_list_parts_ipv4_new_format(self): self.create_sample_ring() + ring = RingBuilder.load(self.tmpfile) + ring.rebalance() + ring.save(self.tmpfile) # Test ipv4(new format) argv = \ ["", self.tmpfile, "list_parts", @@ -1626,7 +1523,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-ip", "127.0.0.1", "--replication-port", "6000", "--device", "sda1", "--meta", "some meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_list_parts_ipv6_new_format(self): self.create_sample_ring() @@ -1640,18 +1537,23 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) + + # write ring file + ring = RingBuilder.load(self.tmpfile) + ring.rebalance() + ring.save(self.tmpfile) # Test ipv6(new format) argv = \ ["", self.tmpfile, "list_parts", - "--id", "2", "--region", "2", "--zone", "3", + "--id", "4", "--region", "2", "--zone", "3", "--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]", "--port", "6000", "--replication-ip", "[2::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_list_parts_domain_new_format(self): self.create_sample_ring() @@ -1665,56 +1567,46 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359"] - self.assertRaises(SystemExit, 
ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) + + # write ring file + ring = RingBuilder.load(self.tmpfile) + ring.rebalance() + ring.save(self.tmpfile) # Test domain name argv = \ ["", self.tmpfile, "list_parts", - "--id", "2", "--region", "2", "--zone", "3", + "--id", "4", "--region", "2", "--zone", "3", "--ip", "test.test.com", "--port", "6000", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_list_parts_number_of_arguments(self): self.create_sample_ring() # Test Number of arguments abnormal argv = ["", self.tmpfile, "list_parts"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_list_parts_no_matching(self): self.create_sample_ring() # Test No matching devices argv = ["", self.tmpfile, "list_parts", "--ip", "unknown"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_unknown(self): self.create_sample_ring() argv = ["", self.tmpfile, "unknown"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_default(self): self.create_sample_ring() argv = ["", self.tmpfile] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_default_show_removed(self): mock_stdout = six.StringIO() @@ -1724,17 +1616,17 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # Note: it also sets device's weight to zero. 
argv = ["", self.tmpfile, "remove", "--id", "1"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Setting another device's weight to zero to be sure we distinguish # real removed device and device with zero weight. argv = ["", self.tmpfile, "set_weight", "0", "--id", "3"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) argv = ["", self.tmpfile] with mock.patch("sys.stdout", mock_stdout): with mock.patch("sys.stderr", mock_stderr): - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) expected = "%s, build version 6\n" \ "64 partitions, 3.000000 replicas, 4 regions, 4 zones, " \ @@ -1770,43 +1662,43 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): argv = ["", self.tmpfile] with mock.patch("sys.stdout", mock_stdout): with mock.patch("sys.stderr", mock_stderr): - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring_not_found_re = re.compile("Ring file .*\.ring\.gz not found") self.assertTrue(ring_not_found_re.findall(mock_stdout.getvalue())) # write ring file argv = ["", self.tmpfile, "rebalance"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # ring file is up-to-date mock_stdout = six.StringIO() argv = ["", self.tmpfile] with mock.patch("sys.stdout", mock_stdout): with mock.patch("sys.stderr", mock_stderr): - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring_up_to_date_re = re.compile("Ring file .*\.ring\.gz is up-to-date") self.assertTrue(ring_up_to_date_re.findall(mock_stdout.getvalue())) # change builder (set weight) argv = ["", self.tmpfile, "set_weight", "0", "--id", "3"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + 
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # ring file is obsolete after set_weight mock_stdout = six.StringIO() argv = ["", self.tmpfile] with mock.patch("sys.stdout", mock_stdout): with mock.patch("sys.stderr", mock_stderr): - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring_obsolete_re = re.compile("Ring file .*\.ring\.gz is obsolete") self.assertTrue(ring_obsolete_re.findall(mock_stdout.getvalue())) # write ring file argv = ["", self.tmpfile, "write_ring"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # ring file up-to-date again mock_stdout = six.StringIO() argv = ["", self.tmpfile] with mock.patch("sys.stdout", mock_stdout): with mock.patch("sys.stderr", mock_stderr): - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) self.assertTrue(ring_up_to_date_re.findall(mock_stdout.getvalue())) # Break ring file e.g. 
just make it empty @@ -1816,14 +1708,14 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): argv = ["", self.tmpfile] with mock.patch("sys.stdout", mock_stdout): with mock.patch("sys.stderr", mock_stderr): - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring_invalid_re = re.compile("Ring file .*\.ring\.gz is invalid") self.assertTrue(ring_invalid_re.findall(mock_stdout.getvalue())) def test_rebalance(self): self.create_sample_ring() argv = ["", self.tmpfile, "rebalance", "3"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) self.assertTrue(ring.validate()) @@ -1834,24 +1726,14 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ring.save(self.tmpfile) # Test No change to the device argv = ["", self.tmpfile, "rebalance", "3"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 1) + self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv) def test_rebalance_no_devices(self): # Test no devices argv = ["", self.tmpfile, "create", "6", "3.14159265359", "1"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) argv = ["", self.tmpfile, "rebalance"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_rebalance_remove_zero_weighted_device(self): self.create_sample_ring() @@ -1864,7 +1746,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # Test rebalance after remove 0 weighted device argv = ["", self.tmpfile, "rebalance", "3"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) 
self.assertTrue(ring.validate()) self.assertEqual(ring.devs[3], None) @@ -1881,7 +1763,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): time += 3600 with mock.patch(time_path, return_value=time): self.assertEqual(ring.min_part_seconds_left, 0) - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) self.assertEqual(ring.min_part_seconds_left, 3600) @@ -1893,7 +1775,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): time += 3600 with mock.patch(time_path, return_value=time): self.assertEqual(ring.min_part_seconds_left, 0) - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) self.assertTrue(ring.min_part_seconds_left, 3600) @@ -1915,7 +1797,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # min part hours not passed with mock.patch(time_path, return_value=(3600 * 0.6)): - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) self.assertEqual(ring.min_part_seconds_left, 3600 * 0.4) @@ -1923,7 +1805,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # min part hours passed, no partitions need to be moved with mock.patch(time_path, return_value=(3600 * 1.5)): - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) self.assertEqual(ring.min_part_seconds_left, 0) @@ -1931,21 +1813,21 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.create_sample_ring() # Test rebalance using explicit seed parameter argv = ["", self.tmpfile, "rebalance", "--seed", "2"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def 
test_write_ring(self): self.create_sample_ring() argv = ["", self.tmpfile, "rebalance"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) argv = ["", self.tmpfile, "write_ring"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_write_builder(self): # Test builder file already exists self.create_sample_ring() argv = ["", self.tmpfile, "rebalance"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) argv = ["", self.tmpfile, "write_builder"] exp_results = {'valid_exit_codes': [2]} self.run_srb(*argv, exp_results=exp_results) @@ -1955,17 +1837,17 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # and lost the builder file self.create_sample_ring() - argv = ["", self.tmpfile, "add", "r1z1-127.0.0.1:6000/sdb" "1.0"] - self.assertRaises(SystemExit, ringbuilder.main, argv) - argv = ["", self.tmpfile, "add", "r1z1-127.0.0.1:6000/sdc" "1.0"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + argv = ["", self.tmpfile, "add", "r1z1-127.0.0.1:6000/sdb", "1.0"] + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) + argv = ["", self.tmpfile, "add", "r1z1-127.0.0.1:6000/sdc", "1.0"] + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) argv = ["", self.tmpfile, "rebalance"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv) argv = ["", self.tmpfile, "remove", "--id", "0"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) argv = ["", self.tmpfile, "rebalance"] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv) backup_file = os.path.join(os.path.dirname(self.tmpfile), os.path.basename(self.tmpfile) + ".ring.gz") @@ -1988,12 +1870,7 @@ 
class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ring.devs[0]['weight'] = 10 ring.save(self.tmpfile) argv = ["", self.tmpfile, "rebalance"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 1) + self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv) def test_no_warn_when_balanced(self): # when the number of total part replicas (3 * 2 ** 10 = 3072 in @@ -2009,28 +1886,18 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ring.devs[0]['weight'] = 10 ring.save(self.tmpfile) argv = ["", self.tmpfile, "rebalance"] - err = None - try: - ringbuilder.main(argv) - except SystemExit as e: - err = e - self.assertEqual(err.code, 0) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_invalid_device_name(self): self.create_sample_ring() for device_name in ["", " ", " sda1", "sda1 ", " meta "]: - err = 0 argv = ["", self.tmpfile, "add", "r1z1-127.0.0.1:6000/%s" % device_name, "1"] - try: - ringbuilder.main(argv) - except SystemExit as exc: - err = exc - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) argv = ["", self.tmpfile, @@ -2041,11 +1908,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--port", "6000", "--device", device_name, "--weight", "100"] - try: - ringbuilder.main(argv) - except SystemExit as exc: - err = exc - self.assertEqual(err.code, 2) + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_dispersion_command(self): self.create_sample_ring() @@ -2060,12 +1923,9 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): argv = ["", "object.ring.gz"] - try: - with mock.patch("sys.stdout", mock_stdout): - with mock.patch("sys.stderr", mock_stderr): - ringbuilder.main(argv) - except SystemExit: - pass + with mock.patch("sys.stdout", mock_stdout): + with mock.patch("sys.stderr", mock_stderr): + self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) expected = 
"Note: using object.builder instead of object.ring.gz " \ "as builder file\n" \ "Ring Builder file does not exist: object.builder\n" @@ -2074,18 +1934,18 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): def test_main_no_arguments(self): # Test calling main with no arguments argv = [] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_main_single_argument(self): # Test calling main with single argument argv = [""] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_main_with_safe(self): # Test calling main with '-safe' argument self.create_sample_ring() argv = ["-safe", self.tmpfile] - self.assertRaises(SystemExit, ringbuilder.main, argv) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) class TestRebalanceCommand(unittest.TestCase, RunSwiftRingBuilderMixin): From 22685d6231003bd236e7fc363b58e86f35ab80bb Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Tue, 1 Mar 2016 13:54:09 +0000 Subject: [PATCH 012/141] Faster suffix invalidations on object PUT/DELETE Before this commit, we performed a full read-modify-write cycle on a partition's hashes.pkl to invalidate the relevant suffix whenever an object PUT or DELETE completes. Now we append invalid suffixes to a new file, "hashes.invalid", alongside hashes.pkl. When we actually get a REPLICATE request and need to compute the hashes, *then* we perform a full read-modify-write on hashes.pkl and clear out hashes.invalid. 
Change-Id: Ia7add438e25688b4b286f0110a4e43490e11ad75 --- swift/obj/diskfile.py | 96 +++++++++++++++++++++++++++++---- test/unit/obj/test_diskfile.py | 97 ++++++++++++++++++++-------------- 2 files changed, 141 insertions(+), 52 deletions(-) diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index 6328a79387..7faf7ff86a 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -72,6 +72,7 @@ from functools import partial PICKLE_PROTOCOL = 2 ONE_WEEK = 604800 HASH_FILE = 'hashes.pkl' +HASH_INVALIDATIONS_FILE = 'hashes.invalid' METADATA_KEY = 'user.swift.metadata' DROP_CACHE_WINDOW = 1024 * 1024 # These are system-set metadata keys that cannot be changed with a POST. @@ -221,6 +222,73 @@ def quarantine_renamer(device_path, corrupted_file_path): return to_dir +def consolidate_hashes(partition_dir): + """ + Take what's in hashes.pkl and hashes.invalid, combine them, write the + result back to hashes.pkl, and clear out hashes.invalid. + + :param partition_dir: absolute path to partition dir containing hashes.pkl + and hashes.invalid + + :returns: the hashes, or None if there's no hashes.pkl. + """ + hashes_file = join(partition_dir, HASH_FILE) + invalidations_file = join(partition_dir, HASH_INVALIDATIONS_FILE) + + if not os.path.exists(hashes_file): + if os.path.exists(invalidations_file): + # no hashes at all -> everything's invalid, so empty the file with + # the invalid suffixes in it, if it exists + try: + with open(invalidations_file, 'wb'): + pass + except OSError as e: + if e.errno != errno.ENOENT: + raise + return None + + with lock_path(partition_dir): + try: + with open(hashes_file, 'rb') as hashes_fp: + pickled_hashes = hashes_fp.read() + except (IOError, OSError): + hashes = {} + else: + try: + hashes = pickle.loads(pickled_hashes) + except Exception: + # pickle.loads() can raise a wide variety of exceptions when + # given invalid input depending on the way in which the + # input is invalid. 
+ hashes = None + + modified = False + try: + with open(invalidations_file, 'rb') as inv_fh: + for line in inv_fh: + suffix = line.strip() + if hashes is not None and hashes.get(suffix) is not None: + hashes[suffix] = None + modified = True + except (IOError, OSError) as e: + if e.errno != errno.ENOENT: + raise + + if modified: + write_pickle(hashes, hashes_file, partition_dir, PICKLE_PROTOCOL) + + # Now that all the invalidations are reflected in hashes.pkl, it's + # safe to clear out the invalidations file. + try: + with open(invalidations_file, 'w') as inv_fh: + pass + except OSError as e: + if e.errno != errno.ENOENT: + raise + + return hashes + + def invalidate_hash(suffix_dir): """ Invalidates the hash for a suffix_dir in the partition's hashes file. @@ -234,16 +302,11 @@ def invalidate_hash(suffix_dir): hashes_file = join(partition_dir, HASH_FILE) if not os.path.exists(hashes_file): return + + invalidations_file = join(partition_dir, HASH_INVALIDATIONS_FILE) with lock_path(partition_dir): - try: - with open(hashes_file, 'rb') as fp: - hashes = pickle.load(fp) - if suffix in hashes and not hashes[suffix]: - return - except Exception: - return - hashes[suffix] = None - write_pickle(hashes, hashes_file, partition_dir, PICKLE_PROTOCOL) + with open(invalidations_file, 'ab') as inv_fh: + inv_fh.write(suffix + "\n") class AuditLocation(object): @@ -395,6 +458,7 @@ class BaseDiskFileManager(object): diskfile_cls = None # must be set by subclasses invalidate_hash = strip_self(invalidate_hash) + consolidate_hashes = strip_self(consolidate_hashes) quarantine_renamer = strip_self(quarantine_renamer) def __init__(self, conf, logger): @@ -792,12 +856,22 @@ class BaseDiskFileManager(object): recalculate = [] try: - with open(hashes_file, 'rb') as fp: - hashes = pickle.load(fp) mtime = getmtime(hashes_file) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + try: + hashes = self.consolidate_hashes(partition_path) except Exception: do_listdir = True 
force_rewrite = True + else: + if hashes is None: # no hashes.pkl file; let's build it + do_listdir = True + force_rewrite = True + hashes = {} + if do_listdir: for suff in os.listdir(partition_path): if len(suff) == 3: diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index de0cf4b1f9..5c5dec1f74 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -1590,7 +1590,7 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): fname = '%s#%s.data' % (ts.internal, frag) with self.assertRaises(DiskFileError) as cm: mgr.parse_on_disk_filename(fname) - self.assertTrue(msg in str(cm.exception).lower()) + self.assertIn(msg, str(cm.exception).lower()) with self.assertRaises(DiskFileError) as cm: mgr.parse_on_disk_filename('junk') @@ -2194,7 +2194,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): # non-fast-post updateable keys are preserved self.assertEqual('text/garbage', df._metadata['Content-Type']) # original fast-post updateable keys are removed - self.assertTrue('X-Object-Meta-Key1' not in df._metadata) + self.assertNotIn('X-Object-Meta-Key1', df._metadata) # new fast-post updateable keys are added self.assertEqual('Value2', df._metadata['X-Object-Meta-Key2']) @@ -2272,9 +2272,9 @@ class DiskFileMixin(BaseDiskFileTestMixin): 'plain/text', '\r\n--someheader\r\n', 30) value = ''.join(it) - self.assertTrue('0123456789' in value) - self.assertTrue('1123456789' in value) - self.assertTrue('2123456789' in value) + self.assertIn('0123456789', value) + self.assertIn('1123456789', value) + self.assertIn('2123456789', value) self.assertEqual(quarantine_msgs, []) def test_disk_file_app_iter_ranges_w_quarantine(self): @@ -2286,9 +2286,9 @@ class DiskFileMixin(BaseDiskFileTestMixin): 'plain/text', '\r\n--someheader\r\n', 30) value = ''.join(it) - self.assertTrue('0123456789' in value) - self.assertTrue('1123456789' in value) - self.assertTrue('2123456789' in value) + self.assertIn('0123456789', value) + 
self.assertIn('1123456789', value) + self.assertIn('2123456789', value) self.assertEqual(quarantine_msgs, ["Bytes read: 30, does not match metadata: 31"]) @@ -2300,7 +2300,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): 'plain/text', '\r\n--someheader\r\n', 30) value = ''.join(it) - self.assertTrue('0123456789' in value) + self.assertIn('0123456789', value) self.assertEqual(quarantine_msgs, []) def test_disk_file_app_iter_ranges_edges(self): @@ -2310,8 +2310,8 @@ class DiskFileMixin(BaseDiskFileTestMixin): it = reader.app_iter_ranges([(3, 10), (0, 2)], 'application/whatever', '\r\n--someheader\r\n', 30) value = ''.join(it) - self.assertTrue('3456789' in value) - self.assertTrue('01' in value) + self.assertIn('3456789', value) + self.assertIn('01', value) self.assertEqual(quarantine_msgs, []) def test_disk_file_large_app_iter_ranges(self): @@ -2777,7 +2777,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): dl = os.listdir(df._datadir) self.assertEqual(len(dl), file_count + 1) exp_name = '%s.meta' % timestamp - self.assertTrue(exp_name in set(dl)) + self.assertIn(exp_name, set(dl)) def test_write_metadata_no_xattr(self): timestamp = Timestamp(time()).internal @@ -2994,8 +2994,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): exp_name = '%s.ts' % ts.internal dl = os.listdir(df._datadir) self.assertEqual(len(dl), 1) - self.assertTrue(exp_name in set(dl), - 'Expected file %s missing in %s' % (exp_name, dl)) + self.assertIn(exp_name, set(dl)) # cleanup before next policy os.unlink(os.path.join(df._datadir, exp_name)) @@ -3006,7 +3005,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): exp_name = '%s.ts' % str(Timestamp(ts).internal) dl = os.listdir(df._datadir) self.assertEqual(len(dl), 1) - self.assertTrue(exp_name in set(dl)) + self.assertIn(exp_name, set(dl)) df = self._simple_get_diskfile() self.assertRaises(DiskFileDeleted, df.open) @@ -3017,7 +3016,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): exp_name = '%s.ts' % str(Timestamp(ts).internal) dl = 
os.listdir(df._datadir) self.assertEqual(len(dl), 1) - self.assertTrue(exp_name in set(dl)) + self.assertIn(exp_name, set(dl)) # it's pickle-format, so removing the last byte is sufficient to # corrupt it ts_fullpath = os.path.join(df._datadir, exp_name) @@ -3069,8 +3068,8 @@ class DiskFileMixin(BaseDiskFileTestMixin): self.assertEqual(reader._fp, None) error_lines = df._logger.get_lines_for_level('error') self.assertEqual(len(error_lines), 1) - self.assertTrue('close failure' in error_lines[0]) - self.assertTrue('Bad' in error_lines[0]) + self.assertIn('close failure', error_lines[0]) + self.assertIn('Bad', error_lines[0]) def test_mount_checking(self): @@ -3128,10 +3127,10 @@ class DiskFileMixin(BaseDiskFileTestMixin): self._create_ondisk_file(df, '', ext='.ts', timestamp=5) df = self._simple_get_diskfile() with df.open(): - self.assertTrue('X-Timestamp' in df._metadata) + self.assertIn('X-Timestamp', df._metadata) self.assertEqual(df._metadata['X-Timestamp'], Timestamp(10).internal) - self.assertTrue('deleted' not in df._metadata) + self.assertNotIn('deleted', df._metadata) def test_ondisk_search_loop_data_meta_ts(self): df = self._simple_get_diskfile() @@ -3146,10 +3145,10 @@ class DiskFileMixin(BaseDiskFileTestMixin): self._create_ondisk_file(df, '', ext='.meta', timestamp=5) df = self._simple_get_diskfile() with df.open(): - self.assertTrue('X-Timestamp' in df._metadata) + self.assertIn('X-Timestamp', df._metadata) self.assertEqual(df._metadata['X-Timestamp'], Timestamp(10).internal) - self.assertTrue('deleted' not in df._metadata) + self.assertNotIn('deleted', df._metadata) def test_ondisk_search_loop_wayward_files_ignored(self): df = self._simple_get_diskfile() @@ -3165,10 +3164,10 @@ class DiskFileMixin(BaseDiskFileTestMixin): self._create_ondisk_file(df, '', ext='.meta', timestamp=5) df = self._simple_get_diskfile() with df.open(): - self.assertTrue('X-Timestamp' in df._metadata) + self.assertIn('X-Timestamp', df._metadata) 
self.assertEqual(df._metadata['X-Timestamp'], Timestamp(10).internal) - self.assertTrue('deleted' not in df._metadata) + self.assertNotIn('deleted', df._metadata) def test_ondisk_search_loop_listdir_error(self): df = self._simple_get_diskfile() @@ -3202,7 +3201,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): pass reader.close() log_lines = df._logger.get_lines_for_level('error') - self.assertTrue('a very special error' in log_lines[-1]) + self.assertIn('a very special error', log_lines[-1]) def test_diskfile_names(self): df = self._simple_get_diskfile() @@ -3226,7 +3225,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): exp_name = '%s.ts' % str(Timestamp(ts).internal) dl = os.listdir(df._datadir) self.assertEqual(len(dl), 1) - self.assertTrue(exp_name in set(dl)) + self.assertIn(exp_name, set(dl)) df = self._simple_get_diskfile() exc = None try: @@ -3258,7 +3257,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): exp_name = '%s.ts' % str(Timestamp(ts).internal) dl = os.listdir(df._datadir) self.assertEqual(len(dl), 1) - self.assertTrue(exp_name in set(dl)) + self.assertIn(exp_name, set(dl)) df = self._simple_get_diskfile() exc = None try: @@ -3338,7 +3337,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): exp_name = '%s.ts' % str(Timestamp(ts).internal) dl = os.listdir(df._datadir) self.assertEqual(len(dl), file_count + 1) - self.assertTrue(exp_name in set(dl)) + self.assertIn(exp_name, set(dl)) def _system_can_zero_copy(self): if not splice.available: @@ -3381,7 +3380,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): self.assertFalse(reader.can_zero_copy_send()) log_lines = df_mgr.logger.get_lines_for_level('warning') - self.assertTrue('MD5 sockets' in log_lines[-1]) + self.assertIn('MD5 sockets', log_lines[-1]) def test_tee_to_md5_pipe_length_mismatch(self): if not self._system_can_zero_copy(): @@ -3492,7 +3491,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): self.fail("Expected exception DiskFileNoSpace") self.assertTrue(_m_fallocate.called) 
self.assertTrue(_m_unlink.called) - self.assertTrue('error' not in self.logger.all_log_lines()) + self.assertNotIn('error', self.logger.all_log_lines()) def test_create_unlink_cleanup_renamer_fails(self): # Test cleanup when renamer fails @@ -3519,7 +3518,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): self.assertFalse(writer.put_succeeded) self.assertTrue(_m_renamer.called) self.assertTrue(_m_unlink.called) - self.assertTrue('error' not in self.logger.all_log_lines()) + self.assertNotIn('error', self.logger.all_log_lines()) def test_create_unlink_cleanup_logging(self): # Test logging of os.unlink() failures. @@ -4489,12 +4488,15 @@ class TestSuffixHashes(unittest.TestCase): part_path = os.path.join(self.devices, 'sda1', diskfile.get_data_dir(policy), '0') hashes_file = os.path.join(part_path, diskfile.HASH_FILE) + inv_file = os.path.join( + part_path, diskfile.HASH_INVALIDATIONS_FILE) self.assertFalse(os.path.exists(hashes_file)) # sanity with mock.patch('swift.obj.diskfile.lock_path') as mock_lock: df_mgr.invalidate_hash(suffix_dir) self.assertFalse(mock_lock.called) - # does not create file + # does not create files self.assertFalse(os.path.exists(hashes_file)) + self.assertFalse(os.path.exists(inv_file)) def test_invalidate_hash_file_exists(self): for policy in self.iter_policies(): @@ -4506,19 +4508,32 @@ class TestSuffixHashes(unittest.TestCase): suffix_dir = os.path.dirname(df._datadir) suffix = os.path.basename(suffix_dir) hashes = df_mgr.get_hashes('sda1', '0', [], policy) - self.assertTrue(suffix in hashes) # sanity + self.assertIn(suffix, hashes) # sanity # sanity check hashes file part_path = os.path.join(self.devices, 'sda1', diskfile.get_data_dir(policy), '0') hashes_file = os.path.join(part_path, diskfile.HASH_FILE) + invalidations_file = os.path.join( + part_path, diskfile.HASH_INVALIDATIONS_FILE) with open(hashes_file, 'rb') as f: self.assertEqual(hashes, pickle.load(f)) + # invalidate the hash with mock.patch('swift.obj.diskfile.lock_path') as 
mock_lock: df_mgr.invalidate_hash(suffix_dir) self.assertTrue(mock_lock.called) + with open(invalidations_file, 'rb') as f: + self.assertEqual(suffix + "\n", f.read()) + + # consolidate the hash and the invalidations + with mock.patch('swift.obj.diskfile.lock_path') as mock_lock: + hashes = df_mgr.consolidate_hashes(part_path) + self.assertIsNone(hashes.get(suffix)) + with open(hashes_file, 'rb') as f: - self.assertEqual({suffix: None}, pickle.load(f)) + self.assertEqual(hashes, pickle.load(f)) + with open(invalidations_file, 'rb') as f: + self.assertEqual("", f.read()) # invalidate_hash tests - error handling @@ -4545,7 +4560,7 @@ class TestSuffixHashes(unittest.TestCase): self.assertEqual(f.read(), 'asdf') # ... but get_hashes will hashes = df_mgr.get_hashes('sda1', '0', [], policy) - self.assertTrue(suffix in hashes) + self.assertIn(suffix, hashes) # get_hashes tests - hash_suffix behaviors @@ -4803,7 +4818,7 @@ class TestSuffixHashes(unittest.TestCase): self.assertTrue(os.path.exists(hsh_path)) # sanity # get_hashes will cleanup empty hsh_path and leave valid one hashes = df_mgr.get_hashes('sda1', '0', [], policy) - self.assertTrue(suffix in hashes) + self.assertIn(suffix, hashes) self.assertTrue(os.path.exists(df._datadir)) for hsh_path in empty_hsh_paths: self.assertFalse(os.path.exists(hsh_path)) @@ -5029,7 +5044,7 @@ class TestSuffixHashes(unittest.TestCase): # get_hashes will find the untracked suffix dir self.assertFalse(os.path.exists(hashes_file)) # sanity hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy) - self.assertTrue(suffix in hashes) + self.assertIn(suffix, hashes) # ... and create a hashes pickle for it self.assertTrue(os.path.exists(hashes_file)) @@ -5059,7 +5074,7 @@ class TestSuffixHashes(unittest.TestCase): # ... 
unless remote end asks for a recalc hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix], policy) - self.assertTrue(suffix in hashes) + self.assertIn(suffix, hashes) def test_get_hashes_does_not_rehash_known_suffix_dirs(self): for policy in self.iter_policies(): @@ -5071,7 +5086,7 @@ class TestSuffixHashes(unittest.TestCase): df.delete(timestamp) # create the baseline hashes file hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy) - self.assertTrue(suffix in hashes) + self.assertIn(suffix, hashes) # now change the contents of the suffix w/o calling # invalidate_hash rmtree(df._datadir) @@ -5253,8 +5268,8 @@ class TestSuffixHashes(unittest.TestCase): diskfile.get_data_dir(policy), '0') open(os.path.join(part_dir, 'bad'), 'w').close() hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy) - self.assertTrue(suffix in hashes) - self.assertFalse('bad' in hashes) + self.assertIn(suffix, hashes) + self.assertNotIn('bad', hashes) def test_get_hashes_hash_suffix_other_oserror(self): for policy in self.iter_policies(): From 6b4e73bb15c680ada8b30279b090c6123ce5731f Mon Sep 17 00:00:00 2001 From: Charles Hsu Date: Thu, 25 Feb 2016 13:31:51 +0800 Subject: [PATCH 013/141] Fix account-reaper unable to delete all containers. When reaper try to find the correct container_shard number, but it doesn't compare local device's name, so it always return first entry's index of local devices from the ring to container_shard. That causes some containers will be skip and objects under these containers won't be delete. 
Change-Id: I0e2bbdd99add86ee46e856920c0740c4aa13c77d Closes-Bug: #1549615 --- swift/account/reaper.py | 4 +- test/unit/account/test_reaper.py | 97 +++++++++++++++++++++++++++----- 2 files changed, 85 insertions(+), 16 deletions(-) diff --git a/swift/account/reaper.py b/swift/account/reaper.py index 696277ca2d..a88f612918 100644 --- a/swift/account/reaper.py +++ b/swift/account/reaper.py @@ -171,7 +171,9 @@ class AccountReaper(Daemon): container_shard = None for container_shard, node in enumerate(nodes): if is_local_device(self.myips, None, node['ip'], None) and \ - (not self.bind_port or self.bind_port == node['port']): + (not self.bind_port or + self.bind_port == node['port']) and \ + (device == node['device']): break else: continue diff --git a/test/unit/account/test_reaper.py b/test/unit/account/test_reaper.py index 84194cfcb0..63d634f818 100644 --- a/test/unit/account/test_reaper.py +++ b/test/unit/account/test_reaper.py @@ -100,15 +100,23 @@ class FakeRing(object): self.nodes = [{'id': '1', 'ip': '10.10.10.1', 'port': 6002, - 'device': None}, + 'device': 'sda1'}, {'id': '2', 'ip': '10.10.10.2', 'port': 6002, - 'device': None}, + 'device': 'sda1'}, {'id': '3', 'ip': '10.10.10.3', 'port': 6002, 'device': None}, + {'id': '4', + 'ip': '10.10.10.1', + 'port': 6002, + 'device': 'sda2'}, + {'id': '5', + 'ip': '10.10.10.1', + 'port': 6002, + 'device': 'sda3'}, ] def get_nodes(self, *args, **kwargs): @@ -124,6 +132,12 @@ acc_nodes = [{'device': 'sda1', {'device': 'sda1', 'ip': '', 'port': ''}, + {'device': 'sda1', + 'ip': '', + 'port': ''}, + {'device': 'sda1', + 'ip': '', + 'port': ''}, {'device': 'sda1', 'ip': '', 'port': ''}] @@ -134,6 +148,12 @@ cont_nodes = [{'device': 'sda1', {'device': 'sda1', 'ip': '', 'port': ''}, + {'device': 'sda1', + 'ip': '', + 'port': ''}, + {'device': 'sda1', + 'ip': '', + 'port': ''}, {'device': 'sda1', 'ip': '', 'port': ''}] @@ -184,11 +204,11 @@ class TestReaper(unittest.TestCase): if self.reap_obj_fail: raise Exception - def 
prepare_data_dir(self, ts=False): + def prepare_data_dir(self, ts=False, device='sda1'): devices_path = tempfile.mkdtemp() # will be deleted by teardown self.to_delete.append(devices_path) - path = os.path.join(devices_path, 'sda1', DATADIR) + path = os.path.join(devices_path, device, DATADIR) os.makedirs(path) path = os.path.join(path, '100', 'a86', 'a8c682d2472e1720f2d81ff8993aba6') @@ -436,7 +456,7 @@ class TestReaper(unittest.TestCase): self.get_fail = False self.reap_obj_fail = False self.amount_delete_fail = 0 - self.max_delete_fail = 2 + self.max_delete_fail = 4 with patch('swift.account.reaper.direct_get_container', self.fake_direct_get_container), \ patch('swift.account.reaper.direct_delete_container', @@ -446,7 +466,7 @@ class TestReaper(unittest.TestCase): patch('swift.account.reaper.AccountReaper.reap_object', self.fake_reap_object): r.reap_container('a', 'partition', acc_nodes, 'c') - self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 2) + self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 4) self.assertEqual(r.stats_containers_possibly_remaining, 1) def test_reap_container_full_fail(self): @@ -454,7 +474,7 @@ class TestReaper(unittest.TestCase): self.get_fail = False self.reap_obj_fail = False self.amount_delete_fail = 0 - self.max_delete_fail = 3 + self.max_delete_fail = 5 with patch('swift.account.reaper.direct_get_container', self.fake_direct_get_container), \ patch('swift.account.reaper.direct_delete_container', @@ -464,7 +484,7 @@ class TestReaper(unittest.TestCase): patch('swift.account.reaper.AccountReaper.reap_object', self.fake_reap_object): r.reap_container('a', 'partition', acc_nodes, 'c') - self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 3) + self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 5) self.assertEqual(r.stats_containers_remaining, 1) @patch('swift.account.reaper.Ring', @@ -518,7 +538,7 @@ class TestReaper(unittest.TestCase): container_shard=container_shard)) 
self.assertEqual(self.called_amount, 4) info_lines = r.logger.get_lines_for_level('info') - self.assertEqual(len(info_lines), 6) + self.assertEqual(len(info_lines), 10) for start_line, stat_line in zip(*[iter(info_lines)] * 2): self.assertEqual(start_line, 'Beginning pass on account a') self.assertTrue(stat_line.find('1 containers deleted')) @@ -604,6 +624,42 @@ class TestReaper(unittest.TestCase): # 10.10.10.2 is second node from ring self.assertEqual(container_shard_used[0], 1) + def test_reap_device_with_sharding_and_various_devices(self): + devices = self.prepare_data_dir(device='sda2') + conf = {'devices': devices} + r = self.init_reaper(conf) + container_shard_used = [-1] + + def fake_reap_account(*args, **kwargs): + container_shard_used[0] = kwargs.get('container_shard') + + with patch('swift.account.reaper.AccountBroker', + FakeAccountBroker), \ + patch('swift.account.reaper.AccountReaper.get_account_ring', + self.fake_account_ring), \ + patch('swift.account.reaper.AccountReaper.reap_account', + fake_reap_account): + r.reap_device('sda2') + + # 10.10.10.2 is second node from ring + self.assertEqual(container_shard_used[0], 3) + + devices = self.prepare_data_dir(device='sda3') + conf = {'devices': devices} + r = self.init_reaper(conf) + container_shard_used = [-1] + + with patch('swift.account.reaper.AccountBroker', + FakeAccountBroker), \ + patch('swift.account.reaper.AccountReaper.get_account_ring', + self.fake_account_ring), \ + patch('swift.account.reaper.AccountReaper.reap_account', + fake_reap_account): + r.reap_device('sda3') + + # 10.10.10.2 is second node from ring + self.assertEqual(container_shard_used[0], 4) + def test_reap_account_with_sharding(self): devices = self.prepare_data_dir() self.called_amount = 0 @@ -632,20 +688,31 @@ class TestReaper(unittest.TestCase): fake_list_containers_iter), \ patch('swift.account.reaper.AccountReaper.reap_container', fake_reap_container): - fake_broker = FakeAccountBroker(['c', 'd', 'e']) - 
r.reap_account(fake_broker, 10, fake_ring.nodes, 0) - self.assertEqual(container_reaped[0], 1) - fake_broker = FakeAccountBroker(['c', 'd', 'e']) + fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g']) + r.reap_account(fake_broker, 10, fake_ring.nodes, 0) + self.assertEqual(container_reaped[0], 0) + + fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g']) container_reaped[0] = 0 r.reap_account(fake_broker, 10, fake_ring.nodes, 1) - self.assertEqual(container_reaped[0], 2) + self.assertEqual(container_reaped[0], 1) container_reaped[0] = 0 - fake_broker = FakeAccountBroker(['c', 'd', 'e']) + fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g']) r.reap_account(fake_broker, 10, fake_ring.nodes, 2) self.assertEqual(container_reaped[0], 0) + container_reaped[0] = 0 + fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g']) + r.reap_account(fake_broker, 10, fake_ring.nodes, 3) + self.assertEqual(container_reaped[0], 3) + + container_reaped[0] = 0 + fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g']) + r.reap_account(fake_broker, 10, fake_ring.nodes, 4) + self.assertEqual(container_reaped[0], 1) + def test_run_once(self): def prepare_data_dir(): devices_path = tempfile.mkdtemp() From 3ff94cb785867382ff6c37cb256d1b0f5381abaa Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Mon, 15 Feb 2016 19:16:08 +0000 Subject: [PATCH 014/141] Add internal method to increase ring partition power This method increases the partition power of an existing ring by one. It does not move any data nor does it exposes a CLI command yet; it is only intended to be used in a future version to do the actual ring modification itself. An existing object that is currently located on partition X will be placed either on partition 2*X or 2*X+1 after the partition power got increased. The reason for this is the Ring.get_part() method, that does a bitwise shift to the right. 
To avoid actual data movement to different disks or even nodes, the allocation of partitions to nodes needs to be changed. The allocation is pairwise due to the above mentioned new partition scheme. Therefore devices are allocated like this, with the partition being the index and the value being the device id: OLD: 0, 3, 7, 5, 2, 1, ... NEW: 0, 0, 3, 3, 7, 7, 5, 5, 2, 2, 1, 1, ... If an operator stops the cluster, increases the partition power and renames & hardlinks the existing data it is possible to do a power shift without actually moving data. Please see the partition power spec for further details on this. Change-Id: I063fd8077497ee8c14d9065f07b4ec0fb5cbe180 Partially-Implements: spec increasing_partition_power --- swift/common/ring/builder.py | 35 ++++++++++++++++ test/unit/common/ring/test_builder.py | 58 +++++++++++++++++++++++++++ 2 files changed, 93 insertions(+) diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py index e47ad986f4..0459cd60de 100644 --- a/swift/common/ring/builder.py +++ b/swift/common/ring/builder.py @@ -1688,3 +1688,38 @@ class RingBuilder(object): if matched: matched_devs.append(dev) return matched_devs + + def increase_partition_power(self): + """ Increases ring partition power by one. + + Devices will be assigned to partitions like this: + + OLD: 0, 3, 7, 5, 2, 1, ... + NEW: 0, 0, 3, 3, 7, 7, 5, 5, 2, 2, 1, 1, ... + + """ + + new_replica2part2dev = [] + for replica in self._replica2part2dev: + new_replica = array('H') + for device in replica: + new_replica.append(device) + new_replica.append(device) # append device a second time + new_replica2part2dev.append(new_replica) + self._replica2part2dev = new_replica2part2dev + + for device in self._iter_devs(): + device['parts'] *= 2 + + # We need to update the time when a partition has been moved the last + # time. 
Since this is an array of all partitions, we need to double it + # two + new_last_part_moves = [] + for partition in self._last_part_moves: + new_last_part_moves.append(partition) + new_last_part_moves.append(partition) + self._last_part_moves = new_last_part_moves + + self.part_power += 1 + self.parts *= 2 + self.version += 1 diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py index 0f7443b2ea..6213089f1a 100644 --- a/test/unit/common/ring/test_builder.py +++ b/test/unit/common/ring/test_builder.py @@ -2424,6 +2424,64 @@ class TestRingBuilder(unittest.TestCase): except exceptions.DuplicateDeviceError: self.fail("device hole not reused") + def test_increase_partition_power(self): + rb = ring.RingBuilder(8, 3.0, 1) + self.assertEqual(rb.part_power, 8) + + # add more devices than replicas to the ring + for i in range(10): + dev = "sdx%s" % i + rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '127.0.0.1', 'port': 10000, 'device': dev}) + rb.rebalance(seed=1) + + # Let's save the ring, and get the nodes for an object + ring_file = os.path.join(self.testdir, 'test_partpower.ring.gz') + rd = rb.get_ring() + rd.save(ring_file) + r = ring.Ring(ring_file) + old_part, old_nodes = r.get_nodes("acc", "cont", "obj") + old_version = rb.version + + rb.increase_partition_power() + rb.validate() + changed_parts, _balance, removed_devs = rb.rebalance() + + self.assertEqual(changed_parts, 0) + self.assertEqual(removed_devs, 0) + + rd = rb.get_ring() + rd.save(ring_file) + r = ring.Ring(ring_file) + new_part, new_nodes = r.get_nodes("acc", "cont", "obj") + + # sanity checks + self.assertEqual(rb.part_power, 9) + self.assertEqual(rb.version, old_version + 2) + + # make sure there is always the same device assigned to every pair of + # partitions + for replica in rb._replica2part2dev: + for part in range(0, len(replica), 2): + dev = replica[part] + next_dev = replica[part + 1] + self.assertEqual(dev, next_dev) + + # same for 
last_part moves + for part in range(0, len(replica), 2): + this_last_moved = rb._last_part_moves[part] + next_last_moved = rb._last_part_moves[part + 1] + self.assertEqual(this_last_moved, next_last_moved) + + # Due to the increased partition power, the partition each object is + # assigned to has changed. If the old partition was X, it will now be + # either located in 2*X or 2*X+1 + self.assertTrue(new_part in [old_part * 2, old_part * 2 + 1]) + + # Importantly, we expect the objects to be placed on the same nodes + # after increasing the partition power + self.assertEqual(old_nodes, new_nodes) + class TestGetRequiredOverload(unittest.TestCase): From d456d9e934d8eb845d83b1e0c29247aac87e2e92 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 7 Oct 2015 10:17:39 +0100 Subject: [PATCH 015/141] Don't ssync data when only a durable is missing If an EC diskfile is missing its .durable file (for example due to a partial PUT failure) then the ssync missing check will fail to open the file and will consider it missing. This can result in possible reconstruction of the fragment archive (for a sync job) and definite transmission of the fragment archive (for sync and revert jobs), which is wasteful. This patch makes the ssync receiver inspect the diskfile state after attempting to open it, and if fragments exist at the timestamp of the sender's diskfile, but a .durable file is missing, then the receiver will commit the diskfile at the sender's timestamp. As a result, there is no longer any need to send a fragment archive. 
Change-Id: I4766864fcc0a3553976e8fd85bbb2fc782f04abd --- swift/obj/ssync_receiver.py | 44 +++++++--- test/unit/obj/common.py | 21 +++-- test/unit/obj/test_ssync.py | 127 +++++++++++++++++++++++++-- test/unit/obj/test_ssync_receiver.py | 111 +++++++++++++++++++++++ 4 files changed, 277 insertions(+), 26 deletions(-) diff --git a/swift/obj/ssync_receiver.py b/swift/obj/ssync_receiver.py index 79a23da4ac..4825f94740 100644 --- a/swift/obj/ssync_receiver.py +++ b/swift/obj/ssync_receiver.py @@ -66,7 +66,6 @@ def encode_wanted(remote, local): The decoder for this line is :py:func:`~swift.obj.ssync_sender.decode_wanted` """ - want = {} if 'ts_data' in local: # we have something, let's get just the right stuff @@ -248,7 +247,7 @@ class Receiver(object): raise swob.HTTPInsufficientStorage(drive=self.device) self.fp = self.request.environ['wsgi.input'] - def _check_local(self, object_hash): + def _check_local(self, remote, make_durable=True): """ Parse local diskfile and return results of current representative for comparison to remote. 
@@ -257,21 +256,42 @@ class Receiver(object): """ try: df = self.diskfile_mgr.get_diskfile_from_hash( - self.device, self.partition, object_hash, + self.device, self.partition, remote['object_hash'], self.policy, frag_index=self.frag_index) except exceptions.DiskFileNotExist: return {} try: df.open() except exceptions.DiskFileDeleted as err: - return {'ts_data': err.timestamp} - except exceptions.DiskFileError as err: - return {} - return { - 'ts_data': df.data_timestamp, - 'ts_meta': df.timestamp, - 'ts_ctype': df.content_type_timestamp, - } + result = {'ts_data': err.timestamp} + except exceptions.DiskFileError: + result = {} + else: + result = { + 'ts_data': df.data_timestamp, + 'ts_meta': df.timestamp, + 'ts_ctype': df.content_type_timestamp, + } + if (make_durable and df.fragments and + remote['ts_data'] in df.fragments and + self.frag_index in df.fragments[remote['ts_data']] and + (df.durable_timestamp is None or + df.durable_timestamp < remote['ts_data'])): + # We have the frag, just missing a .durable, so try to create the + # .durable now. Try this just once to avoid looping if it fails. + try: + with df.create() as writer: + writer.commit(remote['ts_data']) + return self._check_local(remote, make_durable=False) + except Exception: + # if commit fails then log exception and fall back to wanting + # a full update + self.app.logger.exception( + '%s/%s/%s EXCEPTION in replication.Receiver while ' + 'attempting commit of %s' + % (self.request.remote_addr, self.device, self.partition, + df._datadir)) + return result def _check_missing(self, line): """ @@ -282,7 +302,7 @@ class Receiver(object): Anchor point for tests to mock legacy protocol changes. 
""" remote = decode_missing(line) - local = self._check_local(remote['object_hash']) + local = self._check_local(remote) return encode_wanted(remote, local) def missing_check(self): diff --git a/test/unit/obj/common.py b/test/unit/obj/common.py index 33acb631d5..8cb618f4f0 100644 --- a/test/unit/obj/common.py +++ b/test/unit/obj/common.py @@ -51,10 +51,11 @@ class BaseTest(unittest.TestCase): def tearDown(self): shutil.rmtree(self.tmpdir, ignore_errors=True) - def _make_open_diskfile(self, device='dev', partition='9', - account='a', container='c', obj='o', body='test', - extra_metadata=None, policy=None, - frag_index=None, timestamp=None, df_mgr=None): + def _make_diskfile(self, device='dev', partition='9', + account='a', container='c', obj='o', body='test', + extra_metadata=None, policy=None, + frag_index=None, timestamp=None, df_mgr=None, + commit=True): policy = policy or POLICIES.legacy object_parts = account, container, obj timestamp = Timestamp(time.time()) if timestamp is None else timestamp @@ -75,6 +76,16 @@ class BaseTest(unittest.TestCase): if extra_metadata: metadata.update(extra_metadata) writer.put(metadata) - writer.commit(timestamp) + if commit: + writer.commit(timestamp) + return df + + def _make_open_diskfile(self, device='dev', partition='9', + account='a', container='c', obj='o', body='test', + extra_metadata=None, policy=None, + frag_index=None, timestamp=None, df_mgr=None): + df = self._make_diskfile(device, partition, account, container, obj, + body, extra_metadata, policy, frag_index, + timestamp, df_mgr) df.open() return df diff --git a/test/unit/obj/test_ssync.py b/test/unit/obj/test_ssync.py index 1349e16440..e51a7c4455 100644 --- a/test/unit/obj/test_ssync.py +++ b/test/unit/obj/test_ssync.py @@ -115,7 +115,7 @@ class TestBaseSsync(BaseTest): return wrapped_connect, trace def _create_ondisk_files(self, df_mgr, obj_name, policy, timestamp, - frag_indexes=None): + frag_indexes=None, commit=True): frag_indexes = [None] if frag_indexes is 
None else frag_indexes metadata = {'Content-Type': 'plain/text'} diskfiles = [] @@ -123,16 +123,18 @@ class TestBaseSsync(BaseTest): object_data = '/a/c/%s___%s' % (obj_name, frag_index) if frag_index is not None: metadata['X-Object-Sysmeta-Ec-Frag-Index'] = str(frag_index) - df = self._make_open_diskfile( + df = self._make_diskfile( device=self.device, partition=self.partition, account='a', container='c', obj=obj_name, body=object_data, extra_metadata=metadata, timestamp=timestamp, policy=policy, - frag_index=frag_index, df_mgr=df_mgr) - # sanity checks - listing = os.listdir(df._datadir) - self.assertTrue(listing) - for filename in listing: - self.assertTrue(filename.startswith(timestamp.internal)) + frag_index=frag_index, df_mgr=df_mgr, commit=commit) + if commit: + df.open() + # sanity checks + listing = os.listdir(df._datadir) + self.assertTrue(listing) + for filename in listing: + self.assertTrue(filename.startswith(timestamp.internal)) diskfiles.append(df) return diskfiles @@ -325,10 +327,12 @@ class TestSsyncEC(TestBaseSsync): t2 = next(self.ts_iter) tx_objs['o2'] = self._create_ondisk_files( tx_df_mgr, 'o2', policy, t2, (tx_node_index,)) - # o3 only has handoff + # o3 only has handoff, rx has other frag index t3 = next(self.ts_iter) tx_objs['o3'] = self._create_ondisk_files( tx_df_mgr, 'o3', policy, t3, (rx_node_index,)) + rx_objs['o3'] = self._create_ondisk_files( + rx_df_mgr, 'o3', policy, t3, (14,)) # o4 primary and handoff fragment archives on tx, handoff in sync on rx t4 = next(self.ts_iter) tx_objs['o4'] = self._create_ondisk_files( @@ -386,6 +390,111 @@ class TestSsyncEC(TestBaseSsync): tx_objs, policy, frag_index, rx_node_index) self._verify_tombstones(tx_tombstones, policy) + def test_handoff_fragment_only_missing_durable(self): + # test that a sync_revert type job does not PUT when the rx is only + # missing a durable file + policy = POLICIES.default + rx_node_index = frag_index = 0 + tx_node_index = 1 + + # create sender side diskfiles... 
+ tx_objs = {} + rx_objs = {} + tx_df_mgr = self.daemon._diskfile_router[policy] + rx_df_mgr = self.rx_controller._diskfile_router[policy] + + expected_subreqs = defaultdict(list) + + # o1 in sync on rx but rx missing .durable - no PUT required + t1a = next(self.ts_iter) # older rx .data with .durable + t1b = next(self.ts_iter) # rx .meta + t1c = next(self.ts_iter) # tx .data with .durable, rx missing .durable + obj_name = 'o1' + tx_objs[obj_name] = self._create_ondisk_files( + tx_df_mgr, obj_name, policy, t1c, (tx_node_index, rx_node_index,)) + rx_objs[obj_name] = self._create_ondisk_files( + rx_df_mgr, obj_name, policy, t1a, (rx_node_index,)) + metadata = {'X-Timestamp': t1b.internal} + rx_objs[obj_name][0].write_metadata(metadata) + rx_objs[obj_name] = self._create_ondisk_files( + rx_df_mgr, obj_name, policy, t1c, (rx_node_index, 9), commit=False) + + # o2 on rx has wrong frag_indexes and missing .durable - PUT required + t2 = next(self.ts_iter) + obj_name = 'o2' + tx_objs[obj_name] = self._create_ondisk_files( + tx_df_mgr, obj_name, policy, t2, (tx_node_index, rx_node_index,)) + rx_objs[obj_name] = self._create_ondisk_files( + rx_df_mgr, obj_name, policy, t2, (13, 14), commit=False) + expected_subreqs['PUT'].append(obj_name) + + # o3 on rx has frag at other time missing .durable - PUT required + t3 = next(self.ts_iter) + obj_name = 'o3' + tx_objs[obj_name] = self._create_ondisk_files( + tx_df_mgr, obj_name, policy, t3, (tx_node_index, rx_node_index,)) + t3b = next(self.ts_iter) + rx_objs[obj_name] = self._create_ondisk_files( + rx_df_mgr, obj_name, policy, t3b, (rx_node_index,), commit=False) + expected_subreqs['PUT'].append(obj_name) + + # o4 on rx has a newer tombstone and even newer frags - no PUT required + t4 = next(self.ts_iter) + obj_name = 'o4' + tx_objs[obj_name] = self._create_ondisk_files( + tx_df_mgr, obj_name, policy, t4, (tx_node_index, rx_node_index,)) + rx_objs[obj_name] = self._create_ondisk_files( + rx_df_mgr, obj_name, policy, t4, 
(rx_node_index,)) + t4b = next(self.ts_iter) + rx_objs[obj_name][0].delete(t4b) + t4c = next(self.ts_iter) + rx_objs[obj_name] = self._create_ondisk_files( + rx_df_mgr, obj_name, policy, t4c, (rx_node_index,), commit=False) + + suffixes = set() + for diskfiles in tx_objs.values(): + for df in diskfiles: + suffixes.add(os.path.basename(os.path.dirname(df._datadir))) + + # create ssync sender instance... + job = {'device': self.device, + 'partition': self.partition, + 'policy': policy, + 'frag_index': frag_index} + node = dict(self.rx_node) + node.update({'index': rx_node_index}) + sender = ssync_sender.Sender(self.daemon, node, job, suffixes) + # wrap connection from tx to rx to capture ssync messages... + sender.connect, trace = self.make_connect_wrapper(sender) + + # run the sync protocol... + sender() + + # verify protocol + results = self._analyze_trace(trace) + self.assertEqual(4, len(results['tx_missing'])) + self.assertEqual(2, len(results['rx_missing'])) + self.assertEqual(2, len(results['tx_updates'])) + self.assertFalse(results['rx_updates']) + for subreq in results.get('tx_updates'): + obj = subreq['path'].split('/')[3] + method = subreq['method'] + self.assertTrue(obj in expected_subreqs[method], + 'Unexpected %s subreq for object %s, expected %s' + % (method, obj, expected_subreqs[method])) + expected_subreqs[method].remove(obj) + if method == 'PUT': + expected_body = '%s___%s' % (subreq['path'], rx_node_index) + self.assertEqual(expected_body, subreq['body']) + # verify all expected subreqs consumed + for _method, expected in expected_subreqs.items(): + self.assertFalse(expected) + + # verify on disk files... 
+ tx_objs.pop('o4') # o4 should not have been sync'd + self._verify_ondisk_files( + tx_objs, policy, frag_index, rx_node_index) + def test_fragment_sync(self): # check that a sync_only type job does call reconstructor to build a # diskfile to send, and continues making progress despite an error diff --git a/test/unit/obj/test_ssync_receiver.py b/test/unit/obj/test_ssync_receiver.py index 037828741a..fc233601b2 100644 --- a/test/unit/obj/test_ssync_receiver.py +++ b/test/unit/obj/test_ssync_receiver.py @@ -665,6 +665,117 @@ class TestReceiver(unittest.TestCase): self.assertFalse(self.controller.logger.error.called) self.assertFalse(self.controller.logger.exception.called) + @patch_policies(with_ec_default=True) + def test_MISSING_CHECK_missing_durable(self): + self.controller.logger = mock.MagicMock() + self.controller._diskfile_router = diskfile.DiskFileRouter( + self.conf, self.controller.logger) + + # make rx disk file but don't commit it, so .durable is missing + ts1 = next(make_timestamp_iter()).internal + object_dir = utils.storage_directory( + os.path.join(self.testdir, 'sda1', + diskfile.get_data_dir(POLICIES[0])), + '1', self.hash1) + utils.mkdirs(object_dir) + fp = open(os.path.join(object_dir, ts1 + '#2.data'), 'w+') + fp.write('1') + fp.flush() + metadata1 = { + 'name': self.name1, + 'X-Timestamp': ts1, + 'Content-Length': '1'} + diskfile.write_metadata(fp, metadata1) + + # make a request - expect no data to be wanted + req = swob.Request.blank( + '/sda1/1', + environ={'REQUEST_METHOD': 'SSYNC', + 'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0', + 'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '2'}, + body=':MISSING_CHECK: START\r\n' + + self.hash1 + ' ' + ts1 + '\r\n' + ':MISSING_CHECK: END\r\n' + ':UPDATES: START\r\n:UPDATES: END\r\n') + resp = req.get_response(self.controller) + self.assertEqual( + self.body_lines(resp.body), + [':MISSING_CHECK: START', + ':MISSING_CHECK: END', + ':UPDATES: START', ':UPDATES: END']) + self.assertEqual(resp.status_int, 200) + 
self.assertFalse(self.controller.logger.error.called) + self.assertFalse(self.controller.logger.exception.called) + + @patch_policies(with_ec_default=True) + @mock.patch('swift.obj.diskfile.ECDiskFileWriter.commit') + def test_MISSING_CHECK_missing_durable_but_commit_fails(self, mock_commit): + self.controller.logger = mock.MagicMock() + self.controller._diskfile_router = diskfile.DiskFileRouter( + self.conf, self.controller.logger) + + # make rx disk file but don't commit it, so .durable is missing + ts1 = next(make_timestamp_iter()).internal + object_dir = utils.storage_directory( + os.path.join(self.testdir, 'sda1', + diskfile.get_data_dir(POLICIES[0])), + '1', self.hash1) + utils.mkdirs(object_dir) + fp = open(os.path.join(object_dir, ts1 + '#2.data'), 'w+') + fp.write('1') + fp.flush() + metadata1 = { + 'name': self.name1, + 'X-Timestamp': ts1, + 'Content-Length': '1'} + diskfile.write_metadata(fp, metadata1) + + # make a request with commit disabled - expect data to be wanted + req = swob.Request.blank( + '/sda1/1', + environ={'REQUEST_METHOD': 'SSYNC', + 'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0', + 'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '2'}, + body=':MISSING_CHECK: START\r\n' + + self.hash1 + ' ' + ts1 + '\r\n' + ':MISSING_CHECK: END\r\n' + ':UPDATES: START\r\n:UPDATES: END\r\n') + resp = req.get_response(self.controller) + self.assertEqual( + self.body_lines(resp.body), + [':MISSING_CHECK: START', + self.hash1 + ' dm', + ':MISSING_CHECK: END', + ':UPDATES: START', ':UPDATES: END']) + self.assertEqual(resp.status_int, 200) + self.assertFalse(self.controller.logger.error.called) + self.assertFalse(self.controller.logger.exception.called) + + # make a request with commit raising error - expect data to be wanted + mock_commit.side_effect = Exception + req = swob.Request.blank( + '/sda1/1', + environ={'REQUEST_METHOD': 'SSYNC', + 'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0', + 'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '2'}, + body=':MISSING_CHECK: START\r\n' + + self.hash1 
+ ' ' + ts1 + '\r\n' + ':MISSING_CHECK: END\r\n' + ':UPDATES: START\r\n:UPDATES: END\r\n') + resp = req.get_response(self.controller) + self.assertEqual( + self.body_lines(resp.body), + [':MISSING_CHECK: START', + self.hash1 + ' dm', + ':MISSING_CHECK: END', + ':UPDATES: START', ':UPDATES: END']) + self.assertEqual(resp.status_int, 200) + self.assertFalse(self.controller.logger.error.called) + self.assertTrue(self.controller.logger.exception.called) + self.assertIn( + 'EXCEPTION in replication.Receiver while attempting commit of', + self.controller.logger.exception.call_args[0][0]) + def test_MISSING_CHECK_storage_policy(self): # update router post policy patch self.controller._diskfile_router = diskfile.DiskFileRouter( From 936881543e60fa437cd0f91f029f9874fd7437d9 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Thu, 14 Jan 2016 18:31:21 +0000 Subject: [PATCH 016/141] Reclaim isolated .meta files It is possible for an object dir to contain only a .meta file*, and currently this file will never be deleted. This patch changes the diskfile on disk file processing to identify any isolated meta files and mark them as ready to be reclaimed, so that the cleanup will reclaim them if they are older than reclaim age. *An isolated .meta can occur as follows: Consider two replicas of the same object whose ondisk files have diverged due to failures: A has t2.ts B has t1.data, t4.meta (The DELETE at t2 did not make it to B. The POST at t4 was rejected by A.) 
After ssync replication the two ondisk file sets will be: A has t2.ts B has t2.ts, t4.meta Once t2 becomes older than (current time - reclaim age) we are left with: B has t4.meta Closes-Bug: #1534685 Change-Id: I3a175fd948a331d08df1b28f9831d7529216c6f1 --- swift/obj/diskfile.py | 17 ++- test/unit/obj/test_diskfile.py | 201 +++++++++++++++++++++++---------- 2 files changed, 155 insertions(+), 63 deletions(-) diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index 3a8b41bb54..bffb566f75 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -694,7 +694,7 @@ class BaseDiskFileManager(object): if exts.get('.ts'): results['ts_info'] = exts['.ts'][0] if 'data_info' in results and exts.get('.meta'): - # only report meta files if there is a data file + # only report a meta file if a data file has been chosen results['meta_info'] = exts['.meta'][0] ctype_info = exts['.meta'].pop() if (ctype_info['ctype_timestamp'] @@ -742,7 +742,7 @@ class BaseDiskFileManager(object): remove_file(join(hsh_path, results['ts_info']['filename'])) files.remove(results.pop('ts_info')['filename']) for file_info in results.get('possible_reclaim', []): - # stray fragments are not deleted until reclaim-age + # stray files are not deleted until reclaim-age if is_reclaimable(file_info['timestamp']): results.setdefault('obsolete', []).append(file_info) for file_info in results.get('obsolete', []): @@ -2360,6 +2360,11 @@ class DiskFileManager(BaseDiskFileManager): # set results results['data_info'] = exts['.data'][0] + # .meta files *may* be ready for reclaim if there is no data + if exts.get('.meta') and not exts.get('.data'): + results.setdefault('possible_reclaim', []).extend( + exts.get('.meta')) + def _update_suffix_hashes(self, hashes, ondisk_info): """ Applies policy specific updates to the given dict of md5 hashes for @@ -2700,13 +2705,17 @@ class ECDiskFileManager(BaseDiskFileManager): results.setdefault('obsolete', []).extend(exts['.durable']) exts.pop('.durable') - # 
Fragments *may* be ready for reclaim, unless they are durable or - # at the timestamp we have just chosen for constructing the diskfile. + # Fragments *may* be ready for reclaim, unless they are durable for frag_set in frag_sets.values(): if frag_set == durable_frag_set: continue results.setdefault('possible_reclaim', []).extend(frag_set) + # .meta files *may* be ready for reclaim if there is no durable data + if exts.get('.meta') and not durable_frag_set: + results.setdefault('possible_reclaim', []).extend( + exts.get('.meta')) + def _verify_ondisk_files(self, results, frag_index=None, **kwargs): """ Verify that the final combination of on disk files complies with the diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index 657a29ed53..feecc4fc50 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -38,6 +38,7 @@ from contextlib import closing, contextmanager from gzip import GzipFile from eventlet import hubs, timeout, tpool +from swift.obj.diskfile import MD5_OF_EMPTY_STRING from test.unit import (FakeLogger, mock as unit_mock, temptree, patch_policies, debug_logger, EMPTY_ETAG, make_timestamp_iter, DEFAULT_TEST_EC_TYPE) @@ -656,6 +657,40 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): 'Unexpected file %s' % os.path.join(datadir, unexpected))) + def test_hash_cleanup_listdir_reclaim_non_data_files(self): + # Each scenario specifies a list of (filename, extension, [survives]) + # tuples. If extension is set or 'survives' is True, the filename + # should still be in the dir after cleanup. 
+ much_older = Timestamp(time() - 2000).internal + older = Timestamp(time() - 1001).internal + newer = Timestamp(time() - 900).internal + scenarios = [ + [('%s.ts' % older, False, False)], + + # fresh tombstone is preserved + [('%s.ts' % newer, '.ts', True)], + + # tombstone reclaimed despite junk file + [('junk', False, True), + ('%s.ts' % much_older, '.ts', False)], + + # fresh .meta not reclaimed even if isolated + [('%s.meta' % newer, '.meta')], + + # fresh .meta not reclaimed when tombstone is reclaimed + [('%s.meta' % newer, '.meta'), + ('%s.ts' % older, False, False)], + + # stale isolated .meta is reclaimed + [('%s.meta' % older, False, False)], + + # stale .meta is reclaimed along with tombstone + [('%s.meta' % older, False, False), + ('%s.ts' % older, False, False)]] + + self._test_hash_cleanup_listdir_files(scenarios, POLICIES.default, + reclaim_age=1000) + def test_construct_dev_path(self): res_path = self.df_mgr.construct_dev_path('abc') self.assertEqual(os.path.join(self.df_mgr.devices, 'abc'), res_path) @@ -1123,35 +1158,30 @@ class TestDiskFileManager(DiskFileManagerMixin, unittest.TestCase): self.assertEqual("Invalid Timestamp value in filename 'junk'", str(cm.exception)) - def test_hash_cleanup_listdir_reclaim(self): + def test_hash_cleanup_listdir_reclaim_with_data_files(self): # Each scenario specifies a list of (filename, extension, [survives]) # tuples. If extension is set or 'survives' is True, the filename # should still be in the dir after cleanup. much_older = Timestamp(time() - 2000).internal older = Timestamp(time() - 1001).internal newer = Timestamp(time() - 900).internal - scenarios = [[('%s.ts' % older, False, False)], + scenarios = [ + # .data files are not reclaimed, ever + [('%s.data' % older, '.data', True)], + [('%s.data' % newer, '.data', True)], - # fresh tombstone is preserved - [('%s.ts' % newer, '.ts', True)], + # ... 
and we could have a mixture of fresh and stale .data + [('%s.data' % newer, '.data', True), + ('%s.data' % older, False, False)], - # .data files are not reclaimed, ever - [('%s.data' % older, '.data', True)], - [('%s.data' % newer, '.data', True)], + # tombstone reclaimed despite newer data + [('%s.data' % newer, '.data', True), + ('%s.data' % older, False, False), + ('%s.ts' % much_older, '.ts', False)], - # ... and we could have a mixture of fresh and stale .data - [('%s.data' % newer, '.data', True), - ('%s.data' % older, False, False)], - - # tombstone reclaimed despite newer data - [('%s.data' % newer, '.data', True), - ('%s.data' % older, False, False), - ('%s.ts' % much_older, '.ts', False)], - - # tombstone reclaimed despite junk file - [('junk', False, True), - ('%s.ts' % much_older, '.ts', False)], - ] + # .meta not reclaimed if there is a .data file + [('%s.meta' % older, '.meta'), + ('%s.data' % much_older, '.data')]] self._test_hash_cleanup_listdir_files(scenarios, POLICIES.default, reclaim_age=1000) @@ -1471,57 +1501,61 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): # note: not calling self._test_hash_cleanup_listdir_files(scenarios, 0) # here due to the anomalous scenario as commented above - def test_hash_cleanup_listdir_reclaim(self): + def test_hash_cleanup_listdir_reclaim_with_data_files(self): # Each scenario specifies a list of (filename, extension, [survives]) # tuples. If extension is set or 'survives' is True, the filename # should still be in the dir after cleanup. 
much_older = Timestamp(time() - 2000).internal older = Timestamp(time() - 1001).internal newer = Timestamp(time() - 900).internal - scenarios = [[('%s.ts' % older, False, False)], + scenarios = [ + # isolated .durable is cleaned up immediately + [('%s.durable' % newer, False, False)], - # fresh tombstone is preserved - [('%s.ts' % newer, '.ts', True)], + # ...even when other older files are in dir + [('%s.durable' % older, False, False), + ('%s.ts' % much_older, False, False)], - # isolated .durable is cleaned up immediately - [('%s.durable' % newer, False, False)], + # isolated .data files are cleaned up when stale + [('%s#2.data' % older, False, False), + ('%s#4.data' % older, False, False)], - # ...even when other older files are in dir - [('%s.durable' % older, False, False), - ('%s.ts' % much_older, False, False)], + # ...even when there is an older durable fileset + [('%s#2.data' % older, False, False), + ('%s#4.data' % older, False, False), + ('%s#2.data' % much_older, '.data', True), + ('%s#4.data' % much_older, False, True), + ('%s.durable' % much_older, '.durable', True)], - # isolated .data files are cleaned up when stale - [('%s#2.data' % older, False, False), - ('%s#4.data' % older, False, False)], + # ... but preserved if still fresh + [('%s#2.data' % newer, False, True), + ('%s#4.data' % newer, False, True)], - # ...even when there is an older durable fileset - [('%s#2.data' % older, False, False), - ('%s#4.data' % older, False, False), - ('%s#2.data' % much_older, '.data', True), - ('%s#4.data' % much_older, False, True), - ('%s.durable' % much_older, '.durable', True)], + # ... and we could have a mixture of fresh and stale .data + [('%s#2.data' % newer, False, True), + ('%s#4.data' % older, False, False)], - # ... 
but preserved if still fresh - [('%s#2.data' % newer, False, True), - ('%s#4.data' % newer, False, True)], + # tombstone reclaimed despite newer non-durable data + [('%s#2.data' % newer, False, True), + ('%s#4.data' % older, False, False), + ('%s.ts' % much_older, '.ts', False)], - # ... and we could have a mixture of fresh and stale .data - [('%s#2.data' % newer, False, True), - ('%s#4.data' % older, False, False)], + # tombstone reclaimed despite much older durable + [('%s.ts' % older, '.ts', False), + ('%s.durable' % much_older, False, False)], - # tombstone reclaimed despite newer non-durable data - [('%s#2.data' % newer, False, True), - ('%s#4.data' % older, False, False), - ('%s.ts' % much_older, '.ts', False)], + # .meta not reclaimed if there is durable data + [('%s.meta' % older, '.meta'), + ('%s#4.data' % much_older, False, True), + ('%s.durable' % much_older, '.durable', True)], - # tombstone reclaimed despite much older durable - [('%s.ts' % older, '.ts', False), - ('%s.durable' % much_older, False, False)], + # stale .meta reclaimed along with stale non-durable .data + [('%s.meta' % older, False, False), + ('%s#4.data' % much_older, False, False)], - # tombstone reclaimed despite junk file - [('junk', False, True), - ('%s.ts' % much_older, '.ts', False)], - ] + # stale .meta reclaimed along with stale .durable + [('%s.meta' % older, False, False), + ('%s.durable' % much_older, False, False)]] self._test_hash_cleanup_listdir_files(scenarios, POLICIES.default, reclaim_age=1000) @@ -4677,8 +4711,9 @@ class TestSuffixHashes(unittest.TestCase): file_list = [file1] self.check_hash_cleanup_listdir(policy, file_list, []) - def test_hash_cleanup_listdir_meta_keeps_old_ts(self): + def test_hash_cleanup_listdir_keep_isolated_meta_purge_old_ts(self): for policy in self.iter_policies(): + # A single old .ts file will be removed despite presence of a .meta old_float = time() - (diskfile.ONE_WEEK + 1) file1 = Timestamp(old_float).internal + '.ts' file2 = 
Timestamp(time() + 2).internal + '.meta' @@ -4706,13 +4741,13 @@ class TestSuffixHashes(unittest.TestCase): file_list = [file1] self.check_hash_cleanup_listdir(policy, file_list, []) - def test_hash_cleanup_listdir_keep_single_old_meta(self): + def test_hash_cleanup_listdir_purges_single_old_meta(self): for policy in self.iter_policies(): - # A single old .meta file will not be removed + # A single old .meta file will be removed old_float = time() - (diskfile.ONE_WEEK + 1) file1 = Timestamp(old_float).internal + '.meta' file_list = [file1] - self.check_hash_cleanup_listdir(policy, file_list, [file1]) + self.check_hash_cleanup_listdir(policy, file_list, []) # hash_cleanup_listdir tests - error handling @@ -4871,6 +4906,54 @@ class TestSuffixHashes(unittest.TestCase): }[policy.policy_type] self.assertEqual(hashes, expected) + def test_hash_suffix_one_reclaim_tombstone_and_one_meta(self): + # An isolated meta file can happen if a tombstone is replicated to a + # node with a newer meta file but older data file, and the tombstone is + # subsequently reclaimed. The meta file will be ignored when the + # diskfile is opened so the effective state of the disk files is + # equivalent to having no files. 
+ for policy in self.iter_policies(): + if policy.policy_type == EC_POLICY: + continue + df_mgr = self.df_router[policy] + df = df_mgr.get_diskfile( + 'sda1', '0', 'a', 'c', 'o', policy=policy) + suffix = os.path.basename(os.path.dirname(df._datadir)) + now = time() + # scale back the df manager's reclaim age a bit + df_mgr.reclaim_age = 1000 + # write a tombstone that's just a *little* older than reclaim time + df.delete(Timestamp(now - 10001)) + # write a meta file that's not quite so old + ts_meta = Timestamp(now - 501) + df.write_metadata({'X-Timestamp': ts_meta.internal}) + # sanity check + self.assertEqual(2, len(os.listdir(df._datadir))) + + hashes = df_mgr.get_hashes('sda1', '0', [], policy) + # the tombstone is reclaimed, the meta file remains, the suffix + # hash is not updated BUT the suffix dir cannot be deleted so + # a suffix hash equal to hash of empty string is reported. + # TODO: this is not same result as if the meta file did not exist! + self.assertEqual([ts_meta.internal + '.meta'], + os.listdir(df._datadir)) + self.assertEqual(hashes, {suffix: MD5_OF_EMPTY_STRING}) + + # scale back the df manager's reclaim age even more - call to + # get_hashes does not trigger reclaim because the suffix has + # MD5_OF_EMPTY_STRING in hashes.pkl + df_mgr.reclaim_age = 500 + hashes = df_mgr.get_hashes('sda1', '0', [], policy) + self.assertEqual([ts_meta.internal + '.meta'], + os.listdir(df._datadir)) + self.assertEqual(hashes, {suffix: MD5_OF_EMPTY_STRING}) + + # call get_hashes with recalculate = [suffix] and the suffix dir + # gets re-hashed so the .meta if finally reclaimed. 
+ hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy) + self.assertFalse(os.path.exists(os.path.dirname(df._datadir))) + self.assertEqual(hashes, {}) + def test_hash_suffix_one_reclaim_tombstone(self): for policy in self.iter_policies(): df_mgr = self.df_router[policy] From be54d0c928528cdc1b12e1bcb1614ea8859fae2e Mon Sep 17 00:00:00 2001 From: janonymous Date: Mon, 7 Dec 2015 21:45:43 +0530 Subject: [PATCH 017/141] clear pycache and remove all pyc/pyo before starting unit test Delete python bytecode before every test run. Because python creates pyc files during tox runs, certain changes in the tree, like deletes of files, or switching branches, can create spurious errors. Closes-Bug: #1368661 Change-Id: Iedcb400fa3b0417f5bb8e943b17758fcfb4070c6 --- test/unit/common/middleware/test_xprofile.py | 22 ++++++++++---------- tox.ini | 5 ++++- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/test/unit/common/middleware/test_xprofile.py b/test/unit/common/middleware/test_xprofile.py index 7296f69211..720a722311 100644 --- a/test/unit/common/middleware/test_xprofile.py +++ b/test/unit/common/middleware/test_xprofile.py @@ -458,17 +458,17 @@ class Test_html_viewer(unittest.TestCase): self.log_files) def test_format_source_code(self): - nfl_os = '%s:%d(%s)' % (os.__file__[:-1], 136, 'makedirs') - self.assertTrue('makedirs' in self.viewer.format_source_code(nfl_os)) - self.assertFalse('makedirsXYZ' in - self.viewer.format_source_code(nfl_os)) - nfl_illegal = '%s:136(makedirs)' % os.__file__ - self.assertTrue(_('The file type are forbidden to access!') in - self.viewer.format_source_code(nfl_illegal)) - nfl_not_exist = '%s.py:136(makedirs)' % os.__file__ - expected_msg = _('Can not access the file %s.') % os.__file__ - self.assertTrue(expected_msg in - self.viewer.format_source_code(nfl_not_exist)) + osfile = os.__file__.rstrip('c') + nfl_os = '%s:%d(%s)' % (osfile, 136, 'makedirs') + self.assertIn('makedirs', self.viewer.format_source_code(nfl_os)) + 
self.assertNotIn('makedirsXYZ', self.viewer.format_source_code(nfl_os)) + nfl_illegal = '%sc:136(makedirs)' % osfile + self.assertIn(_('The file type are forbidden to access!'), + self.viewer.format_source_code(nfl_illegal)) + nfl_not_exist = '%s.py:136(makedirs)' % osfile + expected_msg = _('Can not access the file %s.py.') % osfile + self.assertIn(expected_msg, + self.viewer.format_source_code(nfl_not_exist)) class TestStats2(unittest.TestCase): diff --git a/tox.ini b/tox.ini index ac22896de4..fbce200522 100644 --- a/tox.ini +++ b/tox.ini @@ -12,7 +12,10 @@ setenv = VIRTUAL_ENV={envdir} deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = nosetests {posargs:test/unit} +commands = find . -type f -name "*.py[c|o]" -delete + find . -type d -name "__pycache__" -delete + nosetests {posargs:test/unit} +whitelist_externals = find passenv = SWIFT_* *_proxy [testenv:cover] From fa0c5f244f907ed2d5bd6d90f0be44869d7d1de6 Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Mon, 29 Feb 2016 13:14:59 +0000 Subject: [PATCH 018/141] Additionally break container servers during fast-post probes The existing probetests were already doing brainsplitting on the object servers in order to introduce composite metdata timestamp reconciliation. But they were not introducing failures at the container server level, and therefore not covering functional testing of those servers replication consistency repair. This change adds a brain splitter for the container servers to additionally exercise the consistency engine at that layer under the existing probetest so that you can observe their correct behavior while exercising the probetests [1]. 1. 
I used a script like this to watch the object servers and container db's repair themselves -> https://gist.github.com/clayg/a8077d9d29ff68bb0edd Change-Id: Ic5bb03dffef7e30c58338cef969a105577b84620 --- .../probe/test_object_metadata_replication.py | 83 +++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/test/probe/test_object_metadata_replication.py b/test/probe/test_object_metadata_replication.py index d93715bc74..4759d5dfc3 100644 --- a/test/probe/test_object_metadata_replication.py +++ b/test/probe/test_object_metadata_replication.py @@ -45,6 +45,8 @@ class Test(ReplProbeTest): self.brain = BrainSplitter(self.url, self.token, self.container_name, self.object_name, 'object', policy=self.policy) + self.container_brain = BrainSplitter(self.url, self.token, + self.container_name) self.int_client = self.make_internal_client(object_post_as_copy=False) def tearDown(self): @@ -182,40 +184,51 @@ class Test(ReplProbeTest): # put newer object with sysmeta to first server subset self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._put_object() self.brain.start_primary_half() + self.container_brain.start_primary_half() # delete object on second server subset self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() self._delete_object() self.brain.start_handoff_half() + self.container_brain.start_handoff_half() # run replicator self.get_to_final_state() # check object deletion has been replicated on first server set self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._get_object(expect_statuses=(4,)) self.brain.start_primary_half() + self.container_brain.start_primary_half() # check object deletion persists on second server set self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() self._get_object(expect_statuses=(4,)) # put newer object to second server set self._put_object() self.brain.start_handoff_half() + self.container_brain.start_handoff_half() # run replicator 
self.get_to_final_state() # check new object has been replicated on first server set self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._get_object() self.brain.start_primary_half() + self.container_brain.start_primary_half() # check new object persists on second server set self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() self._get_object() def test_object_after_replication_with_subsequent_post(self): @@ -226,10 +239,12 @@ class Test(ReplProbeTest): # put newer object to first server subset self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._put_object(headers={'Content-Type': 'bar'}, body=u'newer') metadata = self._get_object_metadata() etag = metadata['etag'] self.brain.start_primary_half() + self.container_brain.start_primary_half() # post some user meta to all servers self._post_object({'x-object-meta-bar': 'meta-bar'}) @@ -239,11 +254,13 @@ class Test(ReplProbeTest): # check that newer data has been replicated to second server subset self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() metadata = self._get_object_metadata() self.assertEqual(etag, metadata['etag']) self.assertEqual('bar', metadata['content-type']) self.assertEqual('meta-bar', metadata['x-object-meta-bar']) self.brain.start_handoff_half() + self.container_brain.start_handoff_half() self._assert_consistent_object_metadata() self._assert_consistent_container_dbs() @@ -257,15 +274,18 @@ class Test(ReplProbeTest): # put object with sysmeta to first server subset self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._put_object(headers=sysmeta) metadata = self._get_object_metadata() for key in sysmeta: self.assertTrue(key in metadata) self.assertEqual(metadata[key], sysmeta[key]) self.brain.start_primary_half() + self.container_brain.start_primary_half() # put object with updated sysmeta to second server subset self.brain.stop_handoff_half() + 
self.container_brain.stop_handoff_half() self._put_object(headers=sysmeta2) metadata = self._get_object_metadata() for key in sysmeta2: @@ -281,12 +301,14 @@ class Test(ReplProbeTest): self.assertEqual(metadata[key], sysmeta2[key]) self.brain.start_handoff_half() + self.container_brain.start_handoff_half() # run replicator self.get_to_final_state() # check sysmeta has been replicated to first server subset self.brain.stop_primary_half() + self.container_brain.stop_primary_half() metadata = self._get_object_metadata() for key in usermeta: self.assertTrue(key in metadata) @@ -295,9 +317,11 @@ class Test(ReplProbeTest): self.assertTrue(key in metadata, key) self.assertEqual(metadata[key], sysmeta2[key]) self.brain.start_primary_half() + self.container_brain.start_primary_half() # check user sysmeta ok on second server subset self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() metadata = self._get_object_metadata() for key in usermeta: self.assertTrue(key in metadata) @@ -306,6 +330,7 @@ class Test(ReplProbeTest): self.assertTrue(key in metadata, key) self.assertEqual(metadata[key], sysmeta2[key]) self.brain.start_handoff_half() + self.container_brain.start_handoff_half() self._assert_consistent_object_metadata() self._assert_consistent_container_dbs() @@ -319,15 +344,18 @@ class Test(ReplProbeTest): self._put_object() # put newer object with sysmeta to first server subset self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._put_object(headers=sysmeta) metadata = self._get_object_metadata() for key in sysmeta: self.assertTrue(key in metadata) self.assertEqual(metadata[key], sysmeta[key]) self.brain.start_primary_half() + self.container_brain.start_primary_half() # post some user meta to second server subset self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() self._post_object(usermeta) metadata = self._get_object_metadata() for key in usermeta: @@ -336,6 +364,7 @@ class Test(ReplProbeTest): for key 
in sysmeta: self.assertFalse(key in metadata) self.brain.start_handoff_half() + self.container_brain.start_handoff_half() # run replicator self.get_to_final_state() @@ -343,6 +372,7 @@ class Test(ReplProbeTest): # check user metadata has been replicated to first server subset # and sysmeta is unchanged self.brain.stop_primary_half() + self.container_brain.stop_primary_half() metadata = self._get_object_metadata() expected = dict(sysmeta) expected.update(usermeta) @@ -350,14 +380,17 @@ class Test(ReplProbeTest): self.assertTrue(key in metadata, key) self.assertEqual(metadata[key], expected[key]) self.brain.start_primary_half() + self.container_brain.start_primary_half() # check user metadata and sysmeta both on second server subset self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() metadata = self._get_object_metadata() for key in expected.keys(): self.assertTrue(key in metadata, key) self.assertEqual(metadata[key], expected[key]) self.brain.start_handoff_half() + self.container_brain.start_handoff_half() self._assert_consistent_object_metadata() self._assert_consistent_container_dbs() @@ -372,21 +405,25 @@ class Test(ReplProbeTest): # put user meta to first server subset self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() self._post_object(headers=usermeta) metadata = self._get_object_metadata() for key in usermeta: self.assertTrue(key in metadata) self.assertEqual(metadata[key], usermeta[key]) self.brain.start_handoff_half() + self.container_brain.start_handoff_half() # put newer object with sysmeta to second server subset self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._put_object(headers=sysmeta) metadata = self._get_object_metadata() for key in sysmeta: self.assertTrue(key in metadata) self.assertEqual(metadata[key], sysmeta[key]) self.brain.start_primary_half() + self.container_brain.start_primary_half() # run replicator self.get_to_final_state() @@ -394,6 +431,7 @@ class 
Test(ReplProbeTest): # check stale user metadata is not replicated to first server subset # and sysmeta is unchanged self.brain.stop_primary_half() + self.container_brain.stop_primary_half() metadata = self._get_object_metadata() for key in sysmeta: self.assertTrue(key in metadata) @@ -401,10 +439,12 @@ class Test(ReplProbeTest): for key in usermeta: self.assertFalse(key in metadata) self.brain.start_primary_half() + self.container_brain.start_primary_half() # check stale user metadata is removed from second server subset # and sysmeta is replicated self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() metadata = self._get_object_metadata() for key in sysmeta: self.assertTrue(key in metadata) @@ -412,6 +452,7 @@ class Test(ReplProbeTest): for key in usermeta: self.assertFalse(key in metadata) self.brain.start_handoff_half() + self.container_brain.start_handoff_half() self._assert_consistent_object_metadata() self._assert_consistent_container_dbs() @@ -432,18 +473,24 @@ class Test(ReplProbeTest): # incomplete write to primary half self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() self._put_object(headers={'Content-Type': 'foo'}) self.brain.start_handoff_half() + self.container_brain.start_handoff_half() # handoff write self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._put_object(headers={'Content-Type': 'bar'}) self.brain.start_primary_half() + self.container_brain.start_primary_half() # content-type update to primary half self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() self._post_object(headers={'Content-Type': 'baz'}) self.brain.start_handoff_half() + self.container_brain.start_handoff_half() self.get_to_final_state() @@ -481,18 +528,24 @@ class Test(ReplProbeTest): # incomplete write self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() self._put_object(headers={'Content-Type': 'foo'}) self.brain.start_handoff_half() + 
self.container_brain.start_handoff_half() # handoff write self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._put_object(headers={'Content-Type': 'bar'}) self.brain.start_primary_half() + self.container_brain.start_primary_half() # metadata update with newest data unavailable self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() self._post_object(headers={'X-Object-Meta-Color': 'Blue'}) self.brain.start_handoff_half() + self.container_brain.start_handoff_half() self.get_to_final_state() @@ -535,26 +588,34 @@ class Test(ReplProbeTest): # incomplete write self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() self._put_object(headers={'Content-Type': 'foo', 'X-Object-Sysmeta-Test': 'older'}) self.brain.start_handoff_half() + self.container_brain.start_handoff_half() # handoff write self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._put_object(headers={'Content-Type': 'bar', 'X-Object-Sysmeta-Test': 'newer'}) self.brain.start_primary_half() + self.container_brain.start_primary_half() # incomplete post with content type self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() self._post_object(headers={'Content-Type': 'bif'}) self.brain.start_handoff_half() + self.container_brain.start_handoff_half() # incomplete post to handoff with content type self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._post_object(headers={'Content-Type': 'baz', 'X-Object-Meta-Color': 'Red'}) self.brain.start_primary_half() + self.container_brain.start_primary_half() # complete post with no content type self._post_object(headers={'X-Object-Meta-Color': 'Blue', @@ -601,20 +662,26 @@ class Test(ReplProbeTest): # incomplete write to handoff half self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._put_object(headers={'Content-Type': 'bar', 'X-Object-Sysmeta-Test': 'newer'}) self.brain.start_primary_half() + 
self.container_brain.start_primary_half() # incomplete post with no content type to primary half self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() self._post_object(headers={'X-Object-Meta-Color': 'Red', 'X-Object-Sysmeta-Test': 'ignored'}) self.brain.start_handoff_half() + self.container_brain.start_handoff_half() # incomplete post with no content type to handoff half self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._post_object(headers={'X-Object-Meta-Color': 'Blue'}) self.brain.start_primary_half() + self.container_brain.start_primary_half() self.get_to_final_state() @@ -645,38 +712,48 @@ class Test(ReplProbeTest): self.brain.put_container(policy_index=0) # incomplete put to handoff self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._put_object(headers={'Content-Type': 'oldest', 'X-Object-Sysmeta-Test': 'oldest', 'X-Object-Meta-Test': 'oldest'}) self.brain.start_primary_half() + self.container_brain.start_primary_half() # incomplete put to primary self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() self._put_object(headers={'Content-Type': 'oldest', 'X-Object-Sysmeta-Test': 'oldest', 'X-Object-Meta-Test': 'oldest'}) self.brain.start_handoff_half() + self.container_brain.start_handoff_half() # incomplete post with content-type to handoff self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._post_object(headers={'Content-Type': 'newer', 'X-Object-Meta-Test': 'newer'}) self.brain.start_primary_half() + self.container_brain.start_primary_half() # incomplete put to primary self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() self._put_object(headers={'Content-Type': 'newest', 'X-Object-Sysmeta-Test': 'newest', 'X-Object-Meta-Test': 'newer'}) self.brain.start_handoff_half() + self.container_brain.start_handoff_half() # incomplete post with no content-type to handoff which still has # out of date content-type 
self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._post_object(headers={'X-Object-Meta-Test': 'newest'}) metadata = self._get_object_metadata() self.assertEqual(metadata['x-object-meta-test'], 'newest') self.assertEqual(metadata['content-type'], 'newer') self.brain.start_primary_half() + self.container_brain.start_primary_half() self.get_to_final_state() @@ -707,21 +784,26 @@ class Test(ReplProbeTest): self.brain.put_container(policy_index=0) # incomplete put self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._put_object(headers={'Content-Type': 'oldest', 'X-Object-Sysmeta-Test': 'oldest', 'X-Object-Meta-Test': 'oldest'}) self.brain.start_primary_half() + self.container_brain.start_primary_half() # incomplete put then delete self.brain.stop_handoff_half() + self.container_brain.stop_handoff_half() self._put_object(headers={'Content-Type': 'oldest', 'X-Object-Sysmeta-Test': 'oldest', 'X-Object-Meta-Test': 'oldest'}) self._delete_object() self.brain.start_handoff_half() + self.container_brain.start_handoff_half() # handoff post self.brain.stop_primary_half() + self.container_brain.stop_primary_half() self._post_object(headers={'Content-Type': 'newest', 'X-Object-Sysmeta-Test': 'ignored', 'X-Object-Meta-Test': 'newest'}) @@ -733,6 +815,7 @@ class Test(ReplProbeTest): self.assertEqual(metadata['content-type'], 'newest') self.brain.start_primary_half() + self.container_brain.start_primary_half() # delete trumps later post self.get_to_final_state() From 9430f4c9f5e79026f7275f8ec32ef4a180c5e8fc Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Wed, 2 Mar 2016 10:28:51 +0000 Subject: [PATCH 019/141] Move HeaderKeyDict to avoid an inline import There was a function in swift.common.utils that was importing swob.HeaderKeyDict at call time. It couldn't import it at compilation time since utils can't import from swob or else it blows up with a circular import error. 
This commit just moves HeaderKeyDict into swift.common.header_key_dict so that we can remove the inline import. Change-Id: I656fde8cc2e125327c26c589cf1045cb81ffc7e5 --- swift/common/direct_client.py | 2 +- swift/common/header_key_dict.py | 63 +++++++++++++++++ swift/common/middleware/tempurl.py | 5 +- swift/common/swob.py | 48 +------------ swift/common/utils.py | 2 +- swift/container/server.py | 3 +- swift/obj/reconstructor.py | 2 +- swift/obj/server.py | 5 +- swift/proxy/controllers/base.py | 3 +- swift/proxy/controllers/obj.py | 3 +- test/unit/__init__.py | 7 +- test/unit/account/test_utils.py | 2 +- test/unit/common/middleware/helpers.py | 7 +- test/unit/common/middleware/test_bulk.py | 3 +- test/unit/common/middleware/test_dlo.py | 51 +++++++------- test/unit/common/middleware/test_slo.py | 59 ++++++++-------- test/unit/common/middleware/test_tempurl.py | 3 +- test/unit/common/test_direct_client.py | 3 +- test/unit/common/test_header_key_dict.py | 75 +++++++++++++++++++++ test/unit/common/test_internal_client.py | 3 +- test/unit/common/test_swob.py | 58 ---------------- test/unit/common/test_utils.py | 3 +- test/unit/container/test_reconciler.py | 3 +- test/unit/container/test_server.py | 4 +- test/unit/obj/test_reconstructor.py | 2 +- test/unit/obj/test_server.py | 3 +- test/unit/obj/test_updater.py | 8 +-- test/unit/proxy/controllers/test_base.py | 4 +- test/unit/proxy/controllers/test_obj.py | 3 +- test/unit/proxy/test_server.py | 3 +- 30 files changed, 246 insertions(+), 194 deletions(-) create mode 100644 swift/common/header_key_dict.py create mode 100644 test/unit/common/test_header_key_dict.py diff --git a/swift/common/direct_client.py b/swift/common/direct_client.py index 96f2579de0..0dea8acefc 100644 --- a/swift/common/direct_client.py +++ b/swift/common/direct_client.py @@ -33,7 +33,7 @@ from swift.common.exceptions import ClientException from swift.common.utils import Timestamp, FileLikeIter from swift.common.http import HTTP_NO_CONTENT, 
HTTP_INSUFFICIENT_STORAGE, \ is_success, is_server_error -from swift.common.swob import HeaderKeyDict +from swift.common.header_key_dict import HeaderKeyDict from swift.common.utils import quote diff --git a/swift/common/header_key_dict.py b/swift/common/header_key_dict.py new file mode 100644 index 0000000000..fc67bb0f29 --- /dev/null +++ b/swift/common/header_key_dict.py @@ -0,0 +1,63 @@ +# Copyright (c) 2010-2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six + + +class HeaderKeyDict(dict): + """ + A dict that title-cases all keys on the way in, so as to be + case-insensitive. 
+ """ + def __init__(self, base_headers=None, **kwargs): + if base_headers: + self.update(base_headers) + self.update(kwargs) + + def update(self, other): + if hasattr(other, 'keys'): + for key in other.keys(): + self[key.title()] = other[key] + else: + for key, value in other: + self[key.title()] = value + + def __getitem__(self, key): + return dict.get(self, key.title()) + + def __setitem__(self, key, value): + if value is None: + self.pop(key.title(), None) + elif isinstance(value, six.text_type): + return dict.__setitem__(self, key.title(), value.encode('utf-8')) + else: + return dict.__setitem__(self, key.title(), str(value)) + + def __contains__(self, key): + return dict.__contains__(self, key.title()) + + def __delitem__(self, key): + return dict.__delitem__(self, key.title()) + + def get(self, key, default=None): + return dict.get(self, key.title(), default) + + def setdefault(self, key, value=None): + if key not in self: + self[key] = value + return self[key] + + def pop(self, key, default=None): + return dict.pop(self, key.title(), default) diff --git a/swift/common/middleware/tempurl.py b/swift/common/middleware/tempurl.py index b71df51c35..7fece15b34 100644 --- a/swift/common/middleware/tempurl.py +++ b/swift/common/middleware/tempurl.py @@ -169,8 +169,9 @@ from six.moves.urllib.parse import parse_qs from six.moves.urllib.parse import urlencode from swift.proxy.controllers.base import get_account_info, get_container_info -from swift.common.swob import HeaderKeyDict, header_to_environ_key, \ - HTTPUnauthorized, HTTPBadRequest +from swift.common.header_key_dict import HeaderKeyDict +from swift.common.swob import header_to_environ_key, HTTPUnauthorized, \ + HTTPBadRequest from swift.common.utils import split_path, get_valid_utf8_str, \ register_swift_info, get_hmac, streq_const_time, quote diff --git a/swift/common/swob.py b/swift/common/swob.py index c7c9afe358..98ee37278e 100644 --- a/swift/common/swob.py +++ b/swift/common/swob.py @@ -50,6 +50,7 @@ from 
six import BytesIO from six import StringIO from six.moves import urllib +from swift.common.header_key_dict import HeaderKeyDict from swift.common.utils import reiterate, split_path, Timestamp, pairs, \ close_if_possible from swift.common.exceptions import InvalidTimestamp @@ -271,53 +272,6 @@ class HeaderEnvironProxy(MutableMapping): return keys -class HeaderKeyDict(dict): - """ - A dict that title-cases all keys on the way in, so as to be - case-insensitive. - """ - def __init__(self, base_headers=None, **kwargs): - if base_headers: - self.update(base_headers) - self.update(kwargs) - - def update(self, other): - if hasattr(other, 'keys'): - for key in other.keys(): - self[key.title()] = other[key] - else: - for key, value in other: - self[key.title()] = value - - def __getitem__(self, key): - return dict.get(self, key.title()) - - def __setitem__(self, key, value): - if value is None: - self.pop(key.title(), None) - elif isinstance(value, six.text_type): - return dict.__setitem__(self, key.title(), value.encode('utf-8')) - else: - return dict.__setitem__(self, key.title(), str(value)) - - def __contains__(self, key): - return dict.__contains__(self, key.title()) - - def __delitem__(self, key): - return dict.__delitem__(self, key.title()) - - def get(self, key, default=None): - return dict.get(self, key.title(), default) - - def setdefault(self, key, value=None): - if key not in self: - self[key] = value - return self[key] - - def pop(self, key, default=None): - return dict.pop(self, key.title(), default) - - def _resp_status_property(): """ Set and retrieve the value of Response.status diff --git a/swift/common/utils.py b/swift/common/utils.py index 66f6ad777b..9547bf8f6a 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -68,6 +68,7 @@ from swift import gettext_ as _ import swift.common.exceptions from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND, \ HTTP_PRECONDITION_FAILED, HTTP_REQUESTED_RANGE_NOT_SATISFIABLE +from 
swift.common.header_key_dict import HeaderKeyDict if six.PY3: stdlib_queue = eventlet.patcher.original('queue') @@ -3648,7 +3649,6 @@ def parse_mime_headers(doc_file): :param doc_file: binary file-like object containing a MIME document :returns: a swift.common.swob.HeaderKeyDict containing the headers """ - from swift.common.swob import HeaderKeyDict # avoid circular import headers = [] while True: line = doc_file.readline() diff --git a/swift/container/server.py b/swift/container/server.py index 7e24bd4e50..92bb595e8f 100644 --- a/swift/container/server.py +++ b/swift/container/server.py @@ -41,10 +41,11 @@ from swift.common.exceptions import ConnectionTimeout from swift.common.http import HTTP_NOT_FOUND, is_success from swift.common.storage_policy import POLICIES from swift.common.base_storage_server import BaseStorageServer +from swift.common.header_key_dict import HeaderKeyDict from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPConflict, \ HTTPCreated, HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \ HTTPPreconditionFailed, HTTPMethodNotAllowed, Request, Response, \ - HTTPInsufficientStorage, HTTPException, HeaderKeyDict + HTTPInsufficientStorage, HTTPException def gen_resp_headers(info, is_deleted=False): diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py index 151c00c1e7..e2ad368344 100644 --- a/swift/obj/reconstructor.py +++ b/swift/obj/reconstructor.py @@ -32,7 +32,7 @@ from swift.common.utils import ( whataremyips, unlink_older_than, compute_eta, get_logger, dump_recon_cache, mkdirs, config_true_value, list_from_csv, get_hub, tpool_reraise, GreenAsyncPile, Timestamp, remove_file) -from swift.common.swob import HeaderKeyDict +from swift.common.header_key_dict import HeaderKeyDict from swift.common.bufferedhttp import http_connect from swift.common.daemon import Daemon from swift.common.ring.utils import is_local_device diff --git a/swift/obj/server.py b/swift/obj/server.py index ac3c7f39e5..5fdcd56a5f 100644 --- 
a/swift/obj/server.py +++ b/swift/obj/server.py @@ -44,14 +44,15 @@ from swift.common.exceptions import ConnectionTimeout, DiskFileQuarantined, \ from swift.obj import ssync_receiver from swift.common.http import is_success from swift.common.base_storage_server import BaseStorageServer +from swift.common.header_key_dict import HeaderKeyDict from swift.common.request_helpers import get_name_and_placement, \ is_user_meta, is_sys_or_user_meta from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \ HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \ HTTPPreconditionFailed, HTTPRequestTimeout, HTTPUnprocessableEntity, \ HTTPClientDisconnect, HTTPMethodNotAllowed, Request, Response, \ - HTTPInsufficientStorage, HTTPForbidden, HTTPException, HeaderKeyDict, \ - HTTPConflict, HTTPServerError + HTTPInsufficientStorage, HTTPForbidden, HTTPException, HTTPConflict, \ + HTTPServerError from swift.obj.diskfile import DATAFILE_SYSTEM_META, DiskFileRouter diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index fc36d9dfae..e25620c58e 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -47,11 +47,12 @@ from swift.common.utils import Timestamp, config_true_value, \ from swift.common.bufferedhttp import http_connect from swift.common.exceptions import ChunkReadTimeout, ChunkWriteTimeout, \ ConnectionTimeout, RangeAlreadyComplete +from swift.common.header_key_dict import HeaderKeyDict from swift.common.http import is_informational, is_success, is_redirection, \ is_server_error, HTTP_OK, HTTP_PARTIAL_CONTENT, HTTP_MULTIPLE_CHOICES, \ HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVICE_UNAVAILABLE, \ HTTP_INSUFFICIENT_STORAGE, HTTP_UNAUTHORIZED, HTTP_CONTINUE -from swift.common.swob import Request, Response, HeaderKeyDict, Range, \ +from swift.common.swob import Request, Response, Range, \ HTTPException, HTTPRequestedRangeNotSatisfiable, HTTPServiceUnavailable, \ status_map from swift.common.request_helpers 
import strip_sys_meta_prefix, \ diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index f3c13d589f..dea29eab3a 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -57,6 +57,7 @@ from swift.common.exceptions import ChunkReadTimeout, \ ChunkWriteTimeout, ConnectionTimeout, ResponseTimeout, \ InsufficientStorage, FooterNotSupported, MultiphasePUTNotSupported, \ PutterConnectError, ChunkReadError +from swift.common.header_key_dict import HeaderKeyDict from swift.common.http import ( is_informational, is_success, is_client_error, is_server_error, HTTP_CONTINUE, HTTP_CREATED, HTTP_MULTIPLE_CHOICES, @@ -69,7 +70,7 @@ from swift.proxy.controllers.base import Controller, delay_denial, \ cors_validation, ResumingGetter from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \ HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPRequestTimeout, \ - HTTPServerError, HTTPServiceUnavailable, Request, HeaderKeyDict, \ + HTTPServerError, HTTPServiceUnavailable, Request, \ HTTPClientDisconnect, HTTPUnprocessableEntity, Response, HTTPException, \ HTTPRequestedRangeNotSatisfiable, Range, HTTPInternalServerError from swift.common.request_helpers import is_sys_or_user_meta, is_sys_meta, \ diff --git a/test/unit/__init__.py b/test/unit/__init__.py index 9068d84307..377ee2ecc2 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -34,7 +34,8 @@ from tempfile import mkdtemp from shutil import rmtree from swift.common.utils import Timestamp, NOTICE from test import get_config -from swift.common import swob, utils +from swift.common import utils +from swift.common.header_key_dict import HeaderKeyDict from swift.common.ring import Ring, RingData from hashlib import md5 import logging.handlers @@ -901,7 +902,7 @@ def fake_http_connect(*code_iter, **kwargs): else: etag = '"68b329da9893e34099c7d8ad5cb9c940"' - headers = swob.HeaderKeyDict({ + headers = HeaderKeyDict({ 'content-length': len(self.body), 
'content-type': 'x-application/test', 'x-timestamp': self.timestamp, @@ -960,7 +961,7 @@ def fake_http_connect(*code_iter, **kwargs): eventlet.sleep(value) def getheader(self, name, default=None): - return swob.HeaderKeyDict(self.getheaders()).get(name, default) + return HeaderKeyDict(self.getheaders()).get(name, default) def close(self): pass diff --git a/test/unit/account/test_utils.py b/test/unit/account/test_utils.py index 35467d0dab..f931ad58bd 100644 --- a/test/unit/account/test_utils.py +++ b/test/unit/account/test_utils.py @@ -20,7 +20,7 @@ import mock from swift.account import utils, backend from swift.common.storage_policy import POLICIES from swift.common.utils import Timestamp -from swift.common.swob import HeaderKeyDict +from swift.common.header_key_dict import HeaderKeyDict from test.unit import patch_policies diff --git a/test/unit/common/middleware/helpers.py b/test/unit/common/middleware/helpers.py index 1387a773b4..0847a1cbcf 100644 --- a/test/unit/common/middleware/helpers.py +++ b/test/unit/common/middleware/helpers.py @@ -19,6 +19,7 @@ from collections import defaultdict from copy import deepcopy from hashlib import md5 from swift.common import swob +from swift.common.header_key_dict import HeaderKeyDict from swift.common.utils import split_path from test.unit import FakeLogger, FakeRing @@ -85,18 +86,18 @@ class FakeSwift(object): try: resp_class, raw_headers, body = self._find_response(method, path) - headers = swob.HeaderKeyDict(raw_headers) + headers = HeaderKeyDict(raw_headers) except KeyError: if (env.get('QUERY_STRING') and (method, env['PATH_INFO']) in self._responses): resp_class, raw_headers, body = self._find_response( method, env['PATH_INFO']) - headers = swob.HeaderKeyDict(raw_headers) + headers = HeaderKeyDict(raw_headers) elif method == 'HEAD' and ('GET', path) in self._responses: resp_class, raw_headers, body = self._find_response( 'GET', path) body = None - headers = swob.HeaderKeyDict(raw_headers) + headers = 
HeaderKeyDict(raw_headers) elif method == 'GET' and obj and path in self.uploaded: resp_class = swob.HTTPOk headers, body = self.uploaded[path] diff --git a/test/unit/common/middleware/test_bulk.py b/test/unit/common/middleware/test_bulk.py index a024a94ff6..1888261629 100644 --- a/test/unit/common/middleware/test_bulk.py +++ b/test/unit/common/middleware/test_bulk.py @@ -29,9 +29,10 @@ from eventlet import sleep from mock import patch, call from test.unit.common.middleware.helpers import FakeSwift from swift.common import utils, constraints +from swift.common.header_key_dict import HeaderKeyDict from swift.common.middleware import bulk from swift.common.swob import Request, Response, HTTPException, \ - HTTPNoContent, HTTPCreated, HeaderKeyDict + HTTPNoContent, HTTPCreated from swift.common.http import HTTP_NOT_FOUND, HTTP_UNAUTHORIZED diff --git a/test/unit/common/middleware/test_dlo.py b/test/unit/common/middleware/test_dlo.py index 00d107ad33..1374b403df 100644 --- a/test/unit/common/middleware/test_dlo.py +++ b/test/unit/common/middleware/test_dlo.py @@ -24,6 +24,7 @@ import time import unittest from swift.common import exceptions, swob +from swift.common.header_key_dict import HeaderKeyDict from swift.common.middleware import dlo from swift.common.utils import closing_if_possible from test.unit.common.middleware.helpers import FakeSwift @@ -248,7 +249,7 @@ class TestDloHeadManifest(DloTestCase): req = swob.Request.blank('/v1/AUTH_test/mancon/manifest', environ={'REQUEST_METHOD': 'HEAD'}) status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(headers["Etag"], expected_etag) self.assertEqual(headers["Content-Length"], "25") @@ -257,7 +258,7 @@ class TestDloHeadManifest(DloTestCase): environ={'REQUEST_METHOD': 'HEAD'}) with mock.patch(LIMIT, 3): status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) # etag is manifest's 
etag self.assertEqual(headers["Etag"], "etag-manyseg") @@ -267,7 +268,7 @@ class TestDloHeadManifest(DloTestCase): req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-no-segments', environ={'REQUEST_METHOD': 'HEAD'}) status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(headers["Etag"], '"%s"' % md5hex("")) self.assertEqual(headers["Content-Length"], "0") @@ -291,7 +292,7 @@ class TestDloGetManifest(DloTestCase): req = swob.Request.blank('/v1/AUTH_test/mancon/manifest', environ={'REQUEST_METHOD': 'GET'}) status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(headers["Etag"], expected_etag) self.assertEqual(headers["Content-Length"], "25") self.assertEqual(body, 'aaaaabbbbbcccccdddddeeeee') @@ -336,7 +337,7 @@ class TestDloGetManifest(DloTestCase): environ={'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'multipart-manifest=get'}) status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(headers["Etag"], "manifest-etag") self.assertEqual(body, "manifest-contents") @@ -354,7 +355,7 @@ class TestDloGetManifest(DloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=8-17'}) status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, "206 Partial Content") self.assertEqual(headers["Content-Length"], "10") self.assertEqual(body, "bbcccccddd") @@ -368,7 +369,7 @@ class TestDloGetManifest(DloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=10-19'}) status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, "206 Partial Content") self.assertEqual(headers["Content-Length"], "10") self.assertEqual(body, "cccccddddd") @@ -378,7 +379,7 @@ class 
TestDloGetManifest(DloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=0-0'}) status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, "206 Partial Content") self.assertEqual(headers["Content-Length"], "1") self.assertEqual(body, "a") @@ -388,7 +389,7 @@ class TestDloGetManifest(DloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=24-24'}) status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, "206 Partial Content") self.assertEqual(headers["Content-Length"], "1") self.assertEqual(body, "e") @@ -398,7 +399,7 @@ class TestDloGetManifest(DloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=18-30'}) status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, "206 Partial Content") self.assertEqual(headers["Content-Length"], "7") self.assertEqual(headers["Content-Range"], "bytes 18-24/25") @@ -417,7 +418,7 @@ class TestDloGetManifest(DloTestCase): headers={'Range': 'bytes=3-12'}) with mock.patch(LIMIT, 3): status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, "206 Partial Content") self.assertEqual(headers["Content-Length"], "10") # The /15 here indicates that this is a 15-byte object. 
DLO can't tell @@ -448,7 +449,7 @@ class TestDloGetManifest(DloTestCase): headers={'Range': 'bytes=10-22'}) with mock.patch(LIMIT, 3): status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, "200 OK") # this requires multiple pages of container listing, so we can't send # a Content-Length header @@ -460,7 +461,7 @@ class TestDloGetManifest(DloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=-40'}) status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, "206 Partial Content") self.assertEqual(headers["Content-Length"], "25") self.assertEqual(body, "aaaaabbbbbcccccdddddeeeee") @@ -471,7 +472,7 @@ class TestDloGetManifest(DloTestCase): headers={'Range': 'bytes=-5'}) with mock.patch(LIMIT, 3): status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, "200 OK") self.assertEqual(headers.get("Content-Length"), None) self.assertEqual(headers.get("Content-Range"), None) @@ -485,7 +486,7 @@ class TestDloGetManifest(DloTestCase): headers={'Range': 'bytes=5-9,15-19'}) with mock.patch(LIMIT, 3): status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, "200 OK") self.assertEqual(headers.get("Content-Length"), None) self.assertEqual(headers.get("Content-Range"), None) @@ -500,7 +501,7 @@ class TestDloGetManifest(DloTestCase): headers={'If-Match': manifest_etag}) status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '200 OK') self.assertEqual(headers['Content-Length'], '25') @@ -512,7 +513,7 @@ class TestDloGetManifest(DloTestCase): headers={'If-Match': 'not it'}) status, headers, body = self.call_dlo(req) - headers = 
swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '412 Precondition Failed') self.assertEqual(headers['Content-Length'], '0') @@ -527,7 +528,7 @@ class TestDloGetManifest(DloTestCase): headers={'If-None-Match': manifest_etag}) status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '304 Not Modified') self.assertEqual(headers['Content-Length'], '0') @@ -539,7 +540,7 @@ class TestDloGetManifest(DloTestCase): headers={'If-None-Match': 'not it'}) status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '200 OK') self.assertEqual(headers['Content-Length'], '25') @@ -582,7 +583,7 @@ class TestDloGetManifest(DloTestCase): req = swob.Request.blank('/v1/AUTH_test/mancon/manifest', environ={'REQUEST_METHOD': 'GET'}) status, headers, body, exc = self.call_dlo(req, expect_exception=True) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertTrue(isinstance(exc, exceptions.SegmentError)) self.assertEqual(status, "200 OK") @@ -628,7 +629,7 @@ class TestDloGetManifest(DloTestCase): req = swob.Request.blank('/v1/AUTH_test/mancon/manifest', environ={'REQUEST_METHOD': 'GET'}) status, headers, body, exc = self.call_dlo(req, expect_exception=True) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertTrue(isinstance(exc, exceptions.SegmentError)) self.assertEqual(status, "200 OK") @@ -653,7 +654,7 @@ class TestDloGetManifest(DloTestCase): req = swob.Request.blank('/v1/AUTH_test/mani/festo', environ={'REQUEST_METHOD': 'HEAD'}) status, headers, body = self.call_dlo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(headers["Etag"], '"' + hashlib.md5("abcdef").hexdigest() + '"') @@ -729,7 +730,7 @@ class TestDloGetManifest(DloTestCase): '/v1/AUTH_test/mancon/manifest', 
environ={'REQUEST_METHOD': 'GET'}) status, headers, body, exc = self.call_dlo(req, expect_exception=True) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '200 OK') # sanity check self.assertEqual(headers.get('Content-Length'), '25') # sanity check @@ -762,7 +763,7 @@ class TestDloGetManifest(DloTestCase): '/v1/AUTH_test/mancon/manifest', environ={'REQUEST_METHOD': 'GET'}) status, headers, body, exc = self.call_dlo(req, expect_exception=True) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '200 OK') # sanity check self.assertEqual(headers.get('Content-Length'), '25') # sanity check @@ -781,7 +782,7 @@ class TestDloGetManifest(DloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=0-14'}) status, headers, body, exc = self.call_dlo(req, expect_exception=True) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '206 Partial Content') # sanity check self.assertEqual(headers.get('Content-Length'), '15') # sanity check diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py index c33860cb74..34024f1e47 100644 --- a/test/unit/common/middleware/test_slo.py +++ b/test/unit/common/middleware/test_slo.py @@ -24,6 +24,7 @@ from mock import patch from hashlib import md5 from swift.common import swob, utils from swift.common.exceptions import ListingIterError, SegmentError +from swift.common.header_key_dict import HeaderKeyDict from swift.common.middleware import slo from swift.common.swob import Request, Response, HTTPException from swift.common.utils import quote, closing_if_possible, close_if_possible @@ -1054,7 +1055,7 @@ class TestSloHeadManifest(SloTestCase): '/v1/AUTH_test/headtest/man', environ={'REQUEST_METHOD': 'HEAD'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, 
'200 OK') self.assertEqual(headers.get('Etag', '').strip("'\""), @@ -1331,7 +1332,7 @@ class TestSloGetManifest(SloTestCase): '/v1/AUTH_test/gettest/manifest-bc', environ={'REQUEST_METHOD': 'GET'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) manifest_etag = md5hex(md5hex("b" * 10) + md5hex("c" * 15)) self.assertEqual(status, '200 OK') @@ -1382,7 +1383,7 @@ class TestSloGetManifest(SloTestCase): '/v1/AUTH_test/gettest/manifest-aabbccdd', environ={'REQUEST_METHOD': 'GET'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '200 OK') self.assertEqual(body, ( @@ -1469,7 +1470,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'If-None-Match': self.manifest_abcd_etag}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '304 Not Modified') self.assertEqual(headers['Content-Length'], '0') @@ -1481,7 +1482,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'If-None-Match': "not-%s" % self.manifest_abcd_etag}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '200 OK') self.assertEqual( @@ -1493,7 +1494,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'If-Match': self.manifest_abcd_etag}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '200 OK') self.assertEqual( @@ -1505,7 +1506,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'If-Match': "not-%s" % self.manifest_abcd_etag}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = 
HeaderKeyDict(headers) self.assertEqual(status, '412 Precondition Failed') self.assertEqual(headers['Content-Length'], '0') @@ -1518,7 +1519,7 @@ class TestSloGetManifest(SloTestCase): headers={'If-Match': self.manifest_abcd_etag, 'Range': 'bytes=3-6'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '206 Partial Content') self.assertEqual(headers['Content-Length'], '4') @@ -1529,7 +1530,7 @@ class TestSloGetManifest(SloTestCase): '/v1/AUTH_test/gettest/manifest-abcd', environ={'REQUEST_METHOD': 'GET'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '200 OK') self.assertEqual(headers['Content-Length'], '50') @@ -1543,7 +1544,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=3-17'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '206 Partial Content') self.assertEqual(headers['Content-Length'], '15') @@ -1582,7 +1583,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=0-999999999'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '206 Partial Content') self.assertEqual( @@ -1619,7 +1620,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=100000-199999'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '206 Partial Content') count_e = sum(1 if x == 'e' else 0 for x in body) @@ -1656,7 +1657,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=0-999999999'}) status, headers, body = 
self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '206 Partial Content') self.assertEqual( @@ -1678,7 +1679,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=5-29'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '206 Partial Content') self.assertEqual(headers['Content-Length'], '25') @@ -1706,7 +1707,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=0-0'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '206 Partial Content') self.assertEqual(headers['Content-Length'], '1') @@ -1726,7 +1727,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=25-30'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '206 Partial Content') self.assertEqual(headers['Content-Length'], '6') self.assertEqual(body, 'cccccd') @@ -1747,7 +1748,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=45-55'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '206 Partial Content') self.assertEqual(headers['Content-Length'], '5') @@ -1769,7 +1770,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=0-0,2-2'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '200 OK') self.assertEqual(headers['Content-Length'], '50') @@ -1799,7 +1800,7 @@ class TestSloGetManifest(SloTestCase): 
'/v1/AUTH_test/ünicode/manifest', environ={'REQUEST_METHOD': 'GET'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '200 OK') self.assertEqual(body, segment_body) @@ -1808,7 +1809,7 @@ class TestSloGetManifest(SloTestCase): '/v1/AUTH_test/gettest/manifest-abcd-ranges', environ={'REQUEST_METHOD': 'GET'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '200 OK') self.assertEqual(headers['Content-Length'], '32') @@ -1850,7 +1851,7 @@ class TestSloGetManifest(SloTestCase): '/v1/AUTH_test/gettest/manifest-abcd-subranges', environ={'REQUEST_METHOD': 'GET'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '200 OK') self.assertEqual(headers['Content-Length'], '17') @@ -1899,7 +1900,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=7-26'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '206 Partial Content') self.assertEqual(headers['Content-Length'], '20') @@ -1937,7 +1938,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=4-12'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '206 Partial Content') self.assertEqual(headers['Content-Length'], '9') @@ -1988,7 +1989,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=0-999999999'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '206 Partial Content') self.assertEqual(headers['Content-Length'], '32') @@ 
-2025,7 +2026,7 @@ class TestSloGetManifest(SloTestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Range': 'bytes=0-0,2-2'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '200 OK') self.assertEqual(headers['Content-Type'], 'application/json') @@ -2039,7 +2040,7 @@ class TestSloGetManifest(SloTestCase): '/v1/AUTH_test/gettest/manifest-badjson', environ={'REQUEST_METHOD': 'GET'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '200 OK') self.assertEqual(headers['Content-Length'], '0') @@ -2113,7 +2114,7 @@ class TestSloGetManifest(SloTestCase): '/v1/AUTH_test/gettest/manifest-abcd', environ={'REQUEST_METHOD': 'HEAD'}) status, headers, body = self.call_slo(req) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertEqual(status, '200 OK') self.assertEqual(headers['Content-Length'], '50') @@ -2171,7 +2172,7 @@ class TestSloGetManifest(SloTestCase): '/v1/AUTH_test/gettest/man1', environ={'REQUEST_METHOD': 'GET'}) status, headers, body, exc = self.call_slo(req, expect_exception=True) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertIsInstance(exc, ListingIterError) # we don't know at header-sending time that things are going to go @@ -2319,7 +2320,7 @@ class TestSloGetManifest(SloTestCase): '/v1/AUTH_test/gettest/manifest-abcd', environ={'REQUEST_METHOD': 'GET'}) status, headers, body, exc = self.call_slo(req, expect_exception=True) - headers = swob.HeaderKeyDict(headers) + headers = HeaderKeyDict(headers) self.assertIsInstance(exc, SegmentError) self.assertEqual(status, '200 OK') diff --git a/test/unit/common/middleware/test_tempurl.py b/test/unit/common/middleware/test_tempurl.py index ff06eb8510..d407ba58b1 100644 --- a/test/unit/common/middleware/test_tempurl.py +++ 
b/test/unit/common/middleware/test_tempurl.py @@ -35,7 +35,8 @@ from hashlib import sha1 from time import time from swift.common.middleware import tempauth, tempurl -from swift.common.swob import Request, Response, HeaderKeyDict +from swift.common.header_key_dict import HeaderKeyDict +from swift.common.swob import Request, Response from swift.common import utils diff --git a/test/unit/common/test_direct_client.py b/test/unit/common/test_direct_client.py index c7fd0a9588..664a6227b1 100644 --- a/test/unit/common/test_direct_client.py +++ b/test/unit/common/test_direct_client.py @@ -26,8 +26,9 @@ from six.moves import urllib from swift.common import direct_client from swift.common.exceptions import ClientException +from swift.common.header_key_dict import HeaderKeyDict from swift.common.utils import Timestamp -from swift.common.swob import HeaderKeyDict, RESPONSE_REASONS +from swift.common.swob import RESPONSE_REASONS from swift.common.storage_policy import POLICIES from six.moves.http_client import HTTPException diff --git a/test/unit/common/test_header_key_dict.py b/test/unit/common/test_header_key_dict.py new file mode 100644 index 0000000000..5f3f669704 --- /dev/null +++ b/test/unit/common/test_header_key_dict.py @@ -0,0 +1,75 @@ +# Copyright (c) 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +from swift.common.header_key_dict import HeaderKeyDict + + +class TestHeaderKeyDict(unittest.TestCase): + def test_case_insensitive(self): + headers = HeaderKeyDict() + headers['Content-Length'] = 0 + headers['CONTENT-LENGTH'] = 10 + headers['content-length'] = 20 + self.assertEqual(headers['Content-Length'], '20') + self.assertEqual(headers['content-length'], '20') + self.assertEqual(headers['CONTENT-LENGTH'], '20') + + def test_setdefault(self): + headers = HeaderKeyDict() + + # it gets set + headers.setdefault('x-rubber-ducky', 'the one') + self.assertEqual(headers['X-Rubber-Ducky'], 'the one') + + # it has the right return value + ret = headers.setdefault('x-boat', 'dinghy') + self.assertEqual(ret, 'dinghy') + + ret = headers.setdefault('x-boat', 'yacht') + self.assertEqual(ret, 'dinghy') + + # shouldn't crash + headers.setdefault('x-sir-not-appearing-in-this-request', None) + + def test_del_contains(self): + headers = HeaderKeyDict() + headers['Content-Length'] = 0 + self.assertTrue('Content-Length' in headers) + del headers['Content-Length'] + self.assertTrue('Content-Length' not in headers) + + def test_update(self): + headers = HeaderKeyDict() + headers.update({'Content-Length': '0'}) + headers.update([('Content-Type', 'text/plain')]) + self.assertEqual(headers['Content-Length'], '0') + self.assertEqual(headers['Content-Type'], 'text/plain') + + def test_get(self): + headers = HeaderKeyDict() + headers['content-length'] = 20 + self.assertEqual(headers.get('CONTENT-LENGTH'), '20') + self.assertEqual(headers.get('something-else'), None) + self.assertEqual(headers.get('something-else', True), True) + + def test_keys(self): + headers = HeaderKeyDict() + headers['content-length'] = 20 + headers['cOnTent-tYpe'] = 'text/plain' + headers['SomeThing-eLse'] = 'somevalue' + self.assertEqual( + set(headers.keys()), + set(('Content-Length', 'Content-Type', 'Something-Else'))) diff --git a/test/unit/common/test_internal_client.py 
b/test/unit/common/test_internal_client.py index 0900da42e3..834206e55b 100644 --- a/test/unit/common/test_internal_client.py +++ b/test/unit/common/test_internal_client.py @@ -27,6 +27,7 @@ from six.moves.urllib.parse import quote from test.unit import FakeLogger from eventlet.green import urllib2 from swift.common import exceptions, internal_client, swob +from swift.common.header_key_dict import HeaderKeyDict from swift.common.storage_policy import StoragePolicy from test.unit import with_tempdir, write_fake_ring, patch_policies @@ -1027,7 +1028,7 @@ class TestInternalClient(unittest.TestCase): 'user-agent': 'test', # from InternalClient.make_request }) self.assertEqual(app.calls_with_headers, [( - 'GET', path_info, swob.HeaderKeyDict(req_headers))]) + 'GET', path_info, HeaderKeyDict(req_headers))]) def test_iter_object_lines(self): class InternalClient(internal_client.InternalClient): diff --git a/test/unit/common/test_swob.py b/test/unit/common/test_swob.py index c1900ec5e4..fede30785d 100644 --- a/test/unit/common/test_swob.py +++ b/test/unit/common/test_swob.py @@ -98,64 +98,6 @@ class TestHeaderEnvironProxy(unittest.TestCase): set(('Content-Length', 'Content-Type', 'Something-Else'))) -class TestHeaderKeyDict(unittest.TestCase): - def test_case_insensitive(self): - headers = swift.common.swob.HeaderKeyDict() - headers['Content-Length'] = 0 - headers['CONTENT-LENGTH'] = 10 - headers['content-length'] = 20 - self.assertEqual(headers['Content-Length'], '20') - self.assertEqual(headers['content-length'], '20') - self.assertEqual(headers['CONTENT-LENGTH'], '20') - - def test_setdefault(self): - headers = swift.common.swob.HeaderKeyDict() - - # it gets set - headers.setdefault('x-rubber-ducky', 'the one') - self.assertEqual(headers['X-Rubber-Ducky'], 'the one') - - # it has the right return value - ret = headers.setdefault('x-boat', 'dinghy') - self.assertEqual(ret, 'dinghy') - - ret = headers.setdefault('x-boat', 'yacht') - self.assertEqual(ret, 'dinghy') - - # 
shouldn't crash - headers.setdefault('x-sir-not-appearing-in-this-request', None) - - def test_del_contains(self): - headers = swift.common.swob.HeaderKeyDict() - headers['Content-Length'] = 0 - self.assertTrue('Content-Length' in headers) - del headers['Content-Length'] - self.assertTrue('Content-Length' not in headers) - - def test_update(self): - headers = swift.common.swob.HeaderKeyDict() - headers.update({'Content-Length': '0'}) - headers.update([('Content-Type', 'text/plain')]) - self.assertEqual(headers['Content-Length'], '0') - self.assertEqual(headers['Content-Type'], 'text/plain') - - def test_get(self): - headers = swift.common.swob.HeaderKeyDict() - headers['content-length'] = 20 - self.assertEqual(headers.get('CONTENT-LENGTH'), '20') - self.assertEqual(headers.get('something-else'), None) - self.assertEqual(headers.get('something-else', True), True) - - def test_keys(self): - headers = swift.common.swob.HeaderKeyDict() - headers['content-length'] = 20 - headers['cOnTent-tYpe'] = 'text/plain' - headers['SomeThing-eLse'] = 'somevalue' - self.assertEqual( - set(headers.keys()), - set(('Content-Length', 'Content-Type', 'Something-Else'))) - - class TestRange(unittest.TestCase): def test_range(self): swob_range = swift.common.swob.Range('bytes=1-7') diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 63c746de51..3ebc8f6dc4 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -60,7 +60,8 @@ from swift.common.exceptions import Timeout, MessageTimeout, \ MimeInvalid, ThreadPoolDead from swift.common import utils from swift.common.container_sync_realms import ContainerSyncRealms -from swift.common.swob import Request, Response, HeaderKeyDict +from swift.common.header_key_dict import HeaderKeyDict +from swift.common.swob import Request, Response from test.unit import FakeLogger threading = eventlet.patcher.original('threading') diff --git a/test/unit/container/test_reconciler.py 
b/test/unit/container/test_reconciler.py index 4a00e72f2e..0e1346273d 100644 --- a/test/unit/container/test_reconciler.py +++ b/test/unit/container/test_reconciler.py @@ -31,6 +31,7 @@ from swift.container import reconciler from swift.container.server import gen_resp_headers from swift.common.direct_client import ClientException from swift.common import swob +from swift.common.header_key_dict import HeaderKeyDict from swift.common.utils import split_path, Timestamp, encode_timestamps from test.unit import debug_logger, FakeRing, fake_http_connect @@ -43,7 +44,7 @@ def timestamp_to_last_modified(timestamp): def container_resp_headers(**kwargs): - return swob.HeaderKeyDict(gen_resp_headers(kwargs)) + return HeaderKeyDict(gen_resp_headers(kwargs)) class FakeStoragePolicySwift(object): diff --git a/test/unit/container/test_server.py b/test/unit/container/test_server.py index 0205bca3bf..fd0e5a5633 100644 --- a/test/unit/container/test_server.py +++ b/test/unit/container/test_server.py @@ -34,8 +34,8 @@ from six import BytesIO from six import StringIO from swift import __version__ as swift_version -from swift.common.swob import (Request, HeaderKeyDict, - WsgiBytesIO, HTTPNoContent) +from swift.common.header_key_dict import HeaderKeyDict +from swift.common.swob import (Request, WsgiBytesIO, HTTPNoContent) import swift.container from swift.container import server as container_server from swift.common import constraints diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py index a093a80213..19fc1f8bba 100755 --- a/test/unit/obj/test_reconstructor.py +++ b/test/unit/obj/test_reconstructor.py @@ -30,8 +30,8 @@ from contextlib import closing, contextmanager from gzip import GzipFile from shutil import rmtree from swift.common import utils -from swift.common.swob import HeaderKeyDict from swift.common.exceptions import DiskFileError +from swift.common.header_key_dict import HeaderKeyDict from swift.obj import diskfile, reconstructor as 
object_reconstructor from swift.common import ring from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy, diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index 40c37ee39c..a4de8d7a35 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -50,11 +50,12 @@ from test.unit import connect_tcp, readuntil2crlfs, patch_policies from swift.obj import server as object_server from swift.obj import diskfile from swift.common import utils, bufferedhttp +from swift.common.header_key_dict import HeaderKeyDict from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \ NullLogger, storage_directory, public, replication, encode_timestamps, \ Timestamp from swift.common import constraints -from swift.common.swob import Request, HeaderKeyDict, WsgiBytesIO +from swift.common.swob import Request, WsgiBytesIO from swift.common.splice import splice from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy, POLICIES, EC_POLICY) diff --git a/test/unit/obj/test_updater.py b/test/unit/obj/test_updater.py index 79cfd7a695..1990e4a6b3 100644 --- a/test/unit/obj/test_updater.py +++ b/test/unit/obj/test_updater.py @@ -35,9 +35,9 @@ from swift.obj.diskfile import (ASYNCDIR_BASE, get_async_dir, DiskFileManager, get_tmp_dir) from swift.common.ring import RingData from swift.common import utils +from swift.common.header_key_dict import HeaderKeyDict from swift.common.utils import hash_path, normalize_timestamp, mkdirs, \ write_pickle -from swift.common import swob from test.unit import debug_logger, patch_policies, mocked_http_conn from swift.common.storage_policy import StoragePolicy, POLICIES @@ -316,7 +316,7 @@ class TestObjectUpdater(unittest.TestCase): out.flush() self.assertEqual(inc.readline(), 'PUT /sda1/0/a/c/o HTTP/1.1\r\n') - headers = swob.HeaderKeyDict() + headers = HeaderKeyDict() line = inc.readline() while line and line != '\r\n': headers[line.split(':')[0]] = \ @@ -404,7 +404,7 @@ class 
TestObjectUpdater(unittest.TestCase): daemon = object_updater.ObjectUpdater(conf, logger=self.logger) dfmanager = DiskFileManager(conf, daemon.logger) # don't include storage-policy-index in headers_out pickle - headers_out = swob.HeaderKeyDict({ + headers_out = HeaderKeyDict({ 'x-size': 0, 'x-content-type': 'text/plain', 'x-etag': 'd41d8cd98f00b204e9800998ecf8427e', @@ -452,7 +452,7 @@ class TestObjectUpdater(unittest.TestCase): dfmanager = DiskFileManager(conf, daemon.logger) account, container, obj = 'a', 'c', 'o' op = 'PUT' - headers_out = swob.HeaderKeyDict({ + headers_out = HeaderKeyDict({ 'x-size': 0, 'x-content-type': 'text/plain', 'x-etag': 'd41d8cd98f00b204e9800998ecf8427e', diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index 295be11cee..4bc8991d04 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -23,10 +23,10 @@ from swift.proxy.controllers.base import headers_to_container_info, \ get_object_env_key, get_info, get_object_info, \ Controller, GetOrHeadHandler, _set_info_cache, _set_object_info_cache, \ bytes_to_skip -from swift.common.swob import Request, HTTPException, HeaderKeyDict, \ - RESPONSE_REASONS +from swift.common.swob import Request, HTTPException, RESPONSE_REASONS from swift.common import exceptions from swift.common.utils import split_path +from swift.common.header_key_dict import HeaderKeyDict from swift.common.http import is_success from swift.common.storage_policy import StoragePolicy from test.unit import fake_http_connect, FakeRing, FakeMemcache diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index d0fcf96fd3..08a0be9e98 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -31,6 +31,7 @@ from six.moves import range import swift from swift.common import utils, swob, exceptions +from swift.common.header_key_dict import HeaderKeyDict from 
swift.proxy import server as proxy_server from swift.proxy.controllers import obj from swift.proxy.controllers.base import get_info as _real_get_info @@ -1074,7 +1075,7 @@ class StubResponse(object): self.status = status self.body = body self.readable = BytesIO(body) - self.headers = swob.HeaderKeyDict(headers) + self.headers = HeaderKeyDict(headers) fake_reason = ('Fake', 'This response is a lie.') self.reason = swob.RESPONSE_REASONS.get(status, fake_reason)[0] diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 5f9d03094d..8aba81ffb1 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -74,8 +74,9 @@ from swift.proxy.controllers.base import get_container_memcache_key, \ get_account_memcache_key, cors_validation, _get_info_cache import swift.proxy.controllers import swift.proxy.controllers.obj +from swift.common.header_key_dict import HeaderKeyDict from swift.common.swob import Request, Response, HTTPUnauthorized, \ - HTTPException, HeaderKeyDict, HTTPBadRequest + HTTPException, HTTPBadRequest from swift.common import storage_policy from swift.common.storage_policy import StoragePolicy, ECStoragePolicy, \ StoragePolicyCollection, POLICIES From 82c8023b894aa7a1fe12fbf1efbfad47afdf86b3 Mon Sep 17 00:00:00 2001 From: Mahati Chamarthy Date: Tue, 8 Mar 2016 19:01:53 +0530 Subject: [PATCH 020/141] remove hash_cleanup_listdir legacy translation method Change-Id: I0b96dfde32b4c666eebda6e88228516dd693ef92 closes-bug:#1550569 --- swift/obj/diskfile.py | 22 +++---------- test/unit/obj/test_diskfile.py | 58 +++++++++++++++++----------------- 2 files changed, 33 insertions(+), 47 deletions(-) diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index 3a8b41bb54..23b94aa2fb 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -751,20 +751,6 @@ class BaseDiskFileManager(object): results['files'] = files return results - def hash_cleanup_listdir(self, hsh_path, reclaim_age=ONE_WEEK): - """ - List 
contents of a hash directory and clean up any old files. - For EC policy, delete files older than a .durable or .ts file. - - :param hsh_path: object hash path - :param reclaim_age: age in seconds at which to remove tombstones - :returns: list of files remaining in the directory, reverse sorted - """ - # maintain compatibility with 'legacy' hash_cleanup_listdir - # return value - return self.cleanup_ondisk_files( - hsh_path, reclaim_age=reclaim_age)['files'] - def _update_suffix_hashes(self, hashes, ondisk_info): """ Applies policy specific updates to the given dict of md5 hashes for @@ -1065,8 +1051,8 @@ class BaseDiskFileManager(object): dev_path, get_data_dir(policy), str(partition), object_hash[-3:], object_hash) try: - filenames = self.hash_cleanup_listdir(object_path, - self.reclaim_age) + filenames = self.cleanup_ondisk_files(object_path, + self.reclaim_age)['files'] except OSError as err: if err.errno == errno.ENOTDIR: quar_path = self.quarantine_renamer(dev_path, object_path) @@ -1322,7 +1308,7 @@ class BaseDiskFileWriter(object): self._put_succeeded = True if cleanup: try: - self.manager.hash_cleanup_listdir(self._datadir) + self.manager.cleanup_ondisk_files(self._datadir)['files'] except OSError: logging.exception(_('Problem cleaning up %s'), self._datadir) @@ -2411,7 +2397,7 @@ class ECDiskFileWriter(BaseDiskFileWriter): exc = DiskFileNoSpace(str(err)) else: try: - self.manager.hash_cleanup_listdir(self._datadir) + self.manager.cleanup_ondisk_files(self._datadir)['files'] except OSError as os_err: self.manager.logger.exception( _('Problem cleaning up %s (%s)') % diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index 657a29ed53..9829eb868e 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -569,13 +569,13 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): expected_after_cleanup = set([f[0] for f in test if (f[2] if len(f) > 2 else f[1])]) if reclaim_age: - 
class_under_test.hash_cleanup_listdir( - hashdir, reclaim_age=reclaim_age) + class_under_test.cleanup_ondisk_files( + hashdir, reclaim_age=reclaim_age)['files'] else: with mock.patch('swift.obj.diskfile.time') as mock_time: # don't reclaim anything mock_time.time.return_value = 0.0 - class_under_test.hash_cleanup_listdir(hashdir) + class_under_test.cleanup_ondisk_files(hashdir)['files'] after_cleanup = set(os.listdir(hashdir)) errmsg = "expected %r, got %r for test %r" % ( sorted(expected_after_cleanup), sorted(after_cleanup), test @@ -746,9 +746,9 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value=None) with mock.patch(self._manager_mock('diskfile_cls')), \ mock.patch(self._manager_mock( - 'hash_cleanup_listdir')) as hclistdir, \ + 'cleanup_ondisk_files')) as hclistdir, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta: - hclistdir.return_value = ['1381679759.90941.data'] + hclistdir.return_value = {'files': ['1381679759.90941.data']} readmeta.return_value = {'name': '/a/c/o'} self.assertRaises( DiskFileDeviceUnavailable, @@ -759,7 +759,7 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/') with mock.patch(self._manager_mock('diskfile_cls')), \ mock.patch(self._manager_mock( - 'hash_cleanup_listdir')) as hclistdir, \ + 'cleanup_ondisk_files')) as hclistdir, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta, \ mock.patch(self._manager_mock( 'quarantine_renamer')) as quarantine_renamer: @@ -779,7 +779,7 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/') with mock.patch(self._manager_mock('diskfile_cls')), \ mock.patch(self._manager_mock( - 'hash_cleanup_listdir')) as hclistdir, \ + 'cleanup_ondisk_files')) as hclistdir, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta: osexc = OSError() osexc.errno = errno.ENOENT @@ -794,7 +794,7 @@ class 
DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/') with mock.patch(self._manager_mock('diskfile_cls')), \ mock.patch(self._manager_mock( - 'hash_cleanup_listdir')) as hclistdir, \ + 'cleanup_ondisk_files')) as hclistdir, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta: osexc = OSError() hclistdir.side_effect = osexc @@ -808,9 +808,9 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/') with mock.patch(self._manager_mock('diskfile_cls')), \ mock.patch(self._manager_mock( - 'hash_cleanup_listdir')) as hclistdir, \ + 'cleanup_ondisk_files')) as hclistdir, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta: - hclistdir.return_value = [] + hclistdir.return_value = {'files': []} readmeta.return_value = {'name': '/a/c/o'} self.assertRaises( DiskFileNotExist, @@ -821,9 +821,9 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/') with mock.patch(self._manager_mock('diskfile_cls')), \ mock.patch(self._manager_mock( - 'hash_cleanup_listdir')) as hclistdir, \ + 'cleanup_ondisk_files')) as hclistdir, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta: - hclistdir.return_value = ['1381679759.90941.data'] + hclistdir.return_value = {'files': ['1381679759.90941.data']} readmeta.side_effect = EOFError() self.assertRaises( DiskFileNotExist, @@ -834,9 +834,9 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/') with mock.patch(self._manager_mock('diskfile_cls')), \ mock.patch(self._manager_mock( - 'hash_cleanup_listdir')) as hclistdir, \ + 'cleanup_ondisk_files')) as hclistdir, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta: - hclistdir.return_value = ['1381679759.90941.data'] + hclistdir.return_value = {'files': ['1381679759.90941.data']} readmeta.return_value = {} try: 
self.df_mgr.get_diskfile_from_hash( @@ -850,9 +850,9 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/') with mock.patch(self._manager_mock('diskfile_cls')), \ mock.patch(self._manager_mock( - 'hash_cleanup_listdir')) as hclistdir, \ + 'cleanup_ondisk_files')) as hclistdir, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta: - hclistdir.return_value = ['1381679759.90941.data'] + hclistdir.return_value = {'files': ['1381679759.90941.data']} readmeta.return_value = {'name': 'bad'} try: self.df_mgr.get_diskfile_from_hash( @@ -866,9 +866,9 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/') with mock.patch(self._manager_mock('diskfile_cls')) as dfclass, \ mock.patch(self._manager_mock( - 'hash_cleanup_listdir')) as hclistdir, \ + 'cleanup_ondisk_files')) as hclistdir, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta: - hclistdir.return_value = ['1381679759.90941.data'] + hclistdir.return_value = {'files': ['1381679759.90941.data']} readmeta.return_value = {'name': '/a/c/o'} self.df_mgr.get_diskfile_from_hash( 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0]) @@ -3091,7 +3091,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): } writer.put(metadata) with mock.patch(self._manager_mock( - 'hash_cleanup_listdir', df), mock_hcl): + 'cleanup_ondisk_files', df), mock_hcl): writer.commit(timestamp) expected = { EC_POLICY: 1, @@ -3125,11 +3125,11 @@ class DiskFileMixin(BaseDiskFileTestMixin): 'Content-Length': '0', } with mock.patch(self._manager_mock( - 'hash_cleanup_listdir', df)) as mock_hcl: + 'cleanup_ondisk_files', df)) as mock_hcl: writer.put(metadata) self.assertEqual(expected[0], mock_hcl.call_count) with mock.patch(self._manager_mock( - 'hash_cleanup_listdir', df)) as mock_hcl: + 'cleanup_ondisk_files', df)) as mock_hcl: writer.commit(timestamp) self.assertEqual(expected[1], mock_hcl.call_count) @@ 
-3151,15 +3151,15 @@ class DiskFileMixin(BaseDiskFileTestMixin): 'Content-Length': '0', } with mock.patch(self._manager_mock( - 'hash_cleanup_listdir', df)) as mock_hcl: + 'cleanup_ondisk_files', df)) as mock_hcl: writer.put(metadata) self.assertEqual(expected[0], mock_hcl.call_count) with mock.patch(self._manager_mock( - 'hash_cleanup_listdir', df)) as mock_hcl: + 'cleanup_ondisk_files', df)) as mock_hcl: writer.commit(timestamp) self.assertEqual(expected[1], mock_hcl.call_count) with mock.patch(self._manager_mock( - 'hash_cleanup_listdir', df)) as mock_hcl: + 'cleanup_ondisk_files', df)) as mock_hcl: timestamp = Timestamp(time()) df.delete(timestamp) self.assertEqual(expected[2], mock_hcl.call_count) @@ -3594,7 +3594,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): df = self._get_open_disk_file() file_count = len(os.listdir(df._datadir)) ts = time() - with mock.patch(self._manager_mock('hash_cleanup_listdir'), mock_hcl): + with mock.patch(self._manager_mock('cleanup_ondisk_files'), mock_hcl): try: df.delete(ts) except OSError: @@ -4506,9 +4506,9 @@ class TestSuffixHashes(unittest.TestCase): if isinstance(output_files, Exception): path = os.path.join(self.testdir, 'does-not-matter') self.assertRaises(output_files.__class__, - df_mgr.hash_cleanup_listdir, path) + df_mgr.cleanup_ondisk_files, path) return - files = df_mgr.hash_cleanup_listdir('/whatever') + files = df_mgr.cleanup_ondisk_files('/whatever')['files'] self.assertEqual(files, output_files) # hash_cleanup_listdir tests - behaviors @@ -4721,7 +4721,7 @@ class TestSuffixHashes(unittest.TestCase): df_mgr = self.df_router[policy] # common.utils.listdir *completely* mutes ENOENT path = os.path.join(self.testdir, 'does-not-exist') - self.assertEqual(df_mgr.hash_cleanup_listdir(path), []) + self.assertEqual(df_mgr.cleanup_ondisk_files(path)['files'], []) def test_hash_cleanup_listdir_hsh_path_other_oserror(self): for policy in self.iter_policies(): @@ -4730,7 +4730,7 @@ class TestSuffixHashes(unittest.TestCase): 
mock_listdir.side_effect = OSError('kaboom!') # but it will raise other OSErrors path = os.path.join(self.testdir, 'does-not-matter') - self.assertRaises(OSError, df_mgr.hash_cleanup_listdir, + self.assertRaises(OSError, df_mgr.cleanup_ondisk_files, path) def test_hash_cleanup_listdir_reclaim_tombstone_remove_file_error(self): From 9db7391e55e069d82f780c4372ffa32ef4e79c35 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Mon, 7 Mar 2016 18:42:01 +0000 Subject: [PATCH 021/141] Object POST update gets distinct async pending file Each object update to a container server is saved in a pending file if the initial update attempt fails. Pending file names were derived from the update request's x-timestamp, which is equal to the object's data file timestamp. This meant that updates due to an object POST used the same async pending file as updates due to the object's PUT. This is not so bad because the object POST update has a superset of the metadata included in the PUT update. But there is a risk of a race condition causing an update to be lost: the updater may open an update file due to a PUT while the object server is writing an update due to a POST to the same file name. The updater could then unlink the file before the more recent update for the POST is sent. This patch changes the POST update pending file name to be derived from the object's metadata timestamp, thus making it distinct from the PUT update pending file name. There is no upgrade impact since existing pending files will continue to be processed. 
Change-Id: I1b093c837efe8c2a64e92075ebd5e1b93e30efb9 --- swift/obj/server.py | 4 +- test/unit/obj/test_server.py | 145 +++++++++++++++++++++++++++++++++++ 2 files changed, 148 insertions(+), 1 deletion(-) diff --git a/swift/obj/server.py b/swift/obj/server.py index ac3c7f39e5..e1d047d265 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -250,7 +250,8 @@ class ObjectController(BaseStorageServer): {'ip': ip, 'port': port, 'dev': contdevice}) data = {'op': op, 'account': account, 'container': container, 'obj': obj, 'headers': headers_out} - timestamp = headers_out['x-timestamp'] + timestamp = headers_out.get('x-meta-timestamp', + headers_out.get('x-timestamp')) self._diskfile_router[policy].pickle_async_update( objdevice, account, container, obj, data, timestamp, policy) @@ -565,6 +566,7 @@ class ObjectController(BaseStorageServer): content_type_headers['Content-Type'] += (';swift_bytes=%s' % swift_bytes) + # object POST updates are PUT to the container server self.container_update( 'PUT', account, container, obj, request, HeaderKeyDict({ diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index 40c37ee39c..bb296a07e6 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -48,6 +48,7 @@ from test.unit import FakeLogger, debug_logger, mocked_http_conn, \ make_timestamp_iter, DEFAULT_TEST_EC_TYPE from test.unit import connect_tcp, readuntil2crlfs, patch_policies from swift.obj import server as object_server +from swift.obj import updater from swift.obj import diskfile from swift.common import utils, bufferedhttp from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \ @@ -697,6 +698,150 @@ class TestObjectController(unittest.TestCase): self._test_POST_container_updates( POLICIES[1], update_etag='override_etag') + def _test_PUT_then_POST_async_pendings(self, policy, update_etag=None): + # Test that PUT and POST requests result in distinct async pending + # files when sync container update fails. 
+ def fake_http_connect(*args): + raise Exception('test') + + device_dir = os.path.join(self.testdir, 'sda1') + ts_iter = make_timestamp_iter() + t_put = ts_iter.next() + update_etag = update_etag or '098f6bcd4621d373cade4e832627b4f6' + + put_headers = { + 'X-Trans-Id': 'put_trans_id', + 'X-Timestamp': t_put.internal, + 'Content-Type': 'application/octet-stream;swift_bytes=123456789', + 'Content-Length': '4', + 'X-Backend-Storage-Policy-Index': int(policy), + 'X-Container-Host': 'chost:cport', + 'X-Container-Partition': 'cpartition', + 'X-Container-Device': 'cdevice'} + if policy.policy_type == EC_POLICY: + put_headers.update({ + 'X-Object-Sysmeta-Ec-Frag-Index': '2', + 'X-Backend-Container-Update-Override-Etag': update_etag, + 'X-Object-Sysmeta-Ec-Etag': update_etag}) + + req = Request.blank('/sda1/p/a/c/o', + environ={'REQUEST_METHOD': 'PUT'}, + headers=put_headers, body='test') + + with mock.patch('swift.obj.server.http_connect', fake_http_connect): + with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''): + resp = req.get_response(self.object_controller) + + self.assertEqual(resp.status_int, 201) + + async_pending_file_put = os.path.join( + device_dir, diskfile.get_async_dir(policy), 'a83', + '06fbf0b514e5199dfc4e00f42eb5ea83-%s' % t_put.internal) + self.assertTrue(os.path.isfile(async_pending_file_put), + 'Expected %s to be a file but it is not.' 
+ % async_pending_file_put) + expected_put_headers = { + 'Referer': 'PUT http://localhost/sda1/p/a/c/o', + 'X-Trans-Id': 'put_trans_id', + 'X-Timestamp': t_put.internal, + 'X-Content-Type': 'application/octet-stream;swift_bytes=123456789', + 'X-Size': '4', + 'X-Etag': '098f6bcd4621d373cade4e832627b4f6', + 'User-Agent': 'object-server %s' % os.getpid(), + 'X-Backend-Storage-Policy-Index': '%d' % int(policy)} + if policy.policy_type == EC_POLICY: + expected_put_headers['X-Etag'] = update_etag + self.assertDictEqual( + pickle.load(open(async_pending_file_put)), + {'headers': expected_put_headers, + 'account': 'a', 'container': 'c', 'obj': 'o', 'op': 'PUT'}) + + # POST with newer metadata returns success and container update + # is expected + t_post = ts_iter.next() + post_headers = { + 'X-Trans-Id': 'post_trans_id', + 'X-Timestamp': t_post.internal, + 'Content-Type': 'application/other', + 'X-Backend-Storage-Policy-Index': int(policy), + 'X-Container-Host': 'chost:cport', + 'X-Container-Partition': 'cpartition', + 'X-Container-Device': 'cdevice'} + req = Request.blank('/sda1/p/a/c/o', + environ={'REQUEST_METHOD': 'POST'}, + headers=post_headers) + + with mock.patch('swift.obj.server.http_connect', fake_http_connect): + with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''): + resp = req.get_response(self.object_controller) + + self.assertEqual(resp.status_int, 202) + + self.maxDiff = None + # check async pending file for PUT is still intact + self.assertDictEqual( + pickle.load(open(async_pending_file_put)), + {'headers': expected_put_headers, + 'account': 'a', 'container': 'c', 'obj': 'o', 'op': 'PUT'}) + + # check distinct async pending file for POST + async_pending_file_post = os.path.join( + device_dir, diskfile.get_async_dir(policy), 'a83', + '06fbf0b514e5199dfc4e00f42eb5ea83-%s' % t_post.internal) + self.assertTrue(os.path.isfile(async_pending_file_post), + 'Expected %s to be a file but it is not.' 
+ % async_pending_file_post) + expected_post_headers = { + 'Referer': 'POST http://localhost/sda1/p/a/c/o', + 'X-Trans-Id': 'post_trans_id', + 'X-Timestamp': t_put.internal, + 'X-Content-Type': 'application/other;swift_bytes=123456789', + 'X-Size': '4', + 'X-Etag': '098f6bcd4621d373cade4e832627b4f6', + 'User-Agent': 'object-server %s' % os.getpid(), + 'X-Backend-Storage-Policy-Index': '%d' % int(policy), + 'X-Meta-Timestamp': t_post.internal, + 'X-Content-Type-Timestamp': t_post.internal, + } + if policy.policy_type == EC_POLICY: + expected_post_headers['X-Etag'] = update_etag + self.assertDictEqual( + pickle.load(open(async_pending_file_post)), + {'headers': expected_post_headers, + 'account': 'a', 'container': 'c', 'obj': 'o', 'op': 'PUT'}) + + # verify that only the POST (most recent) async update gets sent by the + # object updater, and that both update files are deleted + with mock.patch( + 'swift.obj.updater.ObjectUpdater.object_update') as mock_update, \ + mock.patch('swift.obj.updater.dump_recon_cache'): + object_updater = updater.ObjectUpdater( + {'devices': self.testdir, + 'mount_check': 'false'}, logger=debug_logger()) + node = {'id': 1} + mock_ring = mock.MagicMock() + mock_ring.get_nodes.return_value = (99, [node]) + object_updater.container_ring = mock_ring + mock_update.return_value = ((True, 1)) + object_updater.run_once() + self.assertEqual(1, mock_update.call_count) + self.assertEqual((node, 99, 'PUT', '/a/c/o'), + mock_update.call_args_list[0][0][0:4]) + actual_headers = mock_update.call_args_list[0][0][4] + self.assertTrue( + actual_headers.pop('user-agent').startswith('object-updater')) + self.assertDictEqual(expected_post_headers, actual_headers) + self.assertFalse( + os.listdir(os.path.join( + device_dir, diskfile.get_async_dir(policy)))) + + def test_PUT_then_POST_async_updates_with_repl_policy(self): + self._test_PUT_then_POST_async_pendings(POLICIES[0]) + + def test_PUT_then_POST_async_updates_with_EC_policy(self): + 
self._test_PUT_then_POST_async_pendings( + POLICIES[1], update_etag='override_etag') + def test_POST_quarantine_zbyte(self): timestamp = normalize_timestamp(time()) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, From dc6db66d5e4423ee39e8bf423916c035e55732d0 Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Tue, 8 Mar 2016 12:32:39 -0800 Subject: [PATCH 022/141] Spot check more hashes for increase_part_power Just a touch of cleanup to the unittest to demonstrate the stability of the guarantee that objects will hash to the same nodes but have different parts. Change-Id: I4a24187755455366a6435816a138f6175ae713a4 --- test/unit/common/ring/test_builder.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py index 6213089f1a..454c6a130a 100644 --- a/test/unit/common/ring/test_builder.py +++ b/test/unit/common/ring/test_builder.py @@ -26,6 +26,7 @@ from math import ceil from tempfile import mkdtemp from shutil import rmtree import random +import uuid from six.moves import range @@ -2450,6 +2451,7 @@ class TestRingBuilder(unittest.TestCase): self.assertEqual(changed_parts, 0) self.assertEqual(removed_devs, 0) + old_ring = r rd = rb.get_ring() rd.save(ring_file) r = ring.Ring(ring_file) @@ -2468,19 +2470,26 @@ class TestRingBuilder(unittest.TestCase): self.assertEqual(dev, next_dev) # same for last_part moves - for part in range(0, len(replica), 2): + for part in range(0, rb.parts, 2): this_last_moved = rb._last_part_moves[part] next_last_moved = rb._last_part_moves[part + 1] self.assertEqual(this_last_moved, next_last_moved) - # Due to the increased partition power, the partition each object is - # assigned to has changed. 
If the old partition was X, it will now be - # either located in 2*X or 2*X+1 - self.assertTrue(new_part in [old_part * 2, old_part * 2 + 1]) + for i in range(100): + suffix = uuid.uuid4() + account = 'account_%s' % suffix + container = 'container_%s' % suffix + obj = 'obj_%s' % suffix + old_part, old_nodes = old_ring.get_nodes(account, container, obj) + new_part, new_nodes = r.get_nodes(account, container, obj) + # Due to the increased partition power, the partition each object + # is assigned to has changed. If the old partition was X, it will + # now be either located in 2*X or 2*X+1 + self.assertTrue(new_part in [old_part * 2, old_part * 2 + 1]) - # Importantly, we expect the objects to be placed on the same nodes - # after increasing the partition power - self.assertEqual(old_nodes, new_nodes) + # Importantly, we expect the objects to be placed on the same + # nodes after increasing the partition power + self.assertEqual(old_nodes, new_nodes) class TestGetRequiredOverload(unittest.TestCase): From 3eeff7c0fc79fab594e3bbb04be74d397c920253 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Tue, 8 Mar 2016 15:23:40 -0800 Subject: [PATCH 023/141] Fix misleading comment Change-Id: I2e0e671a08fb855bf53c57987c08a7eefca7078a --- swift/common/db.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/swift/common/db.py b/swift/common/db.py index cead803375..1ae1696440 100644 --- a/swift/common/db.py +++ b/swift/common/db.py @@ -45,7 +45,8 @@ DB_PREALLOCATION = False BROKER_TIMEOUT = 25 #: Pickle protocol to use PICKLE_PROTOCOL = 2 -#: Max number of pending entries +#: Max size of .pending file in bytes. When this is exceeded, the pending +# records will be merged. 
PENDING_CAP = 131072 From dd2fbcd4c75974859820f76c3aa78be445170202 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 9 Mar 2016 06:35:57 +0000 Subject: [PATCH 024/141] Imported Translations from Zanata For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: I9196deac606a3d073fb1c400cbee778cbbbd1c1e --- swift/locale/de/LC_MESSAGES/swift.po | 78 ++++++++++++++++++++++++++-- 1 file changed, 74 insertions(+), 4 deletions(-) diff --git a/swift/locale/de/LC_MESSAGES/swift.po b/swift/locale/de/LC_MESSAGES/swift.po index 1cd38fea4e..2fe5a5d2e0 100644 --- a/swift/locale/de/LC_MESSAGES/swift.po +++ b/swift/locale/de/LC_MESSAGES/swift.po @@ -11,13 +11,13 @@ # Monika Wolf , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev176\n" +"Project-Id-Version: swift 2.6.1.dev187\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-08 04:09+0000\n" +"POT-Creation-Date: 2016-03-09 04:27+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-07 06:04+0000\n" +"PO-Revision-Date: 2016-03-08 09:51+0000\n" "Last-Translator: Monika Wolf \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" @@ -55,6 +55,16 @@ msgstr "%(ip)s/%(device)s zurückgemeldet als ausgehängt" msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" +#, python-format +msgid "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" +"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " +"(%(rate).2f/sec, %(remaining)s remaining)" +msgstr "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) Partitionen von %(device)d/" +"%(dtotal)d (%(dpercentage).2f%%) Geräten rekonstruiert in %(time).2fs " +"(%(rate).2f/sec, %(remaining)s verbleibend)" + #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " 
@@ -229,6 +239,11 @@ msgstr "Client beim Lesen getrennt" msgid "Client disconnected without sending enough data" msgstr "Client getrennt ohne dem Senden von genügend Daten" +msgid "Client disconnected without sending last chunk" +msgstr "" +"Die Verbindung zum Client wurde getrennt, bevor der letzte Chunk gesendet " +"wurde. " + #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" @@ -236,7 +251,6 @@ msgstr "" "Clientpfad %(client)s entspricht nicht dem in den Objektmetadaten " "gespeicherten Pfad %(meta)s" -#, fuzzy msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" @@ -304,6 +318,11 @@ msgstr "Fehler beim Downloaden von Daten: %s" msgid "Devices pass completed: %.02fs" msgstr "Gerätedurchgang abgeschlossen: %.02fs" +#, python-format +msgid "Directory %r does not map to a valid policy (%s)" +msgstr "" +"Das Verzeichnis %r kann keiner gültigen Richtlinie (%s) zugeordnet werden." + #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "FEHLER %(db_file)s: %(validate_sync_to_err)s" @@ -383,6 +402,10 @@ msgid "ERROR Exception causing client disconnect" msgstr "" "FEHLER Ausnahme, die zu einer Unterbrechung der Verbindung zum Client führt" +#, python-format +msgid "ERROR Exception transferring data to object servers %s" +msgstr "FEHLER: Ausnahme bei der Übertragung von Daten an die Ojektserver %s" + msgid "ERROR Failed to get my own IPs?" msgstr "FEHLER Eigene IPs konnten nicht abgerufen werden?" 
@@ -560,6 +583,12 @@ msgstr "Fehler beim Syncen der Partition" msgid "Error syncing with node: %s" msgstr "Fehler beim Synchronisieren mit Knoten: %s" +#, python-format +msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" +msgstr "" +"Fehler bei Versuch, erneuten Build zu erstellen für %(path)s policy#" +"%(policy)d frag#%(frag_index)s" + msgid "Error: An error occurred" msgstr "Fehler: Ein Fehler ist aufgetreten" @@ -579,6 +608,9 @@ msgstr "Ausnahme in Reaper-Loop für Konto der höchsten Ebene" msgid "Exception in top-level replication loop" msgstr "Ausnahme in Replizierungsloop der höchsten Ebene" +msgid "Exception in top-levelreconstruction loop" +msgstr "Ausnahme in Rekonstruktionsloop der höchsten Ebene" + #, python-format msgid "Exception while deleting container %s %s" msgstr "Ausnahme beim Löschen von Container %s %s" @@ -682,10 +714,18 @@ msgstr "Keine Richtlinie mit Index %s" msgid "No realm key for %r" msgstr "Kein Bereichsschlüssel für %r" +#, python-format +msgid "No space left on device for %s (%s)" +msgstr "Kein freier Speicherplatz im Gerät für %s (%s) vorhanden." + #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Knotenfehler begrenzt %(ip)s:%(port)s (%(device)s)" +#, python-format +msgid "Not enough object servers ack'ed (got %d)" +msgstr "Es wurden nicht genügend Objektserver bestätigt (got %d)." 
+ #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " @@ -802,6 +842,10 @@ msgstr "Pfad in X-Container-Sync-To ist erforderlich" msgid "Problem cleaning up %s" msgstr "Problem bei der Bereinigung von %s" +#, python-format +msgid "Problem cleaning up %s (%s)" +msgstr "Problem bei der Bereinigung von %s (%s)" + #, python-format msgid "Profiling Error: %s" msgstr "Fehler bei der Profilerstellung: %s" @@ -961,10 +1005,18 @@ msgstr "" msgid "Timeout %(action)s to memcached: %(server)s" msgstr "Zeitlimit %(action)s für memcached: %(server)s" +#, python-format +msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" +msgstr "Zeitüberschreitungsausnahme bei %(ip)s:%(port)s/%(device)s" + #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Versuch, %(method)s %(path)s" +#, python-format +msgid "Trying to get %s status of PUT to %s" +msgstr "Es wird versucht, %s-Status von PUT für %s abzurufen." + #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Versuch, den finalen Status von PUT für %s abzurufen" @@ -978,6 +1030,10 @@ msgstr "Versuch, während des GET-Vorgangs zu lesen (Wiederholung)" msgid "Trying to send to client" msgstr "Versuch, an den Client zu senden" +#, python-format +msgid "Trying to sync suffixes with %s" +msgstr "Es wird versucht, Suffixe mit %s zu synchronisieren." + #, python-format msgid "Trying to write to %s" msgstr "Versuch, an %s zu schreiben" @@ -989,6 +1045,11 @@ msgstr "NICHT ABGEFANGENE AUSNAHME" msgid "Unable to find %s config section in %s" msgstr "%s-Konfigurationsabschnitt in %s kann nicht gefunden werden" +#, python-format +msgid "Unable to load internal client from config: %r (%s)" +msgstr "" +"Interner Client konnte nicht aus der Konfiguration geladen werden: %r (%s)" + #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "" @@ -998,6 +1059,10 @@ msgstr "" msgid "Unable to locate config for %s" msgstr "Konfiguration für %s wurde nicht gefunden." 
+#, python-format +msgid "Unable to locate config number %s for %s" +msgstr "Konfigurationsnummer %s für %s wurde nicht gefunden." + msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" @@ -1023,6 +1088,11 @@ msgstr "Unerwartete Antwort: %s" msgid "Unhandled exception" msgstr "Nicht behandelte Exception" +#, python-format +msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" +msgstr "" +"Unbekannte Ausnahme bei GET-Versuch: %(account)r %(container)r %(object)r" + #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Aktualisierungsbericht fehlgeschlagen für %(container)s %(dbfile)s" From f581fccf71034818d19062593eeb52a4347bb174 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Tue, 9 Feb 2016 15:59:52 -0800 Subject: [PATCH 025/141] By default, disallow inbound X-Timestamp headers With the X-Timestamp validation added in commit e619411, end users could upload objects with X-Timestamp: 9999999999.99999_ffffffffffffffff (the maximum value) and Swift would be unable to delete them. Now, inbound X-Timestamp headers will be moved to X-Backend-Inbound-X-Timestamp, effectively rendering them harmless. The primary reason to allow X-Timestamp before was to prevent Last-Modified changes for objects coming from either: * container_sync or * a migration from another storage system. To enable the former use-case, the container_sync middleware will now translate X-Backend-Inbound-X-Timestamp headers back to X-Timestamp after verifying the request. Additionally, a new option is added to the gatekeeper filter config: # shunt_inbound_x_timestamp = true To enable the latter use-case (or any other use-case not mentioned), set this to false. Upgrade Consideration ===================== If your cluster workload requires that clients be allowed to specify objects' X-Timestamp values, disable the shunt_inbound_x_timestamp option before upgrading. 
UpgradeImpact Change-Id: I8799d5eb2ae9d795ba358bb422f69c70ee8ebd2c --- etc/proxy-server.conf-sample | 6 +++ swift/common/middleware/container_sync.py | 5 ++ swift/common/middleware/gatekeeper.py | 11 +++- test/functional/test_object.py | 46 +++++++++++++--- .../common/middleware/test_container_sync.py | 19 ++++--- .../unit/common/middleware/test_gatekeeper.py | 52 +++++++++++++++++-- 6 files changed, 122 insertions(+), 17 deletions(-) diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index a80e69be06..a06e15a9a6 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -674,6 +674,12 @@ use = egg:swift#account_quotas [filter:gatekeeper] use = egg:swift#gatekeeper +# Set this to false if you want to allow clients to set arbitrary X-Timestamps +# on uploaded objects. This may be used to preserve timestamps when migrating +# from a previous storage system, but risks allowing users to upload +# difficult-to-delete data. +# shunt_inbound_x_timestamp = true +# # You can override the default log routing for this filter here: # set log_name = gatekeeper # set log_facility = LOG_LOCAL0 diff --git a/swift/common/middleware/container_sync.py b/swift/common/middleware/container_sync.py index 1ea6480700..357c5e98d1 100644 --- a/swift/common/middleware/container_sync.py +++ b/swift/common/middleware/container_sync.py @@ -97,6 +97,11 @@ class ContainerSync(object): req.environ.setdefault('swift.log_info', []).append( 'cs:no-local-user-key') else: + # x-timestamp headers get shunted by gatekeeper + if 'x-backend-inbound-x-timestamp' in req.headers: + req.headers['x-timestamp'] = req.headers.pop( + 'x-backend-inbound-x-timestamp') + expected = self.realms_conf.get_sig( req.method, req.path, req.headers.get('x-timestamp', '0'), nonce, diff --git a/swift/common/middleware/gatekeeper.py b/swift/common/middleware/gatekeeper.py index 5e680d0e27..c5c1066505 100644 --- a/swift/common/middleware/gatekeeper.py +++ 
b/swift/common/middleware/gatekeeper.py @@ -32,7 +32,7 @@ automatically inserted close to the start of the pipeline by the proxy server. from swift.common.swob import Request -from swift.common.utils import get_logger +from swift.common.utils import get_logger, config_true_value from swift.common.request_helpers import remove_items, get_sys_meta_prefix import re @@ -69,6 +69,8 @@ class GatekeeperMiddleware(object): self.logger = get_logger(conf, log_route='gatekeeper') self.inbound_condition = make_exclusion_test(inbound_exclusions) self.outbound_condition = make_exclusion_test(outbound_exclusions) + self.shunt_x_timestamp = config_true_value( + conf.get('shunt_inbound_x_timestamp', 'true')) def __call__(self, env, start_response): req = Request(env) @@ -76,6 +78,13 @@ class GatekeeperMiddleware(object): if removed: self.logger.debug('removed request headers: %s' % removed) + if 'X-Timestamp' in req.headers and self.shunt_x_timestamp: + ts = req.headers.pop('X-Timestamp') + req.headers['X-Backend-Inbound-X-Timestamp'] = ts + # log in a similar format as the removed headers + self.logger.debug('shunted request headers: %s' % + [('X-Timestamp', ts)]) + def gatekeeper_response(status, response_headers, exc_info=None): removed = filter( lambda h: self.outbound_condition(h[0]), diff --git a/test/functional/test_object.py b/test/functional/test_object.py index e33f6ca075..e331e220f3 100755 --- a/test/functional/test_object.py +++ b/test/functional/test_object.py @@ -167,11 +167,28 @@ class TestObject(unittest2.TestCase): 'Content-Length': '0', 'X-Timestamp': '-1'}) return check_response(conn) + + def head(url, token, parsed, conn): + conn.request('HEAD', '%s/%s/%s' % (parsed.path, self.container, + 'too_small_x_timestamp'), + '', {'X-Auth-Token': token, + 'Content-Length': '0'}) + return check_response(conn) + ts_before = time.time() resp = retry(put) body = resp.read() - self.assertEqual(resp.status, 400) - self.assertIn( - 'X-Timestamp should be a UNIX timestamp float 
value', body) + ts_after = time.time() + if resp.status == 400: + # shunt_inbound_x_timestamp must be false + self.assertIn( + 'X-Timestamp should be a UNIX timestamp float value', body) + else: + self.assertEqual(resp.status, 201) + self.assertEqual(body, '') + resp = retry(head) + resp.read() + self.assertGreater(float(resp.headers['x-timestamp']), ts_before) + self.assertLess(float(resp.headers['x-timestamp']), ts_after) def test_too_big_x_timestamp(self): def put(url, token, parsed, conn): @@ -181,11 +198,28 @@ class TestObject(unittest2.TestCase): 'Content-Length': '0', 'X-Timestamp': '99999999999.9999999999'}) return check_response(conn) + + def head(url, token, parsed, conn): + conn.request('HEAD', '%s/%s/%s' % (parsed.path, self.container, + 'too_big_x_timestamp'), + '', {'X-Auth-Token': token, + 'Content-Length': '0'}) + return check_response(conn) + ts_before = time.time() resp = retry(put) body = resp.read() - self.assertEqual(resp.status, 400) - self.assertIn( - 'X-Timestamp should be a UNIX timestamp float value', body) + ts_after = time.time() + if resp.status == 400: + # shunt_inbound_x_timestamp must be false + self.assertIn( + 'X-Timestamp should be a UNIX timestamp float value', body) + else: + self.assertEqual(resp.status, 201) + self.assertEqual(body, '') + resp = retry(head) + resp.read() + self.assertGreater(float(resp.headers['x-timestamp']), ts_before) + self.assertLess(float(resp.headers['x-timestamp']), ts_after) def test_x_delete_after(self): def put(url, token, parsed, conn): diff --git a/test/unit/common/middleware/test_container_sync.py b/test/unit/common/middleware/test_container_sync.py index 786665328f..61a4735f15 100644 --- a/test/unit/common/middleware/test_container_sync.py +++ b/test/unit/common/middleware/test_container_sync.py @@ -42,7 +42,10 @@ class FakeApp(object): body = 'Response to Authorized Request' else: body = 'Pass-Through Response' - start_response('200 OK', [('Content-Length', str(len(body)))]) + headers = 
[('Content-Length', str(len(body)))] + if 'HTTP_X_TIMESTAMP' in env: + headers.append(('X-Timestamp', env['HTTP_X_TIMESTAMP'])) + start_response('200 OK', headers) return body @@ -214,18 +217,20 @@ cluster_dfw1 = http://dfw1.host/v1/ req.environ.get('swift.log_info')) def test_valid_sig(self): + ts = '1455221706.726999_0123456789abcdef' sig = self.sync.realms_conf.get_sig( - 'GET', '/v1/a/c', '0', 'nonce', + 'GET', '/v1/a/c', ts, 'nonce', self.sync.realms_conf.key('US'), 'abc') - req = swob.Request.blank( - '/v1/a/c', headers={'x-container-sync-auth': 'US nonce ' + sig}) + req = swob.Request.blank('/v1/a/c', headers={ + 'x-container-sync-auth': 'US nonce ' + sig, + 'x-backend-inbound-x-timestamp': ts}) req.environ[_get_cache_key('a', 'c')[1]] = {'sync_key': 'abc'} resp = req.get_response(self.sync) self.assertEqual(resp.status, '200 OK') self.assertEqual(resp.body, 'Response to Authorized Request') - self.assertTrue( - 'cs:valid' in req.environ.get('swift.log_info'), - req.environ.get('swift.log_info')) + self.assertIn('cs:valid', req.environ.get('swift.log_info')) + self.assertIn('X-Timestamp', resp.headers) + self.assertEqual(ts, resp.headers['X-Timestamp']) def test_valid_sig2(self): sig = self.sync.realms_conf.get_sig( diff --git a/test/unit/common/middleware/test_gatekeeper.py b/test/unit/common/middleware/test_gatekeeper.py index 42d88e6abd..a01d45cbb1 100644 --- a/test/unit/common/middleware/test_gatekeeper.py +++ b/test/unit/common/middleware/test_gatekeeper.py @@ -74,10 +74,13 @@ class TestGatekeeper(unittest.TestCase): x_backend_headers = {'X-Backend-Replication': 'true', 'X-Backend-Replication-Headers': 'stuff'} + x_timestamp_headers = {'X-Timestamp': '1455952805.719739'} + forbidden_headers_out = dict(sysmeta_headers.items() + x_backend_headers.items()) forbidden_headers_in = dict(sysmeta_headers.items() + x_backend_headers.items()) + shunted_headers_in = dict(x_timestamp_headers.items()) def _assertHeadersEqual(self, expected, actual): for key in 
expected: @@ -106,20 +109,63 @@ class TestGatekeeper(unittest.TestCase): def _test_reserved_header_removed_inbound(self, method): headers = dict(self.forbidden_headers_in) headers.update(self.allowed_headers) + headers.update(self.shunted_headers_in) req = Request.blank('/v/a/c', environ={'REQUEST_METHOD': method}, headers=headers) fake_app = FakeApp() app = self.get_app(fake_app, {}) resp = req.get_response(app) self.assertEqual('200 OK', resp.status) - self._assertHeadersEqual(self.allowed_headers, fake_app.req.headers) - self._assertHeadersAbsent(self.forbidden_headers_in, - fake_app.req.headers) + expected_headers = dict(self.allowed_headers) + # shunt_inbound_x_timestamp should be enabled by default + expected_headers.update({'X-Backend-Inbound-' + k: v + for k, v in self.shunted_headers_in.items()}) + self._assertHeadersEqual(expected_headers, fake_app.req.headers) + unexpected_headers = dict(self.forbidden_headers_in.items() + + self.shunted_headers_in.items()) + self._assertHeadersAbsent(unexpected_headers, fake_app.req.headers) def test_reserved_header_removed_inbound(self): for method in self.methods: self._test_reserved_header_removed_inbound(method) + def _test_reserved_header_shunted_inbound(self, method): + headers = dict(self.shunted_headers_in) + headers.update(self.allowed_headers) + req = Request.blank('/v/a/c', environ={'REQUEST_METHOD': method}, + headers=headers) + fake_app = FakeApp() + app = self.get_app(fake_app, {}, shunt_inbound_x_timestamp='true') + resp = req.get_response(app) + self.assertEqual('200 OK', resp.status) + expected_headers = dict(self.allowed_headers) + expected_headers.update({'X-Backend-Inbound-' + k: v + for k, v in self.shunted_headers_in.items()}) + self._assertHeadersEqual(expected_headers, fake_app.req.headers) + self._assertHeadersAbsent(self.shunted_headers_in, + fake_app.req.headers) + + def test_reserved_header_shunted_inbound(self): + for method in self.methods: + 
self._test_reserved_header_shunted_inbound(method) + + def _test_reserved_header_shunt_bypassed_inbound(self, method): + headers = dict(self.shunted_headers_in) + headers.update(self.allowed_headers) + req = Request.blank('/v/a/c', environ={'REQUEST_METHOD': method}, + headers=headers) + fake_app = FakeApp() + app = self.get_app(fake_app, {}, shunt_inbound_x_timestamp='false') + resp = req.get_response(app) + self.assertEqual('200 OK', resp.status) + expected_headers = dict(self.allowed_headers.items() + + self.shunted_headers_in.items()) + self._assertHeadersEqual(expected_headers, fake_app.req.headers) + + def test_reserved_header_shunt_bypassed_inbound(self): + for method in self.methods: + self._test_reserved_header_shunt_bypassed_inbound(method) + def _test_reserved_header_removed_outbound(self, method): headers = dict(self.forbidden_headers_out) headers.update(self.allowed_headers) From 994fa7b115f31da37d064a60a823d1c87545d292 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 9 Mar 2016 16:49:20 +0000 Subject: [PATCH 026/141] Fix object server test not using correct policy Without the correct request header, the test that should be using an EC policy was in fact using a replication policy. 
Change-Id: Id44d0d615a4fd09aadfb286425939aea6abdf7b0 --- test/unit/obj/test_server.py | 54 ++++++++++++++++++++---------------- 1 file changed, 30 insertions(+), 24 deletions(-) diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index bb296a07e6..7c3af335a6 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -503,17 +503,18 @@ class TestObjectController(unittest.TestCase): update_etag = update_etag or '098f6bcd4621d373cade4e832627b4f6' def mock_container_update(ctlr, op, account, container, obj, request, - headers_out, objdevice, policy_idx): - calls_made.append(headers_out) + headers_out, objdevice, policy): + calls_made.append((headers_out, policy)) headers = { 'X-Timestamp': t[1].internal, 'Content-Type': 'application/octet-stream;swift_bytes=123456789', 'Content-Length': '4', - 'X-Backend-Storage-Policy': int(policy)} + 'X-Backend-Storage-Policy-Index': int(policy)} if policy.policy_type == EC_POLICY: headers['X-Backend-Container-Update-Override-Etag'] = update_etag headers['X-Object-Sysmeta-Ec-Etag'] = update_etag + headers['X-Object-Sysmeta-Ec-Frag-Index'] = 2 req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -531,15 +532,16 @@ class TestObjectController(unittest.TestCase): 'x-content-type': 'application/octet-stream;swift_bytes=123456789', 'x-timestamp': t[1].internal, 'x-etag': update_etag}) - self.assertDictEqual(expected_headers, calls_made[0]) + self.assertDictEqual(expected_headers, calls_made[0][0]) + self.assertEqual(policy, calls_made[0][1]) # POST with no metadata newer than the data should return 409, # container update not expected calls_made = [] - req = Request.blank('/sda1/p/a/c/o', - environ={'REQUEST_METHOD': 'POST'}, - headers={'X-Timestamp': t[0].internal, - 'X-Backend-Storage-Policy': int(policy)}) + req = Request.blank( + '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'}, + headers={'X-Timestamp': t[0].internal, + 'X-Backend-Storage-Policy-Index': int(policy)}) 
with mock.patch('swift.obj.server.ObjectController.container_update', mock_container_update): @@ -553,10 +555,10 @@ class TestObjectController(unittest.TestCase): # POST with newer metadata returns success and container update # is expected calls_made = [] - req = Request.blank('/sda1/p/a/c/o', - environ={'REQUEST_METHOD': 'POST'}, - headers={'X-Timestamp': t[3].internal, - 'X-Backend-Storage-Policy': int(policy)}) + req = Request.blank( + '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'}, + headers={'X-Timestamp': t[3].internal, + 'X-Backend-Storage-Policy-Index': int(policy)}) with mock.patch('swift.obj.server.ObjectController.container_update', mock_container_update): @@ -571,15 +573,16 @@ class TestObjectController(unittest.TestCase): 'x-content-type-timestamp': t[1].internal, 'x-meta-timestamp': t[3].internal, 'x-etag': update_etag}) - self.assertDictEqual(expected_headers, calls_made[0]) + self.assertDictEqual(expected_headers, calls_made[0][0]) + self.assertEqual(policy, calls_made[0][1]) # POST with no metadata newer than existing metadata should return # 409, container update not expected calls_made = [] - req = Request.blank('/sda1/p/a/c/o', - environ={'REQUEST_METHOD': 'POST'}, - headers={'X-Timestamp': t[2].internal, - 'X-Backend-Storage-Policy': int(policy)}) + req = Request.blank( + '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'}, + headers={'X-Timestamp': t[2].internal, + 'X-Backend-Storage-Policy-Index': int(policy)}) with mock.patch('swift.obj.server.ObjectController.container_update', mock_container_update): @@ -600,7 +603,7 @@ class TestObjectController(unittest.TestCase): 'X-Timestamp': t[2].internal, 'Content-Type': 'text/plain', 'Content-Type-Timestamp': t[2].internal, - 'X-Backend-Storage-Policy': int(policy) + 'X-Backend-Storage-Policy-Index': int(policy) }) with mock.patch('swift.obj.server.ObjectController.container_update', @@ -616,7 +619,8 @@ class TestObjectController(unittest.TestCase): 'x-content-type-timestamp': t[2].internal, 
'x-meta-timestamp': t[3].internal, 'x-etag': update_etag}) - self.assertDictEqual(expected_headers, calls_made[0]) + self.assertDictEqual(expected_headers, calls_made[0][0]) + self.assertEqual(policy, calls_made[0][1]) # POST with older content-type but newer metadata returns success # and container update is expected @@ -627,7 +631,7 @@ class TestObjectController(unittest.TestCase): 'X-Timestamp': t[4].internal, 'Content-Type': 'older', 'Content-Type-Timestamp': t[1].internal, - 'X-Backend-Storage-Policy': int(policy) + 'X-Backend-Storage-Policy-Index': int(policy) }) with mock.patch('swift.obj.server.ObjectController.container_update', @@ -643,7 +647,8 @@ class TestObjectController(unittest.TestCase): 'x-content-type-timestamp': t[2].internal, 'x-meta-timestamp': t[4].internal, 'x-etag': update_etag}) - self.assertDictEqual(expected_headers, calls_made[0]) + self.assertDictEqual(expected_headers, calls_made[0][0]) + self.assertEqual(policy, calls_made[0][1]) # POST with same-time content-type and metadata returns 409 # and no container update is expected @@ -654,7 +659,7 @@ class TestObjectController(unittest.TestCase): 'X-Timestamp': t[4].internal, 'Content-Type': 'ignored', 'Content-Type-Timestamp': t[2].internal, - 'X-Backend-Storage-Policy': int(policy) + 'X-Backend-Storage-Policy-Index': int(policy) }) with mock.patch('swift.obj.server.ObjectController.container_update', @@ -673,7 +678,7 @@ class TestObjectController(unittest.TestCase): headers={ 'X-Timestamp': t[3].internal, 'Content-Type': 'text/newer', - 'X-Backend-Storage-Policy': int(policy) + 'X-Backend-Storage-Policy-Index': int(policy) }) with mock.patch('swift.obj.server.ObjectController.container_update', @@ -689,7 +694,8 @@ class TestObjectController(unittest.TestCase): 'x-content-type-timestamp': t[3].internal, 'x-meta-timestamp': t[4].internal, 'x-etag': update_etag}) - self.assertDictEqual(expected_headers, calls_made[0]) + self.assertDictEqual(expected_headers, calls_made[0][0]) + 
self.assertEqual(policy, calls_made[0][1]) def test_POST_container_updates_with_replication_policy(self): self._test_POST_container_updates(POLICIES[0]) From e38b53393fee311ca4e351b6d9ee3d2bf02441a4 Mon Sep 17 00:00:00 2001 From: Donagh McCabe Date: Wed, 9 Mar 2016 14:28:17 +0000 Subject: [PATCH 027/141] Cleanup of Swift Ops Runbook This patch cleans up some rough edges that were left (due to time constraints) in the original commit. Change-Id: Id4480be8dc1b5c920c19988cb89ca8b60ace91b4 Co-Authored-By: Gerry Drudy gerry.drudy@hpe.com --- doc/source/admin_guide.rst | 8 +- doc/source/ops_runbook/diagnose.rst | 462 ++++++++++++------ doc/source/ops_runbook/general.rst | 36 -- doc/source/ops_runbook/index.rst | 52 -- doc/source/ops_runbook/maintenance.rst | 50 +- doc/source/ops_runbook/procedures.rst | 207 +++++--- .../ops_runbook/sec-furtherdiagnose.rst | 177 ------- doc/source/ops_runbook/troubleshooting.rst | 87 ++-- 8 files changed, 517 insertions(+), 562 deletions(-) delete mode 100644 doc/source/ops_runbook/general.rst delete mode 100644 doc/source/ops_runbook/sec-furtherdiagnose.rst diff --git a/doc/source/admin_guide.rst b/doc/source/admin_guide.rst index 06c4244822..73243f2b7b 100644 --- a/doc/source/admin_guide.rst +++ b/doc/source/admin_guide.rst @@ -234,9 +234,11 @@ using the format `regex_pattern_X = regex_expression`, where `X` is a number. This script has been tested on Ubuntu 10.04 and Ubuntu 12.04, so if you are using a different distro or OS, some care should be taken before using in production. --------------- -Cluster Health --------------- +.. _dispersion_report: + +----------------- +Dispersion Report +----------------- There is a swift-dispersion-report tool for measuring overall cluster health. 
This is accomplished by checking if a set of deliberately distributed diff --git a/doc/source/ops_runbook/diagnose.rst b/doc/source/ops_runbook/diagnose.rst index d34b38c52b..629cf4881e 100644 --- a/doc/source/ops_runbook/diagnose.rst +++ b/doc/source/ops_runbook/diagnose.rst @@ -2,15 +2,53 @@ Identifying issues and resolutions ================================== +Is the system up? +----------------- + +If you have a report that Swift is down, perform the following basic checks: + +#. Run swift functional tests. + +#. From a server in your data center, use ``curl`` to check ``/healthcheck`` + (see below). + +#. If you have a monitoring system, check your monitoring system. + +#. Check your hardware load balancers infrastructure. + +#. Run swift-recon on a proxy node. + +Functional tests usage +----------------------- + +We would recommend that you set up the functional tests to run against your +production system. Run regularly, this can be a useful tool to validate +that the system is configured correctly. In addition, it can provide +early warning about failures in your system (if the functional tests stop +working, user applications will also probably stop working). + +A script for running the functional tests is located in ``swift/.functests``. + + +External monitoring +------------------- + +We use pingdom.com to monitor the external Swift API. We suggest the +following: + + - Do a GET on ``/healthcheck`` + + - Create a container, make it public (x-container-read: + .r*,.rlistings), create a small file in the container; do a GET + on the object + Diagnose: General approach -------------------------- - Look at service status in your monitoring system. - In addition to system monitoring tools and issue logging by users, - swift errors will often result in log entries in the ``/var/log/swift`` - files: ``proxy.log``, ``server.log`` and ``background.log`` (see:``Swift - logs``). + swift errors will often result in log entries (see :ref:`swift_logs`).
- Look at any logs your deployment tool produces. @@ -33,22 +71,24 @@ Diagnose: Swift-dispersion-report --------------------------------- The swift-dispersion-report is a useful tool to gauge the general -health of the system. Configure the ``swift-dispersion`` report for -100% coverage. The dispersion report regularly monitors -these and gives a report of the amount of objects/containers are still -available as well as how many copies of them are also there. +health of the system. Configure the ``swift-dispersion`` report to cover at +a minimum every disk drive in your system (usually 1% coverage). +See :ref:`dispersion_report` for details of how to configure and +use the dispersion reporting tool. -The dispersion-report output is logged on the first proxy of the first -AZ or each system (proxy with the monitoring role) under -``/var/log/swift/swift-dispersion-report.log``. +The ``swift-dispersion-report`` tool can take a long time to run, especially +if any servers are down. We suggest you run it regularly +(e.g., in a cron job) and save the results. This makes it easy to refer +to the last report without having to wait for a long-running command +to complete. -Diagnose: Is swift running? ---------------------------- +Diagnose: Is system responding to /healthcheck? +----------------------------------------------- When you want to establish if a swift endpoint is running, run ``curl -k`` -against either: https://*[REPLACEABLE]*./healthcheck OR -https:*[REPLACEABLE]*.crossdomain.xml +against https://*[ENDPOINT]*/healthcheck. +.. _swift_logs: Diagnose: Interpreting messages in ``/var/log/swift/`` files ------------------------------------------------------------ @@ -70,25 +110,20 @@ The following table lists known issues: - **Signature** - **Issue** - **Steps to take** - * - /var/log/syslog - - kernel: [] hpsa .... .... .... has check condition: unknown type: - Sense: 0x5, ASC: 0x20, ASC Q: 0x0 .... 
- - An unsupported command was issued to the storage hardware - - Understood to be a benign monitoring issue, ignore * - /var/log/syslog - kernel: [] sd .... [csbu:sd...] Sense Key: Medium Error - Suggests disk surface issues - - Run swift diagnostics on the target node to check for disk errors, + - Run ``swift-drive-audit`` on the target node to check for disk errors, repair disk errors * - /var/log/syslog - kernel: [] sd .... [csbu:sd...] Sense Key: Hardware Error - Suggests storage hardware issues - - Run swift diagnostics on the target node to check for disk failures, + - Run diagnostics on the target node to check for disk failures, replace failed disks * - /var/log/syslog - kernel: [] .... I/O error, dev sd.... ,sector .... - - - Run swift diagnostics on the target node to check for disk errors + - Run diagnostics on the target node to check for disk errors * - /var/log/syslog - pound: NULL get_thr_arg - Multiple threads woke up @@ -96,59 +131,61 @@ The following table lists known issues: * - /var/log/swift/proxy.log - .... ERROR .... ConnectionTimeout .... - A storage node is not responding in a timely fashion - - Run swift diagnostics on the target node to check for node down, - node unconfigured, storage off-line or network issues between the + - Check if node is down, not running Swift, + unconfigured, storage off-line or for network issues between the proxy and non responding node * - /var/log/swift/proxy.log - proxy-server .... HTTP/1.0 500 .... - A proxy server has reported an internal server error - - Run swift diagnostics on the target node to check for issues + - Examine the logs for any errors at the time the error was reported to + attempt to understand the cause of the error. * - /var/log/swift/server.log - .... ERROR .... ConnectionTimeout .... 
- A storage server is not responding in a timely fashion - - Run swift diagnostics on the target node to check for a node or - service, down, unconfigured, storage off-line or network issues - between the two nodes + - Check if node is down, not running Swift, + unconfigured, storage off-line or for network issues between the + server and non responding node * - /var/log/swift/server.log - .... ERROR .... Remote I/O error: '/srv/node/disk.... - A storage device is not responding as expected - - Run swift diagnostics and check the filesystem named in the error - for corruption (unmount & xfs_repair) + - Run ``swift-drive-audit`` and check the filesystem named in the error + for corruption (unmount & xfs_repair). Check if the filesystem + is mounted and working. * - /var/log/swift/background.log - object-server ERROR container update failed .... Connection refused - - Peer node is not responding - - Check status of the network and peer node + - A container server node could not be contacted + - Check if node is down, not running Swift, + unconfigured, storage off-line or for network issues between the + server and non responding node * - /var/log/swift/background.log - object-updater ERROR with remote .... ConnectionTimeout - - - - Check status of the network and peer node + - The remote container server is busy + - If the container is very large, some errors updating it can be + expected. However, this error can also occur if there is a networking + issue. * - /var/log/swift/background.log - account-reaper STDOUT: .... error: ECONNREFUSED - - Network connectivity issue - - Resolve network issue and re-run diagnostics + - Network connectivity issue or the target server is down. + - Resolve network issue or reboot the target server * - /var/log/swift/background.log - .... ERROR .... 
ConnectionTimeout - A storage server is not responding in a timely fashion - - Run swift diagnostics on the target node to check for a node - or service, down, unconfigured, storage off-line or network issues - between the two nodes + - The target server may be busy. However, this error can also occur if + there is a networking issue. * - /var/log/swift/background.log - .... ERROR syncing .... Timeout - - A storage server is not responding in a timely fashion - - Run swift diagnostics on the target node to check for a node - or service, down, unconfigured, storage off-line or network issues - between the two nodes + - A timeout occurred syncing data to another node. + - The target server may be busy. However, this error can also occur if + there is a networking issue. * - /var/log/swift/background.log - .... ERROR Remote drive not mounted .... - A storage server disk is unavailable - - Run swift diagnostics on the target node to check for a node or - service, failed or unmounted disk on the target, or a network issue + - Repair and remount the file system (on the remote node) * - /var/log/swift/background.log - object-replicator .... responded as unmounted - A storage server disk is unavailable - - Run swift diagnostics on the target node to check for a node or - service, failed or unmounted disk on the target, or a network issue - * - /var/log/swift/\*.log + - Repair and remount the file system (on the remote node) + * - /var/log/swift/*.log - STDOUT: EXCEPTION IN - A unexpected error occurred - Read the Traceback details, if it matches known issues @@ -157,19 +194,14 @@ The following table lists known issues: * - /var/log/rsyncd.log - rsync: mkdir "/disk....failed: No such file or directory.... 
- A local storage server disk is unavailable - - Run swift diagnostics on the node to check for a failed or + - Run diagnostics on the node to check for a failed or unmounted disk * - /var/log/swift* - - Exception: Could not bind to 0.0.0.0:600xxx + - Exception: Could not bind to 0.0.0.0:6xxx - Possible Swift process restart issue. This indicates an old swift process is still running. - - Run swift diagnostics, if some swift services are reported down, + - Restart Swift services. If some swift services are reported down, check if they left residual process behind. - * - /var/log/rsyncd.log - - rsync: recv_generator: failed to stat "/disk....." (in object) - failed: Not a directory (20) - - Swift directory structure issues - - Run swift diagnostics on the node to check for issues Diagnose: Parted reports the backup GPT table is corrupt -------------------------------------------------------- @@ -188,7 +220,7 @@ Diagnose: Parted reports the backup GPT table is corrupt OK/Cancel? -To fix, go to: Fix broken GPT table (broken disk partition) +To fix, go to :ref:`fix_broken_gpt_table` Diagnose: Drives diagnostic reports a FS label is not acceptable @@ -240,9 +272,10 @@ Diagnose: Failed LUNs .. note:: - The HPE Helion Public Cloud uses direct attach SmartArry + The HPE Helion Public Cloud uses direct attach SmartArray controllers/drives. The information here is specific to that - environment. + environment. The hpacucli utility mentioned here may be called + hpssacli in your environment. The ``swift_diagnostics`` mount checks may return a warning that a LUN has failed, typically accompanied by DriveAudit check failures and device @@ -254,7 +287,7 @@ the procedure to replace the disk. Otherwise the lun can be re-enabled as follows: -#. Generate a hpssacli diagnostic report. This report allows the swift +#. Generate a hpssacli diagnostic report. 
This report allows the DC team to troubleshoot potential cabling or hardware issues so it is imperative that you run it immediately when troubleshooting a failed LUN. You will come back later and grep this file for more details, but @@ -262,8 +295,7 @@ Otherwise the lun can be re-enabled as follows: .. code:: - sudo hpssacli controller all diag file=/tmp/hpacu.diag ris=on \ - xml=off zip=off + sudo hpssacli controller all diag file=/tmp/hpacu.diag ris=on xml=off zip=off Export the following variables using the below instructions before proceeding further. @@ -317,8 +349,7 @@ proceeding further. .. code:: - sudo hpssacli controller slot=1 ld ${LDRIVE} show detail \ - grep -i "Disk Name" + sudo hpssacli controller slot=1 ld ${LDRIVE} show detail | grep -i "Disk Name" #. Export the device name variable from the preceding command (example: /dev/sdk): @@ -396,6 +427,8 @@ proceeding further. should be checked. For example, log a DC ticket to check the sas cables between the drive and the expander. +.. _diagnose_slow_disk_drives: + Diagnose: Slow disk devices --------------------------- @@ -404,7 +437,8 @@ Diagnose: Slow disk devices collectl is an open-source performance gathering/analysis tool. If the diagnostics report a message such as ``sda: drive is slow``, you -should log onto the node and run the following comand: +should log onto the node and run the following command (remove ``-c 1`` option to continuously monitor +the data): .. code:: @@ -431,13 +465,12 @@ should log onto the node and run the following comand: dm-3 0 0 0 0 0 0 0 0 0 0 0 0 0 dm-4 0 0 0 0 0 0 0 0 0 0 0 0 0 dm-5 0 0 0 0 0 0 0 0 0 0 0 0 0 - ... - (repeats -- type Ctrl/C to stop) + Look at the ``Wait`` and ``SvcTime`` values. It is not normal for these values to exceed 50msec. This is known to impact customer -performance (upload/download. For a controller problem, many/all drives -will show how wait and service times. A reboot may correct the prblem; +performance (upload/download). 
For a controller problem, many/all drives +will show long wait and service times. A reboot may correct the problem; otherwise hardware replacement is needed. Another way to look at the data is as follows: @@ -526,12 +559,12 @@ be disabled on a per-drive basis. Diagnose: Slow network link - Measuring network performance ----------------------------------------------------------- -Network faults can cause performance between Swift nodes to degrade. The -following tests are recommended. Other methods (such as copying large +Network faults can cause performance between Swift nodes to degrade. Testing +with ``netperf`` is recommended. Other methods (such as copying large files) may also work, but can produce inconclusive results. -Use netperf on all production systems. Install on all systems if not -already installed. And the UFW rules for its control port are in place. +Install ``netperf`` on all systems if not +already installed. Check that the UFW rules for its control port are in place. However, there are no pre-opened ports for netperf's data connection. Pick a port number. In this example, 12866 is used because it is one higher than netperf's default control port number, 12865. If you get very @@ -561,11 +594,11 @@ Running tests #. On the ``source`` node, run the following command to check throughput. Note the double-dash before the -P option. - The command takes 10 seconds to complete. + The command takes 10 seconds to complete. The ``target`` node is 192.168.245.5. .. code:: - $ netperf -H .72.4 + $ netperf -H 192.168.245.5 -- -P 12866 MIGRATED TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 12866 AF_INET to .72.4 (.72.4) port 12866 AF_INET : demo Recv Send Send @@ -578,7 +611,7 @@ Running tests .. 
code:: - $ netperf -H .72.4 -t TCP_RR -- -P 12866 + $ netperf -H 192.168.245.5 -t TCP_RR -- -P 12866 MIGRATED TCP REQUEST/RESPONSE TEST from 0.0.0.0 (0.0.0.0) port 12866 AF_INET to .72.4 (.72.4) port 12866 AF_INET : demo : first burst 0 @@ -763,7 +796,7 @@ Diagnose: High system latency used by the monitor program happen to live on the bad object server. - A general network problem within the data canter. Compare the results - with the Pingdom monitors too see if they also have a problem. + with the Pingdom monitors to see if they also have a problem. Diagnose: Interface reports errors ---------------------------------- @@ -802,59 +835,21 @@ If the nick supports self test, this can be performed with: Self tests should read ``PASS`` if the nic is operating correctly. Nic module drivers can be re-initialised by carefully removing and -re-installing the modules. Case in point being the mellanox drivers on -Swift Proxy servers. which use a two part driver mlx4_en and +re-installing the modules (this avoids rebooting the server). +For example, mellanox drivers use a two part driver mlx4_en and mlx4_core. To reload these you must carefully remove the mlx4_en (ethernet) then the mlx4_core modules, and reinstall them in the reverse order. As the interface will be disabled while the modules are unloaded, you -must be very careful not to lock the interface out. The following -script can be used to reload the melanox drivers, as a side effect, this -resets error counts on the interface. - - -Diagnose: CorruptDir diagnostic reports corrupt directories ------------------------------------------------------------ - -From time to time Swift data structures may become corrupted by -misplaced files in filesystem locations that swift would normally place -a directory. This causes issues for swift when directory creation is -attempted at said location, it may fail due to the pre-existent file. 
If -the CorruptDir diagnostic reports Corrupt directories, they should be -checked to see if they exist. - -Checking existence of entries ------------------------------ - -Swift data filesystems are located under the ``/srv/node/disk`` -mountpoints and contain accounts, containers and objects -subdirectories which in turn contain partition number subdirectories. -The partition number directories contain md5 hash subdirectories. md5 -hash directories contain md5sum subdirectories. md5sum directories -contain the Swift data payload as either a database (.db), for -accounts and containers, or a data file (.data) for objects. -If the entries reported in diagnostics correspond to a partition -number, md5 hash or md5sum directory, check the entry with ``ls --ld *entry*``. -If it turns out to be a file rather than a directory, it should be -carefully removed. - -.. note:: - - Please do not ``ls`` the partition level directory contents, as - this *especially objects* may take a lot of time and system resources, - if you need to check the contents, use: - - .. code:: - - echo /srv/node/disk#/type/partition#/ +must be very careful not to lock yourself out so it may be better +to script this. Diagnose: Hung swift object replicator -------------------------------------- -The swift diagnostic message ``Object replicator: remaining exceeds -100hrs:`` may indicate that the swift ``object-replicator`` is stuck and not +A replicator reports in its log that remaining time exceeds +100 hours. This may indicate that the swift ``object-replicator`` is stuck and not making progress. Another useful way to check this is with the 'swift-recon -r' command on a swift proxy server: @@ -866,14 +861,13 @@ making progress. 
Another useful way to check this is with the --> Starting reconnaissance on 384 hosts =============================================================================== [2013-07-17 12:56:19] Checking on replication - http://.72.63:6000/recon/replication: [replication_time] low: 2, high: 80, avg: 28.8, total: 11037, Failed: 0.0%, no_result: 0, reported: 383 - Oldest completion was 2013-06-12 22:46:50 (12 days ago) by .31:6000. - Most recent completion was 2013-07-17 12:56:19 (5 seconds ago) by .204.113:6000. + Oldest completion was 2013-06-12 22:46:50 (12 days ago) by 192.168.245.3:6000. + Most recent completion was 2013-07-17 12:56:19 (5 seconds ago) by 192.168.245.5:6000. =============================================================================== The ``Oldest completion`` line in this example indicates that the -object-replicator on swift object server .31 has not completed +object-replicator on swift object server 192.168.245.3 has not completed the replication cycle in 12 days. This replicator is stuck. The object replicator cycle is generally less than 1 hour. Though an replicator cycle of 15-20 hours can occur if nodes are added to the system and a @@ -886,22 +880,22 @@ the following command: .. 
code:: # sudo grep object-rep /var/log/swift/background.log | grep -e "Starting object replication" -e "Object replication complete" -e "partitions rep" - Jul 16 06:25:46 object-replicator 15344/16450 (93.28%) partitions replicated in 69018.48s (0.22/sec, 22h remaining) - Jul 16 06:30:46 object-replicator 15344/16450 (93.28%) partitions replicated in 69318.58s (0.22/sec, 22h remaining) - Jul 16 06:35:46 object-replicator 15344/16450 (93.28%) partitions replicated in 69618.63s (0.22/sec, 23h remaining) - Jul 16 06:40:46 object-replicator 15344/16450 (93.28%) partitions replicated in 69918.73s (0.22/sec, 23h remaining) - Jul 16 06:45:46 object-replicator 15348/16450 (93.30%) partitions replicated in 70218.75s (0.22/sec, 24h remaining) - Jul 16 06:50:47 object-replicator 15348/16450 (93.30%) partitions replicated in 70518.85s (0.22/sec, 24h remaining) - Jul 16 06:55:47 object-replicator 15348/16450 (93.30%) partitions replicated in 70818.95s (0.22/sec, 25h remaining) - Jul 16 07:00:47 object-replicator 15348/16450 (93.30%) partitions replicated in 71119.05s (0.22/sec, 25h remaining) - Jul 16 07:05:47 object-replicator 15348/16450 (93.30%) partitions replicated in 71419.15s (0.21/sec, 26h remaining) - Jul 16 07:10:47 object-replicator 15348/16450 (93.30%) partitions replicated in 71719.25s (0.21/sec, 26h remaining) - Jul 16 07:15:47 object-replicator 15348/16450 (93.30%) partitions replicated in 72019.27s (0.21/sec, 27h remaining) - Jul 16 07:20:47 object-replicator 15348/16450 (93.30%) partitions replicated in 72319.37s (0.21/sec, 27h remaining) - Jul 16 07:25:47 object-replicator 15348/16450 (93.30%) partitions replicated in 72619.47s (0.21/sec, 28h remaining) - Jul 16 07:30:47 object-replicator 15348/16450 (93.30%) partitions replicated in 72919.56s (0.21/sec, 28h remaining) - Jul 16 07:35:47 object-replicator 15348/16450 (93.30%) partitions replicated in 73219.67s (0.21/sec, 29h remaining) - Jul 16 07:40:47 object-replicator 15348/16450 (93.30%) partitions 
replicated in 73519.76s (0.21/sec, 29h remaining) + Jul 16 06:25:46 192.168.245.4 object-replicator 15344/16450 (93.28%) partitions replicated in 69018.48s (0.22/sec, 22h remaining) + Jul 16 06:30:46 192.168.245.4 object-replicator 15344/16450 (93.28%) partitions replicated in 69318.58s (0.22/sec, 22h remaining) + Jul 16 06:35:46 192.168.245.4 object-replicator 15344/16450 (93.28%) partitions replicated in 69618.63s (0.22/sec, 23h remaining) + Jul 16 06:40:46 192.168.245.4 object-replicator 15344/16450 (93.28%) partitions replicated in 69918.73s (0.22/sec, 23h remaining) + Jul 16 06:45:46 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 70218.75s (0.22/sec, 24h remaining) + Jul 16 06:50:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 70518.85s (0.22/sec, 24h remaining) + Jul 16 06:55:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 70818.95s (0.22/sec, 25h remaining) + Jul 16 07:00:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 71119.05s (0.22/sec, 25h remaining) + Jul 16 07:05:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 71419.15s (0.21/sec, 26h remaining) + Jul 16 07:10:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 71719.25s (0.21/sec, 26h remaining) + Jul 16 07:15:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 72019.27s (0.21/sec, 27h remaining) + Jul 16 07:20:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 72319.37s (0.21/sec, 27h remaining) + Jul 16 07:25:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 72619.47s (0.21/sec, 28h remaining) + Jul 16 07:30:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 72919.56s (0.21/sec, 28h remaining) + Jul 16 07:35:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 73219.67s
(0.21/sec, 29h remaining) + Jul 16 07:40:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 73519.76s (0.21/sec, 29h remaining) The above status is output every 5 minutes to ``/var/log/swift/background.log``. @@ -921,7 +915,7 @@ of a corrupted filesystem detected by the object replicator: .. code:: # sudo bzgrep "Remote I/O error" /var/log/swift/background.log* |grep srv | - tail -1 - Jul 12 03:33:30 object-replicator STDOUT: ERROR:root:Error hashing suffix#012Traceback (most recent call last):#012 File + Jul 12 03:33:30 192.168.245.4 object-replicator STDOUT: ERROR:root:Error hashing suffix#012Traceback (most recent call last):#012 File "/usr/lib/python2.7/dist-packages/swift/obj/replicator.py", line 199, in get_hashes#012 hashes[suffix] = hash_suffix(suffix_dir, reclaim_age)#012 File "/usr/lib/python2.7/dist-packages/swift/obj/replicator.py", line 84, in hash_suffix#012 path_contents = sorted(os.listdir(path))#012OSError: [Errno 121] Remote I/O error: '/srv/node/disk4/objects/1643763/b51' @@ -996,7 +990,7 @@ to repair the problem filesystem. # sudo xfs_repair -P /dev/sde1 #. If the ``xfs_repair`` fails then it may be necessary to re-format the - filesystem. See Procedure: fix broken XFS filesystem. If the + filesystem. See :ref:`fix_broken_xfs_filesystem`. If the ``xfs_repair`` is successful, re-enable chef using the following command and replication should commence again. @@ -1025,7 +1019,183 @@ load: $ uptime 07:44:02 up 18:22, 1 user, load average: 407.12, 406.36, 404.59 -.. toctree:: - :maxdepth: 2 +Further issues and resolutions +------------------------------ + +.. note:: + + The urgency levels in each **Action** column indicates whether or + not it is required to take immediate action, or if the problem can be worked + on during business hours. + +.. list-table:: + :widths: 33 33 33 + :header-rows: 1 + + * - **Scenario** + - **Description** + - **Action** + * - ``/healthcheck`` latency is high. 
+ - The ``/healthcheck`` test does not tax the proxy very much so any drop in value is probably related to + network issues, rather than the proxies being very busy. A very slow proxy might impact the average + number, but it would need to be very slow to shift the number that much. + - Check networks. Do a ``curl https://<ip-address>:<port>/healthcheck`` where + ``ip-address`` is individual proxy IP address. + Repeat this for every proxy server to see if you can pinpoint the problem. + + Urgency: If there are other indications that your system is slow, you should treat + this as an urgent problem. + * - Swift process is not running. + - You can use ``swift-init`` status to check if swift processes are running on any + given server. + - Run this command: + + .. code:: + + sudo swift-init all start + + Examine messages in the swift log files to see if there are any + error messages related to any of the swift processes since the time you + ran the ``swift-init`` command. + + Take any corrective actions that seem necessary. + + Urgency: If this only affects one server, and you have more than one, + identifying and fixing the problem can wait until business hours. + If this same problem affects many servers, then you need to take corrective + action immediately. + * - ntpd is not running. + - NTP is not running. + - Configure and start NTP. + + Urgency: For proxy servers, this is vital. + + * - Host clock is not synced to an NTP server. + - Node time settings do not match NTP server time. + This may take some time to sync after a reboot. + - Assuming NTP is configured and running, you have to wait until the times sync. + * - A swift process has hundreds, to thousands of open file descriptors. + - May happen to any of the swift processes. + Known to have happened with a ``rsyslogd`` restart and where ``/tmp`` was hanging. + + - Restart the swift processes on the affected node: + + ..
code:: + + % sudo swift-init all reload + + Urgency: + If known performance problem: Immediate + + If system seems fine: Medium + * - A swift process is not owned by the swift user. + - If the UID of the swift user has changed, then the processes might not be + owned by that UID. + - Urgency: If this only affects one server, and you have more than one, + identifying and fixing the problem can wait until business hours. + If this same problem affects many servers, then you need to take corrective + action immediately. + * - Object account or container files not owned by swift. + - This typically happens if during a reinstall or a re-image of a server that the UID + of the swift user was changed. The data files in the object account and container + directories are owned by the original swift UID. As a result, the current swift + user does not own these files. + - Correct the UID of the swift user to reflect that of the original UID. An alternate + action is to change the ownership of every file on all file systems. This alternate + action is often impractical and will take considerable time. + + Urgency: If this only affects one server, and you have more than one, + identifying and fixing the problem can wait until business hours. + If this same problem affects many servers, then you need to take corrective + action immediately. + * - A disk drive has a high IO wait or service time. + - If high wait IO times are seen for a single disk, then the disk drive is the problem. + If most/all devices are slow, the controller is probably the source of the problem. + The controller cache may also be miss configured – which will cause similar long + wait or service times. + - As a first step, if your controllers have a cache, check that it is enabled and their battery/capacitor + is working. + + Second, reboot the server. + If problem persists, file a DC ticket to have the drive or controller replaced. 
+ See :ref:`diagnose_slow_disk_drives` on how to check the drive wait or service times. + + Urgency: Medium + * - The network interface is not up. + - Use the ``ifconfig`` and ``ethtool`` commands to determine the network state. + - You can try restarting the interface. However, generally the interface + (or cable) is probably broken, especially if the interface is flapping. + + Urgency: If this only affects one server, and you have more than one, + identifying and fixing the problem can wait until business hours. + If this same problem affects many servers, then you need to take corrective + action immediately. + * - Network interface card (NIC) is not operating at the expected speed. + - The NIC is running at a slower speed than its nominal rated speed. + For example, it is running at 100 Mb/s and the NIC is a 1Ge NIC. + - 1. Try resetting the interface with: + + .. code:: + + sudo ethtool -s eth0 speed 1000 + + ... and then run: + + .. code:: + + sudo lshw -class + + See if size goes to the expected speed. Failing + that, check hardware (NIC cable/switch port). + + 2. If persistent, consider shutting down the server (especially if a proxy) + until the problem is identified and resolved. If you leave this server + running it can have a large impact on overall performance. + + Urgency: High + * - The interface RX/TX error count is non-zero. + - A value of 0 is typical, but counts of 1 or 2 do not indicate a problem. + - 1. For low numbers (For example, 1 or 2), you can simply ignore. Numbers in the range + 3-30 probably indicate that the error count has crept up slowly over a long time. + Consider rebooting the server to remove the report from the noise. + + Typically, when a cable or interface is bad, the error count goes to 400+. For example, + it stands out. There may be other symptoms such as the interface going up and down or + not running at correct speed. A server with a high error count should be watched. + + 2. 
If the error count continues to climb, consider taking the server down until + it can be properly investigated. In any case, a reboot should be done to clear + the error count. + + Urgency: High, if the error count is increasing. + + * - In a swift log you see a message that a process has not replicated in over 24 hours. + - The replicator has not successfully completed a run in the last 24 hours. + This indicates that the replicator has probably hung. + - Use ``swift-init`` to stop and then restart the replicator process. + + Urgency: Low. However if you + recently added or replaced disk drives then you should treat this urgently. + * - Container Updater has not run in 4 hour(s). + - The service may appear to be running however, it may be hung. Examine their swift + logs to see if there are any error messages relating to the container updater. This + may potentially explain why the container updater is not running. + - Urgency: Medium + This may have been triggered by a recent restart of the rsyslog daemon. + Restart the service with: + .. code:: + + sudo swift-init container-updater reload + * - Object replicator: Reports the remaining time and that time is more than 100 hours. + - Each replication cycle the object replicator writes a log message to its log + reporting statistics about the current cycle. This includes an estimate for the + remaining time needed to replicate all objects. If this time is longer than + 100 hours, there is a problem with the replication process. + - Urgency: Medium + Restart the service with: + .. code:: + + sudo swift-init object-replicator reload + + Check that the remaining replication time is going down.
- sec-furtherdiagnose.rst diff --git a/doc/source/ops_runbook/general.rst b/doc/source/ops_runbook/general.rst deleted file mode 100644 index 60d19badee..0000000000 --- a/doc/source/ops_runbook/general.rst +++ /dev/null @@ -1,36 +0,0 @@ -================== -General Procedures -================== - -Getting a swift account stats -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. note:: - - ``swift-direct`` is specific to the HPE Helion Public Cloud. Go look at - ``swifty`` for an alternate, this is an example. - -This procedure describes how you determine the swift usage for a given -swift account, that is the number of containers, number of objects and -total bytes used. To do this you will need the project ID. - -Log onto one of the swift proxy servers. - -Use swift-direct to show this accounts usage: - -.. code:: - - $ sudo -u swift /opt/hp/swift/bin/swift-direct show AUTH_redacted-9a11-45f8-aa1c-9e7b1c7904c8 - Status: 200 - Content-Length: 0 - Accept-Ranges: bytes - X-Timestamp: 1379698586.88364 - X-Account-Bytes-Used: 67440225625994 - X-Account-Container-Count: 1 - Content-Type: text/plain; charset=utf-8 - X-Account-Object-Count: 8436776 - Status: 200 - name: my_container count: 8436776 bytes: 67440225625994 - -This account has 1 container. That container has 8436776 objects. The -total bytes used is 67440225625994. \ No newline at end of file diff --git a/doc/source/ops_runbook/index.rst b/doc/source/ops_runbook/index.rst index 6fdb9c8c90..8bf3f425ac 100644 --- a/doc/source/ops_runbook/index.rst +++ b/doc/source/ops_runbook/index.rst @@ -13,67 +13,15 @@ information, suggestions or recommendations. This document are provided for reference only. We are not responsible for your use of any information, suggestions or recommendations contained herein. -This document also contains references to certain tools that we use to -operate the Swift system within the HPE Helion Public Cloud. 
-Descriptions of these tools are provided for reference only, as the tools themselves -are not publically available at this time. - -- ``swift-direct``: This is similar to the ``swiftly`` tool. - .. toctree:: :maxdepth: 2 - general.rst diagnose.rst procedures.rst maintenance.rst troubleshooting.rst -Is the system up? -~~~~~~~~~~~~~~~~~ - -If you have a report that Swift is down, perform the following basic checks: - -#. Run swift functional tests. - -#. From a server in your data center, use ``curl`` to check ``/healthcheck``. - -#. If you have a monitoring system, check your monitoring system. - -#. Check on your hardware load balancers infrastructure. - -#. Run swift-recon on a proxy node. - -Run swift function tests ------------------------- - -We would recommend that you set up your function tests against your production -system. - -A script for running the function tests is located in ``swift/.functests``. -External monitoring -------------------- - -- We use pingdom.com to monitor the external Swift API. We suggest the - following: - - - Do a GET on ``/healthcheck`` - - - Create a container, make it public (x-container-read: - .r\*,.rlistings), create a small file in the container; do a GET - on the object - -Reference information -~~~~~~~~~~~~~~~~~~~~~ - -Reference: Swift startup/shutdown ---------------------------------- - -- Use reload - not stop/start/restart. - -- Try to roll sets of servers (especially proxy) in groups of less - than 20% of your servers. diff --git a/doc/source/ops_runbook/maintenance.rst b/doc/source/ops_runbook/maintenance.rst index b3c9e582ac..f0eaeb2806 100644 --- a/doc/source/ops_runbook/maintenance.rst +++ b/doc/source/ops_runbook/maintenance.rst @@ -54,8 +54,8 @@ system. Rules-of-thumb for 'good' recon output are: .. code:: - \-> [http://.29:6000/recon/load:] - \-> [http://.31:6000/recon/load:] + -> [http://.29:6000/recon/load:] + -> [http://.31:6000/recon/load:] - That could be okay or could require investigation. 
@@ -154,18 +154,18 @@ Running reccon shows some async pendings: .. code:: - bob@notso:~/swift-1.4.4/swift$ ssh \\-q .132.7 sudo swift-recon \\-alr + bob@notso:~/swift-1.4.4/swift$ ssh -q .132.7 sudo swift-recon -alr =============================================================================== - \[2012-03-14 17:25:55\\] Checking async pendings on 384 hosts... + [2012-03-14 17:25:55] Checking async pendings on 384 hosts... Async stats: low: 0, high: 23, avg: 8, total: 3356 =============================================================================== - \[2012-03-14 17:25:55\\] Checking replication times on 384 hosts... - \[Replication Times\\] shortest: 1.49303831657, longest: 39.6982825994, avg: 4.2418222066 + [2012-03-14 17:25:55] Checking replication times on 384 hosts... + [Replication Times] shortest: 1.49303831657, longest: 39.6982825994, avg: 4.2418222066 =============================================================================== - \[2012-03-14 17:25:56\\] Checking load avg's on 384 hosts... - \[5m load average\\] lowest: 2.35, highest: 8.88, avg: 4.45911458333 - \[15m load average\\] lowest: 2.41, highest: 9.11, avg: 4.504765625 - \[1m load average\\] lowest: 1.95, highest: 8.56, avg: 4.40588541667 + [2012-03-14 17:25:56] Checking load avg's on 384 hosts... + [5m load average] lowest: 2.35, highest: 8.88, avg: 4.45911458333 + [15m load average] lowest: 2.41, highest: 9.11, avg: 4.504765625 + [1m load average] lowest: 1.95, highest: 8.56, avg: 4.40588541667 =============================================================================== Why? Running recon again with -av swift (not shown here) tells us that @@ -231,7 +231,7 @@ Procedure This procedure should be run three times, each time specifying the appropriate ``*.builder`` file. -#. Determine whether all three nodes are different Swift zones by +#. 
Determine whether all three nodes are in different Swift zones by running the ring builder on a proxy node to determine which zones the storage nodes are in. For example: @@ -241,22 +241,22 @@ Procedure /etc/swift/object.builder, build version 1467 2097152 partitions, 3 replicas, 5 zones, 1320 devices, 0.02 balance The minimum number of hours before a partition can be reassigned is 24 - Devices: id zone ip address port name weight partitions balance meta - 0 1 .4 6000 disk0 1708.00 4259 -0.00 - 1 1 .4 6000 disk1 1708.00 4260 0.02 - 2 1 .4 6000 disk2 1952.00 4868 0.01 - 3 1 .4 6000 disk3 1952.00 4868 0.01 - 4 1 .4 6000 disk4 1952.00 4867 -0.01 + Devices: id zone ip address port name weight partitions balance meta + 0 1 .4 6000 disk0 1708.00 4259 -0.00 + 1 1 .4 6000 disk1 1708.00 4260 0.02 + 2 1 .4 6000 disk2 1952.00 4868 0.01 + 3 1 .4 6000 disk3 1952.00 4868 0.01 + 4 1 .4 6000 disk4 1952.00 4867 -0.01 #. Here, node .4 is in zone 1. If two or more of the three nodes under consideration are in the same Swift zone, they do not have any ring partitions in common; there is little/no data availability risk if all three nodes are down. -#. If the nodes are in three distinct Swift zonesit is necessary to +#. If the nodes are in three distinct Swift zones it is necessary to determine whether the nodes have ring partitions in common. Run ``swift-ring`` builder again, this time with the ``list_parts`` option and specify - the nodes under consideration. For example (all on one line): + the nodes under consideration. For example: .. code:: @@ -302,12 +302,12 @@ Procedure .. code:: - % sudo swift-ring-builder /etc/swift/object.builder list_parts .8 .15 .72.2 | grep “3$” - wc \\-l + % sudo swift-ring-builder /etc/swift/object.builder list_parts .8 .15 .72.2 | grep "3$" | wc -l 30 #. In this case the nodes have 30 out of a total of 2097152 partitions - in common; about 0.001%. In this case the risk is small nonzero. + in common; about 0.001%. In this case the risk is small/nonzero.
Recall that a partition is simply a portion of the ring mapping space, not actual data. So having partitions in common is a necessary but not sufficient condition for data unavailability. @@ -320,3 +320,11 @@ Procedure If three nodes that have 3 partitions in common are all down, there is a nonzero probability that data are unavailable and we should work to bring some or all of the nodes up ASAP. + +Swift startup/shutdown +~~~~~~~~~~~~~~~~~~~~~~ + +- Use reload - not stop/start/restart. + +- Try to roll sets of servers (especially proxy) in groups of less + than 20% of your servers. \ No newline at end of file diff --git a/doc/source/ops_runbook/procedures.rst b/doc/source/ops_runbook/procedures.rst index 899df6d694..8a28dc5281 100644 --- a/doc/source/ops_runbook/procedures.rst +++ b/doc/source/ops_runbook/procedures.rst @@ -2,6 +2,8 @@ Software configuration procedures ================================= +.. _fix_broken_gpt_table: + Fix broken GPT table (broken disk partition) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -102,6 +104,8 @@ Fix broken GPT table (broken disk partition) $ sudo aptitude remove gdisk +.. _fix_broken_xfs_filesystem: + Procedure: Fix broken XFS filesystem ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -165,7 +169,7 @@ Procedure: Fix broken XFS filesystem .. code:: - $ sudo dd if=/dev/zero of=/dev/sdb2 bs=$((1024\*1024)) count=1 + $ sudo dd if=/dev/zero of=/dev/sdb2 bs=$((1024*1024)) count=1 1+0 records in 1+0 records out 1048576 bytes (1.0 MB) copied, 0.00480617 s, 218 MB/s @@ -187,129 +191,173 @@ Procedure: Fix broken XFS filesystem $ mount +.. _checking_if_account_ok: + Procedure: Checking if an account is okay ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: ``swift-direct`` is only available in the HPE Helion Public Cloud. - Use ``swiftly`` as an alternate. + Use ``swiftly`` as an alternate (or use ``swift-get-nodes`` as explained + here). -If you have a tenant ID you can check the account is okay as follows from a proxy. 
+You must know the tenant/project ID. You can check if the account is okay as follows from a proxy. .. code:: - $ sudo -u swift /opt/hp/swift/bin/swift-direct show + $ sudo -u swift /opt/hp/swift/bin/swift-direct show AUTH_ The response will either be similar to a swift list of the account containers, or an error indicating that the resource could not be found. -In the latter case you can establish if a backend database exists for -the tenantId by running the following on a proxy: +Alternatively, you can use ``swift-get-nodes`` to find the account database +files. Run the following on a proxy: .. code:: - $ sudo -u swift swift-get-nodes /etc/swift/account.ring.gz + $ sudo swift-get-nodes /etc/swift/account.ring.gz AUTH_ -The response will list ssh commands that will list the replicated -account databases, if they exist. +The response will print curl/ssh commands that will list the replicated +account databases. Use the indicated ``curl`` or ``ssh`` commands to check +the status and existence of the account. + +Procedure: Getting swift account stats +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. note:: + + ``swift-direct`` is specific to the HPE Helion Public Cloud. Go look at + ``swifty`` for an alternate or use ``swift-get-nodes`` as explained + in :ref:`checking_if_account_ok`. + +This procedure describes how you determine the swift usage for a given +swift account, that is the number of containers, number of objects and +total bytes used. To do this you will need the project ID. + +Log onto one of the swift proxy servers. + +Use swift-direct to show this accounts usage: + +.. 
code:: + + $ sudo -u swift /opt/hp/swift/bin/swift-direct show AUTH_ + Status: 200 + Content-Length: 0 + Accept-Ranges: bytes + X-Timestamp: 1379698586.88364 + X-Account-Bytes-Used: 67440225625994 + X-Account-Container-Count: 1 + Content-Type: text/plain; charset=utf-8 + X-Account-Object-Count: 8436776 + Status: 200 + name: my_container count: 8436776 bytes: 67440225625994 + +This account has 1 container. That container has 8436776 objects. The +total bytes used is 67440225625994. Procedure: Revive a deleted account ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Swift accounts are normally not recreated. If a tenant unsubscribes from -Swift, the account is deleted. To re-subscribe to Swift, you can create -a new tenant (new tenant ID), and subscribe to Swift. This creates a -new Swift account with the new tenant ID. +Swift accounts are normally not recreated. If a tenant/project is deleted, +the account can then be deleted. If the user wishes to use Swift again, +the normal process is to create a new tenant/project -- and hence a +new Swift account. -However, until the unsubscribe/new tenant process is supported, you may -hit a situation where a Swift account is deleted and the user is locked -out of Swift. +However, if the Swift account is deleted, but the tenant/project is not +deleted from Keystone, the user can no longer access the account. This +is because the account is marked deleted in Swift. You can revive +the account as described in this process. -Deleting the account database files ------------------------------------ +.. note:: -Here is one possible solution. The containers and objects may be lost -forever. The solution is to delete the account database files and -re-create the account. This may only be done once the containers and -objects are completely deleted. This process is untested, but could -work as follows: + The containers and objects in the "old" account cannot be listed + anymore. 
In addition, if the Account Reaper process has not + finished reaping the containers and objects in the "old" account, these + are effectively orphaned and it is virtually impossible to find and delete + them to free up disk space. -#. Use swift-get-nodes to locate the account's database file (on three - servers). +The solution is to delete the account database files and +re-create the account as follows: -#. Rename the database files (on three servers). +#. You must know the tenant/project ID. The account name is AUTH_. + In this example, the tenant/project ID is ``4ebe3039674d4864a11fe0864ae4d905`` + so the Swift account name is ``AUTH_4ebe3039674d4864a11fe0864ae4d905``. -#. Use ``swiftly`` to create the account (use original name). - -Renaming account database so it can be revived ----------------------------------------------- - -Get the locations of the database files that hold the account data. +#. Use ``swift-get-nodes`` to locate the account's database files (on three + servers). The output has been truncated so we can focus on the important pieces + of data: .. code:: - sudo swift-get-nodes /etc/swift/account.ring.gz AUTH_redacted-1856-44ae-97db-31242f7ad7a1 + $ sudo swift-get-nodes /etc/swift/account.ring.gz AUTH_4ebe3039674d4864a11fe0864ae4d905 + ... + curl -I -XHEAD "http://192.168.245.5:6002/disk1/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905" + curl -I -XHEAD "http://192.168.245.3:6002/disk0/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905" + curl -I -XHEAD "http://192.168.245.4:6002/disk1/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905" + ... + Use your own device location of servers: + such as "export DEVICE=/srv/node" + ssh 192.168.245.5 "ls -lah ${DEVICE:-/srv/node*}/disk1/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052" + ssh 192.168.245.3 "ls -lah ${DEVICE:-/srv/node*}/disk0/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052" + ssh 192.168.245.4 "ls -lah ${DEVICE:-/srv/node*}/disk1/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052" + ...
+ note: `/srv/node*` is used as default value of `devices`, the real value is set in the config file on each storage node. - Account AUTH_redacted-1856-44ae-97db-31242f7ad7a1 - Container None - Object None +#. Before proceeding check that the account is really deleted by using curl. Execute the + commands printed by ``swift-get-nodes``. For example: - Partition 18914 + .. code:: - Hash 93c41ef56dd69173a9524193ab813e78 + $ curl -I -XHEAD "http://192.168.245.5:6002/disk1/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905" + HTTP/1.1 404 Not Found + Content-Length: 0 + Content-Type: text/html; charset=utf-8 - Server:Port Device 15.184.9.126:6002 disk7 - Server:Port Device 15.184.9.94:6002 disk11 - Server:Port Device 15.184.9.103:6002 disk10 - Server:Port Device 15.184.9.80:6002 disk2 [Handoff] - Server:Port Device 15.184.9.120:6002 disk2 [Handoff] - Server:Port Device 15.184.9.98:6002 disk2 [Handoff] + Repeat for the other two servers (192.168.245.3 and 192.168.245.4). + A ``404 Not Found`` indicates that the account is deleted (or never existed). - curl -I -XHEAD "`*http://15.184.9.126:6002/disk7/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* `_ - curl -I -XHEAD "`*http://15.184.9.94:6002/disk11/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* `_ + If you get a ``204 No Content`` response, do **not** proceed. - curl -I -XHEAD "`*http://15.184.9.103:6002/disk10/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* `_ +#. Use the ssh commands printed by ``swift-get-nodes`` to check if database + files exist. For example: - curl -I -XHEAD "`*http://15.184.9.80:6002/disk2/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* `_ # [Handoff] - curl -I -XHEAD "`*http://15.184.9.120:6002/disk2/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* `_ # [Handoff] - curl -I -XHEAD "`*http://15.184.9.98:6002/disk2/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* `_ # [Handoff] + .. 
code:: - ssh 15.184.9.126 "ls -lah /srv/node/disk7/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/" - ssh 15.184.9.94 "ls -lah /srv/node/disk11/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/" - ssh 15.184.9.103 "ls -lah /srv/node/disk10/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/" - ssh 15.184.9.80 "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/" # [Handoff] - ssh 15.184.9.120 "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/" # [Handoff] - ssh 15.184.9.98 "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/" # [Handoff] + $ ssh 192.168.245.5 "ls -lah ${DEVICE:-/srv/node*}/disk1/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052" + total 20K + drwxr-xr-x 2 swift swift 110 Mar 9 10:22 . + drwxr-xr-x 3 swift swift 45 Mar 9 10:18 .. + -rw------- 1 swift swift 17K Mar 9 10:22 f5ecf8b40de3e1b0adb0dbe576874052.db + -rw-r--r-- 1 swift swift 0 Mar 9 10:22 f5ecf8b40de3e1b0adb0dbe576874052.db.pending + -rwxr-xr-x 1 swift swift 0 Mar 9 10:18 .lock - $ sudo swift-get-nodes /etc/swift/account.ring.gz AUTH\_redacted-1856-44ae-97db-31242f7ad7a1Account AUTH_redacted-1856-44ae-97db- - 31242f7ad7a1Container NoneObject NonePartition 18914Hash 93c41ef56dd69173a9524193ab813e78Server:Port Device 15.184.9.126:6002 disk7Server:Port Device 15.184.9.94:6002 disk11Server:Port Device 15.184.9.103:6002 disk10Server:Port Device 15.184.9.80:6002 - disk2 [Handoff]Server:Port Device 15.184.9.120:6002 disk2 [Handoff]Server:Port Device 15.184.9.98:6002 disk2 [Handoff]curl -I -XHEAD - "`*http://15.184.9.126:6002/disk7/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"*`_ curl -I -XHEAD + Repeat for the other two servers (192.168.245.3 and 192.168.245.4). - "`*http://15.184.9.94:6002/disk11/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* `_ curl -I -XHEAD + If no files exist, no further action is needed. 
- "`*http://15.184.9.103:6002/disk10/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* `_ curl -I -XHEAD +#. Stop Swift processes on all nodes listed by ``swift-get-nodes`` + (In this example, that is 192.168.245.3, 192.168.245.4 and 192.168.245.5). - "`*http://15.184.9.80:6002/disk2/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* `_ # [Handoff]curl -I -XHEAD +#. We recommend you make backup copies of the database files. - "`*http://15.184.9.120:6002/disk2/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* `_ # [Handoff]curl -I -XHEAD +#. Delete the database files. For example: - "`*http://15.184.9.98:6002/disk2/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"* `_ # [Handoff]ssh 15.184.9.126 + .. code:: - "ls -lah /srv/node/disk7/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"ssh 15.184.9.94 "ls -lah /srv/node/disk11/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"ssh 15.184.9.103 - "ls -lah /srv/node/disk10/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"ssh 15.184.9.80 "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/" # [Handoff]ssh 15.184.9.120 - "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/" # [Handoff]ssh 15.184.9.98 "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/" # [Handoff] + $ ssh 192.168.245.5 + $ cd /srv/node/disk1/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052 + $ sudo rm * -Check that the handoff nodes do not have account databases: + Repeat for the other two servers (192.168.245.3 and 192.168.245.4). -.. code:: +#. Restart Swift on all three servers - $ ssh 15.184.9.80 "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/" - ls: cannot access /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/: No such file or directory +At this stage, the account is fully deleted. If you enable the auto-create option, the +next time the user attempts to access the account, the account will be created. 
+You may also use swiftly to recreate the account. -If the handoff node has a database, wait for rebalancing to occur. Procedure: Temporarily stop load balancers from directing traffic to a proxy server ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -319,7 +367,7 @@ follows. This can be useful when a proxy is misbehaving but you need Swift running to help diagnose the problem. By removing from the load balancers, customer's are not impacted by the misbehaving proxy. -#. Ensure that in proxyserver.com the ``disable_path`` variable is set to +#. Ensure that in /etc/swift/proxy-server.conf the ``disable_path`` variable is set to ``/etc/swift/disabled-by-file``. #. Log onto the proxy node. @@ -330,9 +378,9 @@ balancers, customer's are not impacted by the misbehaving proxy. sudo swift-init proxy shutdown - .. note:: + .. note:: - Shutdown, not stop. + Shutdown, not stop. #. Create the ``/etc/swift/disabled-by-file`` file. For example: @@ -346,13 +394,10 @@ balancers, customer's are not impacted by the misbehaving proxy. sudo swift-init proxy start -It works because the healthcheck middleware looks for this file. If it -find it, it will return 503 error instead of 200/OK. This means the load balancer +It works because the healthcheck middleware looks for /etc/swift/disabled-by-file. +If it exists, the middleware will return 503/error instead of 200/OK. This means the load balancer should stop sending traffic to the proxy. -``/healthcheck`` will report -``FAIL: disabled by file`` if the ``disabled-by-file`` file exists. 
- Procedure: Ad-Hoc disk performance test ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/ops_runbook/sec-furtherdiagnose.rst b/doc/source/ops_runbook/sec-furtherdiagnose.rst deleted file mode 100644 index dd8154a3d9..0000000000 --- a/doc/source/ops_runbook/sec-furtherdiagnose.rst +++ /dev/null @@ -1,177 +0,0 @@ -============================== -Further issues and resolutions -============================== - -.. note:: - - The urgency levels in each **Action** column indicates whether or - not it is required to take immediate action, or if the problem can be worked - on during business hours. - -.. list-table:: - :widths: 33 33 33 - :header-rows: 1 - - * - **Scenario** - - **Description** - - **Action** - * - ``/healthcheck`` latency is high. - - The ``/healthcheck`` test does not tax the proxy very much so any drop in value is probably related to - network issues, rather than the proxies being very busy. A very slow proxy might impact the average - number, but it would need to be very slow to shift the number that much. - - Check networks. Do a ``curl https:///healthcheck where ip-address`` is individual proxy - IP address to see if you can pin point a problem in the network. - - Urgency: If there are other indications that your system is slow, you should treat - this as an urgent problem. - * - Swift process is not running. - - You can use ``swift-init`` status to check if swift processes are running on any - given server. - - Run this command: - .. code:: - - sudo swift-init all start - - Examine messages in the swift log files to see if there are any - error messages related to any of the swift processes since the time you - ran the ``swift-init`` command. - - Take any corrective actions that seem necessary. - - Urgency: If this only affects one server, and you have more than one, - identifying and fixing the problem can wait until business hours. - If this same problem affects many servers, then you need to take corrective - action immediately. 
- * - ntpd is not running. - - NTP is not running. - - Configure and start NTP. - Urgency: For proxy servers, this is vital. - - * - Host clock is not syncd to an NTP server. - - Node time settings does not match NTP server time. - This may take some time to sync after a reboot. - - Assuming NTP is configured and running, you have to wait until the times sync. - * - A swift process has hundreds, to thousands of open file descriptors. - - May happen to any of the swift processes. - Known to have happened with a ``rsyslod restart`` and where ``/tmp`` was hanging. - - - Restart the swift processes on the affected node: - - .. code:: - - % sudo swift-init all reload - - Urgency: - If known performance problem: Immediate - - If system seems fine: Medium - * - A swift process is not owned by the swift user. - - If the UID of the swift user has changed, then the processes might not be - owned by that UID. - - Urgency: If this only affects one server, and you have more than one, - identifying and fixing the problem can wait until business hours. - If this same problem affects many servers, then you need to take corrective - action immediately. - * - Object account or container files not owned by swift. - - This typically happens if during a reinstall or a re-image of a server that the UID - of the swift user was changed. The data files in the object account and container - directories are owned by the original swift UID. As a result, the current swift - user does not own these files. - - Correct the UID of the swift user to reflect that of the original UID. An alternate - action is to change the ownership of every file on all file systems. This alternate - action is often impractical and will take considerable time. - - Urgency: If this only affects one server, and you have more than one, - identifying and fixing the problem can wait until business hours. - If this same problem affects many servers, then you need to take corrective - action immediately. 
- * - A disk drive has a high IO wait or service time. - - If high wait IO times are seen for a single disk, then the disk drive is the problem. - If most/all devices are slow, the controller is probably the source of the problem. - The controller cache may also be miss configured – which will cause similar long - wait or service times. - - As a first step, if your controllers have a cache, check that it is enabled and their battery/capacitor - is working. - - Second, reboot the server. - If problem persists, file a DC ticket to have the drive or controller replaced. - See `Diagnose: Slow disk devices` on how to check the drive wait or service times. - - Urgency: Medium - * - The network interface is not up. - - Use the ``ifconfig`` and ``ethtool`` commands to determine the network state. - - You can try restarting the interface. However, generally the interface - (or cable) is probably broken, especially if the interface is flapping. - - Urgency: If this only affects one server, and you have more than one, - identifying and fixing the problem can wait until business hours. - If this same problem affects many servers, then you need to take corrective - action immediately. - * - Network interface card (NIC) is not operating at the expected speed. - - The NIC is running at a slower speed than its nominal rated speed. - For example, it is running at 100 Mb/s and the NIC is a 1Ge NIC. - - 1. Try resetting the interface with: - - .. code:: - - sudo ethtool -s eth0 speed 1000 - - ... and then run: - - .. code:: - - sudo lshw -class - - See if size goes to the expected speed. Failing - that, check hardware (NIC cable/switch port). - - 2. If persistent, consider shutting down the server (especially if a proxy) - until the problem is identified and resolved. If you leave this server - running it can have a large impact on overall performance. - - Urgency: High - * - The interface RX/TX error count is non-zero. 
- - A value of 0 is typical, but counts of 1 or 2 do not indicate a problem. - - 1. For low numbers (For example, 1 or 2), you can simply ignore. Numbers in the range - 3-30 probably indicate that the error count has crept up slowly over a long time. - Consider rebooting the server to remove the report from the noise. - - Typically, when a cable or interface is bad, the error count goes to 400+. For example, - it stands out. There may be other symptoms such as the interface going up and down or - not running at correct speed. A server with a high error count should be watched. - - 2. If the error count continue to climb, consider taking the server down until - it can be properly investigated. In any case, a reboot should be done to clear - the error count. - - Urgency: High, if the error count increasing. - - * - In a swift log you see a message that a process has not replicated in over 24 hours. - - The replicator has not successfully completed a run in the last 24 hours. - This indicates that the replicator has probably hung. - - Use ``swift-init`` to stop and then restart the replicator process. - - Urgency: Low (high if recent adding or replacement of disk drives), however if you - recently added or replaced disk drives then you should treat this urgently. - * - Container Updater has not run in 4 hour(s). - - The service may appear to be running however, it may be hung. Examine their swift - logs to see if there are any error messages relating to the container updater. This - may potentially explain why the container is not running. - - Urgency: Medium - This may have been triggered by a recent restart of the rsyslog daemon. - Restart the service with: - .. code:: - - sudo swift-init reload - * - Object replicator: Reports the remaining time and that time is more than 100 hours. - - Each replication cycle the object replicator writes a log message to its log - reporting statistics about the current cycle. 
This includes an estimate for the - remaining time needed to replicate all objects. If this time is longer than - 100 hours, there is a problem with the replication process. - - Urgency: Medium - Restart the service with: - .. code:: - - sudo swift-init object-replicator reload - - Check that the remaining replication time is going down. diff --git a/doc/source/ops_runbook/troubleshooting.rst b/doc/source/ops_runbook/troubleshooting.rst index d097ce0673..01c55899b7 100644 --- a/doc/source/ops_runbook/troubleshooting.rst +++ b/doc/source/ops_runbook/troubleshooting.rst @@ -18,16 +18,14 @@ files. For example: .. code:: - $ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l -R ssh - - -w .68.[4-11,132-139 4-11,132-139],.132.[4-11,132-139 - 4-11,132-139] 'sudo bzgrep -w AUTH_redacted-4962-4692-98fb-52ddda82a5af /var/log/swift/proxy.log\*' - dshbak -c + $ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l -R ssh \ + -w .68.[4-11,132-139 4-11,132-139],.132.[4-11,132-139] \ + 'sudo bzgrep -w AUTH_redacted-4962-4692-98fb-52ddda82a5af /var/log/swift/proxy.log*' | dshbak -c . . - \---------------\- + ---------------- .132.6 - \---------------\- + ---------------- Feb 29 08:51:57 sw-aw2az2-proxy011 proxy-server .16.132 .66.8 29/Feb/2012/08/51/57 GET /v1.0/AUTH_redacted-4962-4692-98fb-52ddda82a5af /%3Fformat%3Djson HTTP/1.0 404 - - _4f4d50c5e4b064d88bd7ab82 - - - @@ -37,52 +35,49 @@ This shows a ``GET`` operation on the users account. .. note:: - The HTTP status returned is 404, not found, rather than 500 as reported by the user. + The HTTP status returned is 404, Not found, rather than 500 as reported by the user. Using the transaction ID, ``tx429fc3be354f434ab7f9c6c4206c1dc3`` you can search the swift object servers log files for this transaction ID: .. 
code:: - $ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l - - -R ssh - -w .72.[4-67|4-67],.[4-67|4-67],.[4-67|4-67],.204.[4-131| 4-131] - 'sudo bzgrep tx429fc3be354f434ab7f9c6c4206c1dc3 /var/log/swift/server.log*' - | dshbak -c + $ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l -R ssh \ + -w .72.[4-67|4-67],.[4-67|4-67],.[4-67|4-67],.204.[4-131] \ + 'sudo bzgrep tx429fc3be354f434ab7f9c6c4206c1dc3 /var/log/swift/server.log*' | dshbak -c . . - \---------------\- + ---------------- .72.16 - \---------------\- + ---------------- Feb 29 08:51:57 sw-aw2az1-object013 account-server .132.6 - - [29/Feb/2012:08:51:57 +0000|] "GET /disk9/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" 404 - "tx429fc3be354f434ab7f9c6c4206c1dc3" "-" "-" 0.0016 "" - \---------------\- - .31 - \---------------\- - Feb 29 08:51:57 node-az2-object060 account-server .132.6 - - - [29/Feb/2012:08:51:57 +0000|] "GET /disk6/198875/AUTH_redacted-4962- - 4692-98fb-52ddda82a5af" 404 - "tx429fc3be354f434ab7f9c6c4206c1dc3" "-" "-" 0.0011 "" - \---------------\- - .204.70 - \---------------\- + ---------------- + .31 + ---------------- + Feb 29 08:51:57 node-az2-object060 account-server .132.6 - - + [29/Feb/2012:08:51:57 +0000|] "GET /disk6/198875/AUTH_redacted-4962- + 4692-98fb-52ddda82a5af" 404 - "tx429fc3be354f434ab7f9c6c4206c1dc3" "-" "-" 0.0011 "" + ---------------- + .204.70 + ---------------- - Feb 29 08:51:57 sw-aw2az3-object0067 account-server .132.6 - - - [29/Feb/2012:08:51:57 +0000|] "GET /disk6/198875/AUTH_redacted-4962- - 4692-98fb-52ddda82a5af" 404 - "tx429fc3be354f434ab7f9c6c4206c1dc3" "-" "-" 0.0014 "" + Feb 29 08:51:57 sw-aw2az3-object0067 account-server .132.6 - - + [29/Feb/2012:08:51:57 +0000|] "GET /disk6/198875/AUTH_redacted-4962- + 4692-98fb-52ddda82a5af" 404 - "tx429fc3be354f434ab7f9c6c4206c1dc3" "-" "-" 0.0014 "" .. note:: The 3 GET operations to 3 different object servers that hold the 3 replicas of this users account. 
Each ``GET`` returns a HTTP status of 404, - not found. + Not found. Next, use the ``swift-get-nodes`` command to determine exactly where the -users account data is stored: +user's account data is stored: .. code:: @@ -114,23 +109,23 @@ users account data is stored: curl -I -XHEAD "`http://.72.27:6002/disk11/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" `_ # [Handoff] - ssh .31 "ls \-lah /srv/node/disk6/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" - ssh .204.70 "ls \-lah /srv/node/disk6/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" - ssh .72.16 "ls \-lah /srv/node/disk9/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" - ssh .204.64 "ls \-lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" # [Handoff] - ssh .26 "ls \-lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" # [Handoff] - ssh .72.27 "ls \-lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" # [Handoff] + ssh .31 "ls -lah /srv/node/disk6/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" + ssh .204.70 "ls -lah /srv/node/disk6/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" + ssh .72.16 "ls -lah /srv/node/disk9/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" + ssh .204.64 "ls -lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" # [Handoff] + ssh .26 "ls -lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" # [Handoff] + ssh .72.27 "ls -lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" # [Handoff] Check each of the primary servers, .31, .204.70 and .72.16, for this users account. For example on .72.16: .. code:: - $ ls \\-lah /srv/node/disk9/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/ + $ ls -lah /srv/node/disk9/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/ total 1.0M drwxrwxrwx 2 swift swift 98 2012-02-23 14:49 . drwxrwxrwx 3 swift swift 45 2012-02-03 23:28 .. 
- -rw-\\-----\\- 1 swift swift 15K 2012-02-23 14:49 1846d99185f8a0edaf65cfbf37439696.db + -rw------- 1 swift swift 15K 2012-02-23 14:49 1846d99185f8a0edaf65cfbf37439696.db -rw-rw-rw- 1 swift swift 0 2012-02-23 14:49 1846d99185f8a0edaf65cfbf37439696.db.pending So this users account db, an sqlite db is present. Use sqlite to @@ -155,7 +150,7 @@ checkout the account: status_changed_at = 1330001026.00514 metadata = -.. note:: +.. note: The status is ``DELETED``. So this account was deleted. This explains why the GET operations are returning 404, not found. Check the account @@ -174,14 +169,14 @@ server logs: .. code:: - $ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l -R ssh -w .68.[4-11,132-139 4-11,132- - 139],.132.[4-11,132-139|4-11,132-139] 'sudo bzgrep AUTH_redacted-4962-4692-98fb-52ddda82a5af /var/log/swift/proxy.log\* | grep -w - DELETE |awk "{print \\$3,\\$10,\\$12}"' |- dshbak -c + $ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l -R ssh \ + -w .68.[4-11,132-139 4-11,132-139],.132.[4-11,132-139|4-11,132-139] \ + 'sudo bzgrep AUTH_redacted-4962-4692-98fb-52ddda82a5af /var/log/swift/proxy.log* \ + | grep -w DELETE | awk "{print $3,$10,$12}"' |- dshbak -c . . - Feb 23 12:43:46 sw-aw2az2-proxy001 proxy-server 15.203.233.76 .66.7 23/Feb/2012/12/43/46 DELETE /v1.0/AUTH_redacted-4962-4692-98fb- + Feb 23 12:43:46 sw-aw2az2-proxy001 proxy-server .66.7 23/Feb/2012/12/43/46 DELETE /v1.0/AUTH_redacted-4962-4692-98fb- 52ddda82a5af/ HTTP/1.0 204 - Apache-HttpClient/4.1.2%20%28java%201.5%29 _4f458ee4e4b02a869c3aad02 - - - - tx4471188b0b87406899973d297c55ab53 - 0.0086 From this you can see the operation that resulted in the account being deleted. @@ -252,8 +247,8 @@ Finally, use ``swift-direct`` to delete the container. Procedure: Decommissioning swift nodes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Should Swift nodes need to be decommissioned. For example, where they are being -re-purposed, it is very important to follow the following steps. 
+Should Swift nodes need to be decommissioned (e.g.,, where they are being +re-purposed), it is very important to follow the following steps. #. In the case of object servers, follow the procedure for removing the node from the rings. From 0bc5a69d416004924bd17c14d8455f65433751bd Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Thu, 10 Mar 2016 18:02:44 +0000 Subject: [PATCH 028/141] Wait on greenthreads before unmocking http_connect The fake_spawn() context manager wraps spawn and waits for greenthreads to complete (such as the async_update threads). The wait needs to be done before http_connect is un-mocked, so the fake_spawn context manager should exit *before* any context manager that mocks the http_connect method. Also add fake_spawn to _test_PUT_then_POST_async_pendings() Related-Bug: #1536376 Related-Bug: #1514111 Closes-Bug: #1555739 Change-Id: I15f36e191cfe3ee6c82b4be56e8618ec0230e328 --- test/unit/obj/test_server.py | 114 ++++++++++++++++++----------------- 1 file changed, 60 insertions(+), 54 deletions(-) diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index ed8f6ce5d2..83c4e98838 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -729,9 +729,10 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'PUT'}, headers=put_headers, body='test') - with mock.patch('swift.obj.server.http_connect', fake_http_connect): - with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''): - resp = req.get_response(self.object_controller) + with mock.patch('swift.obj.server.http_connect', fake_http_connect), \ + mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''), \ + fake_spawn(): + resp = req.get_response(self.object_controller) self.assertEqual(resp.status_int, 201) @@ -772,9 +773,10 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'POST'}, headers=post_headers) - with mock.patch('swift.obj.server.http_connect', fake_http_connect): - with 
mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''): - resp = req.get_response(self.object_controller) + with mock.patch('swift.obj.server.http_connect', fake_http_connect), \ + mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''), \ + fake_spawn(): + resp = req.get_response(self.object_controller) self.assertEqual(resp.status_int, 202) @@ -836,10 +838,10 @@ class TestObjectController(unittest.TestCase): os.listdir(os.path.join( device_dir, diskfile.get_async_dir(policy)))) - def test_PUT_then_POST_async_updates_with_repl_policy(self): + def test_PUT_then_POST_async_pendings_with_repl_policy(self): self._test_PUT_then_POST_async_pendings(POLICIES[0]) - def test_PUT_then_POST_async_updates_with_EC_policy(self): + def test_PUT_then_POST_async_pendings_with_EC_policy(self): self._test_PUT_then_POST_async_pendings( POLICIES[1], update_etag='override_etag') @@ -1964,10 +1966,10 @@ class TestObjectController(unittest.TestCase): 'X-Container-Timestamp': '1', 'Content-Type': 'application/new1', 'Content-Length': '0'}) - with fake_spawn(), mock.patch.object( - object_server, 'http_connect', - mock_http_connect(201)): - resp = req.get_response(self.object_controller) + with mock.patch.object( + object_server, 'http_connect', mock_http_connect(201)): + with fake_spawn(): + resp = req.get_response(self.object_controller) self.assertEqual(resp.status_int, 201) timestamp = normalize_timestamp(time()) req = Request.blank( @@ -1980,10 +1982,10 @@ class TestObjectController(unittest.TestCase): 'X-Container-Timestamp': '1', 'Content-Type': 'application/new1', 'Content-Length': '0'}) - with fake_spawn(), mock.patch.object( - object_server, 'http_connect', - mock_http_connect(500)): - resp = req.get_response(self.object_controller) + with mock.patch.object( + object_server, 'http_connect', mock_http_connect(500)): + with fake_spawn(): + resp = req.get_response(self.object_controller) self.assertEqual(resp.status_int, 201) timestamp = normalize_timestamp(time()) req = Request.blank( 
@@ -1996,10 +1998,11 @@ class TestObjectController(unittest.TestCase): 'X-Container-Timestamp': '1', 'Content-Type': 'application/new1', 'Content-Length': '0'}) - with fake_spawn(), mock.patch.object( + with mock.patch.object( object_server, 'http_connect', mock_http_connect(500, with_exc=True)): - resp = req.get_response(self.object_controller) + with fake_spawn(): + resp = req.get_response(self.object_controller) self.assertEqual(resp.status_int, 201) def test_PUT_ssync_multi_frag(self): @@ -3198,10 +3201,10 @@ class TestObjectController(unittest.TestCase): 'X-Container-Device': 'sda1', 'X-Container-Partition': 'p', 'Content-Type': 'text/plain'}) - with fake_spawn(), mocked_http_conn( - 200, give_connect=capture_updates) as fake_conn: - resp = req.get_response(self.object_controller) - self.assertRaises(StopIteration, fake_conn.code_iter.next) + with mocked_http_conn(200, give_connect=capture_updates) as fake_conn: + with fake_spawn(): + resp = req.get_response(self.object_controller) + self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 201) self.assertEqual(1, len(container_updates)) for update in container_updates: @@ -3237,10 +3240,10 @@ class TestObjectController(unittest.TestCase): 'X-Container-Device': 'sda1', 'X-Container-Partition': 'p', 'Content-Type': 'text/html'}) - with fake_spawn(), mocked_http_conn( - 200, give_connect=capture_updates) as fake_conn: - resp = req.get_response(self.object_controller) - self.assertRaises(StopIteration, fake_conn.code_iter.next) + with mocked_http_conn(200, give_connect=capture_updates) as fake_conn: + with fake_spawn(): + resp = req.get_response(self.object_controller) + self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 201) self.assertEqual(1, len(container_updates)) for update in container_updates: @@ -3275,10 +3278,10 @@ class TestObjectController(unittest.TestCase): 'X-Container-Device': 'sda1', 'X-Container-Partition': 'p', 
'Content-Type': 'text/enriched'}) - with fake_spawn(), mocked_http_conn( - 200, give_connect=capture_updates) as fake_conn: - resp = req.get_response(self.object_controller) - self.assertRaises(StopIteration, fake_conn.code_iter.next) + with mocked_http_conn(200, give_connect=capture_updates) as fake_conn: + with fake_spawn(): + resp = req.get_response(self.object_controller) + self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 201) self.assertEqual(1, len(container_updates)) for update in container_updates: @@ -3313,10 +3316,10 @@ class TestObjectController(unittest.TestCase): 'X-Container-Host': '10.0.0.1:8080', 'X-Container-Device': 'sda1', 'X-Container-Partition': 'p'}) - with fake_spawn(), mocked_http_conn( - 200, give_connect=capture_updates) as fake_conn: - resp = req.get_response(self.object_controller) - self.assertRaises(StopIteration, fake_conn.code_iter.next) + with mocked_http_conn(200, give_connect=capture_updates) as fake_conn: + with fake_spawn(): + resp = req.get_response(self.object_controller) + self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 204) self.assertEqual(1, len(container_updates)) for update in container_updates: @@ -3344,10 +3347,10 @@ class TestObjectController(unittest.TestCase): 'X-Container-Host': '10.0.0.1:8080', 'X-Container-Device': 'sda1', 'X-Container-Partition': 'p'}) - with fake_spawn(), mocked_http_conn( - 200, give_connect=capture_updates) as fake_conn: - resp = req.get_response(self.object_controller) - self.assertRaises(StopIteration, fake_conn.code_iter.next) + with mocked_http_conn(200, give_connect=capture_updates) as fake_conn: + with fake_spawn(): + resp = req.get_response(self.object_controller) + self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 404) self.assertEqual(1, len(container_updates)) for update in container_updates: @@ -3813,9 +3816,10 @@ class 
TestObjectController(unittest.TestCase): 'X-Delete-At-Partition': '6237', 'X-Delete-At-Device': 'sdp,sdq'}) - with fake_spawn(), mock.patch.object( + with mock.patch.object( object_server, 'http_connect', fake_http_connect): - resp = req.get_response(self.object_controller) + with fake_spawn(): + resp = req.get_response(self.object_controller) self.assertEqual(resp.status_int, 201) @@ -3925,9 +3929,10 @@ class TestObjectController(unittest.TestCase): 'X-Container-Host': '1.2.3.4:5, 6.7.8.9:10', 'X-Container-Device': 'sdb1, sdf1'}) - with fake_spawn(), mock.patch.object( + with mock.patch.object( object_server, 'http_connect', fake_http_connect): - req.get_response(self.object_controller) + with fake_spawn(): + req.get_response(self.object_controller) http_connect_args.sort(key=operator.itemgetter('ipaddr')) @@ -4002,10 +4007,11 @@ class TestObjectController(unittest.TestCase): headers['X-Object-Sysmeta-Ec-Frag-Index'] = '2' req = Request.blank( '/sda1/p/a/c/o', method='PUT', body='', headers=headers) - with fake_spawn(), mocked_http_conn( + with mocked_http_conn( 500, 500, give_connect=capture_updates) as fake_conn: - resp = req.get_response(self.object_controller) - self.assertRaises(StopIteration, fake_conn.code_iter.next) + with fake_spawn(): + resp = req.get_response(self.object_controller) + self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 201) self.assertEqual(2, len(container_updates)) delete_at_update, container_update = container_updates @@ -4238,10 +4244,10 @@ class TestObjectController(unittest.TestCase): 'X-Container-Partition': 'cpartition', 'X-Container-Device': 'cdevice', 'Content-Type': 'text/plain'}, body='') - with fake_spawn(), mocked_http_conn( - 200, give_connect=capture_updates) as fake_conn: - resp = req.get_response(self.object_controller) - self.assertRaises(StopIteration, fake_conn.code_iter.next) + with mocked_http_conn(200, give_connect=capture_updates) as fake_conn: + with fake_spawn(): + resp 
= req.get_response(self.object_controller) + self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 201) self.assertEqual(len(container_updates), 1) ip, port, method, path, headers = container_updates[0] @@ -4279,10 +4285,10 @@ class TestObjectController(unittest.TestCase): } req = Request.blank('/sda1/0/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers=headers, body='') - with fake_spawn(), mocked_http_conn( - 200, give_connect=capture_updates) as fake_conn: - resp = req.get_response(self.object_controller) - self.assertRaises(StopIteration, fake_conn.code_iter.next) + with mocked_http_conn(200, give_connect=capture_updates) as fake_conn: + with fake_spawn(): + resp = req.get_response(self.object_controller) + self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 201) self.assertEqual(len(container_updates), 1) ip, port, method, path, headers = container_updates[0] @@ -4320,7 +4326,7 @@ class TestObjectController(unittest.TestCase): given_args[:] = args diskfile_mgr = self.object_controller._diskfile_router[policy] diskfile_mgr.pickle_async_update = fake_pickle_async_update - with fake_spawn(), mocked_http_conn(500) as fake_conn: + with mocked_http_conn(500) as fake_conn, fake_spawn(): resp = req.get_response(self.object_controller) self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 201) From 8aca4e8eb36c67038335462abdb9a58ee5263b5c Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 11 Mar 2016 06:22:54 +0000 Subject: [PATCH 029/141] Imported Translations from Zanata For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: Ic39cc9ffb582909da3a827243b628206de5301a0 --- swift/locale/ru/LC_MESSAGES/swift.po | 66 +++++++++++++++++++++++++--- 1 file changed, 60 insertions(+), 6 deletions(-) diff --git a/swift/locale/ru/LC_MESSAGES/swift.po 
b/swift/locale/ru/LC_MESSAGES/swift.po index 16fdce58e2..5e99b3d848 100644 --- a/swift/locale/ru/LC_MESSAGES/swift.po +++ b/swift/locale/ru/LC_MESSAGES/swift.po @@ -6,16 +6,17 @@ # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Filatov Sergey , 2016. #zanata +# Grigory Mokhin , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev176\n" +"Project-Id-Version: swift 2.6.1.dev203\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-08 04:09+0000\n" +"POT-Creation-Date: 2016-03-11 02:36+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-01-17 10:49+0000\n" -"Last-Translator: Filatov Sergey \n" +"PO-Revision-Date: 2016-03-10 07:41+0000\n" +"Last-Translator: Grigory Mokhin \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" @@ -235,6 +236,9 @@ msgstr "Клиент отключен во время чтения" msgid "Client disconnected without sending enough data" msgstr "Клиент отключен без отправки данных" +msgid "Client disconnected without sending last chunk" +msgstr "Клиент отключился, не отправив последний фрагмент данных" + #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" @@ -390,6 +394,11 @@ msgstr "Ошибка: ошибка закрытия DiskFile %(data_file)s: %(ex msgid "ERROR Exception causing client disconnect" msgstr "Ошибка. Исключительная ситуация при отключении клиента" +#, python-format +msgid "ERROR Exception transferring data to object servers %s" +msgstr "" +"ОШИБКА. Исключительная ситуация при передаче данных на серверы объектов %s" + msgid "ERROR Failed to get my own IPs?" msgstr "Ошибка: не удалось получить собственные IP-адреса?" 
@@ -568,6 +577,12 @@ msgstr "Ошибка синхронизации раздела" msgid "Error syncing with node: %s" msgstr "Ошибка синхронизации с узлом %s" +#, python-format +msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" +msgstr "" +"Ошибка при попытке перекомпоновки стратегии %(path)s: номер#%(policy)d " +"фрагмент#%(frag_index)s" + msgid "Error: An error occurred" msgstr "Ошибка: произошла ошибка" @@ -629,6 +644,13 @@ msgstr "Следующая цепочка CNAME для %(given_domain)s в %(fou msgid "Found configs:" msgstr "Обнаружены конфигурации:" +msgid "" +"Handoffs first mode still has handoffs remaining. Aborting current " +"replication pass." +msgstr "" +"В режиме передачи управления не все операции завершены. Принудительное " +"завершение текущего прохода репликации." + msgid "Host unreachable" msgstr "Хост недоступен" @@ -703,6 +725,10 @@ msgstr "Не устройстве %s (%s) закончилось место" msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Ограниченная ошибка узла %(ip)s:%(port)s (%(device)s)" +#, python-format +msgid "Not enough object servers ack'ed (got %d)" +msgstr "Недостаточное число подтверждений с серверов объектов (получено %d)" + #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " @@ -747,6 +773,18 @@ msgstr "" "%(frate).2f, всего байт/с: %(brate).2f, время контроля: %(audit).2f, " "скорость: %(audit_rate).2f" +#, python-format +msgid "" +"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " +"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " +"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " +"%(audit_rate).2f" +msgstr "" +"Проверка объекта (%(type)s). 
После %(start_time)s: локально: успешно - " +"%(passes)d, в карантине - %(quars)d, файлов с ошибками %(errors)d в секунду: " +"%(frate).2f , байт/с: %(brate).2f, общее время: %(total).2f, время контроля: " +"%(audit).2f, скорость: %(audit_rate).2f" + #, python-format msgid "Object audit stats: %s" msgstr "Состояние контроля объекта: %s" @@ -755,6 +793,10 @@ msgstr "Состояние контроля объекта: %s" msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Реконструкция объекта выполнена (однократно). (%.02f мин.)" +#, python-format +msgid "Object reconstruction complete. (%.02f minutes)" +msgstr "Реконструкция объекта выполнена. (%.02f мин.)" + #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Репликация объекта выполнена (однократно). (%.02f мин.)" @@ -818,9 +860,9 @@ msgstr "Неполадка при очистке %s" msgid "Problem cleaning up %s (%s)" msgstr "Возникла проблема при очистке %s (%s)" -#, fuzzy, python-format +#, python-format msgid "Problem writing durable state file %s (%s)" -msgstr "Возникла проблема при записи файла состояния %s (%s)" +msgstr "Возникла неполадка при записи файла сохраняемого состояния %s (%s)" #, python-format msgid "Profiling Error: %s" @@ -995,6 +1037,10 @@ msgstr "Попытка выполнения метода %(method)s %(path)s" msgid "Trying to GET %(full_path)s" msgstr "Попытка GET-запроса %(full_path)s" +#, python-format +msgid "Trying to get %s status of PUT to %s" +msgstr "Попытка получения состояния %s операции PUT в %s" + #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Попытка получения конечного состояния PUT в %s" @@ -1008,6 +1054,10 @@ msgstr "Попытка чтения во время операции GET (вып msgid "Trying to send to client" msgstr "Попытка отправки клиенту" +#, python-format +msgid "Trying to sync suffixes with %s" +msgstr "Попытка синхронизации суффиксов с %s" + #, python-format msgid "Trying to write to %s" msgstr "Попытка записи в %s" @@ -1031,6 +1081,10 @@ msgstr "Не удалось найти %s 
в libc. Оставлено как no msgid "Unable to locate config for %s" msgstr "Не удалось найти конфигурационный файл для %s" +#, python-format +msgid "Unable to locate config number %s for %s" +msgstr "Не удается найти конфигурации с номером %s для %s" + msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" From 3ac7cab32bcd08e032ad587a5982bdc997fc20c1 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 9 Mar 2016 10:17:12 +0000 Subject: [PATCH 030/141] Remove reference to py26 in tox testing instructions Also change some rst formatting and clarify that tox is used to run more than just *unit* tests. Change-Id: I263af7b4f7a554da3a391cf5104af573c89e3825 --- doc/source/development_guidelines.rst | 63 ++++++++++++++------------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/doc/source/development_guidelines.rst b/doc/source/development_guidelines.rst index 2f3d70f78e..fd3607015f 100644 --- a/doc/source/development_guidelines.rst +++ b/doc/source/development_guidelines.rst @@ -9,63 +9,64 @@ Coding Guidelines For the most part we try to follow PEP 8 guidelines which can be viewed here: http://www.python.org/dev/peps/pep-0008/ -There is a useful pep8 command line tool for checking files for pep8 -compliance which can be installed with ``easy_install pep8``. - ------------------ Testing Guidelines ------------------ -Swift has a comprehensive suite of tests that are run on all submitted code, -and it is recommended that developers execute the tests themselves to -catch regressions early. Developers are also expected to keep the -test suite up-to-date with any submitted code changes. +Swift has a comprehensive suite of tests and pep8 checks that are run on all +submitted code, and it is recommended that developers execute the tests +themselves to catch regressions early. Developers are also expected to keep +the test suite up-to-date with any submitted code changes. 
-Swift's suite of unit tests can be executed in an isolated environment +Swift's tests and pep8 checks can be executed in an isolated environment with Tox: http://tox.testrun.org/ -To execute the unit tests: +To execute the tests: -* Install Tox: +* Install Tox:: - - `pip install tox` + pip install tox -* If you do not have python 2.6 installed (as in 12.04): - - Add `export TOXENV=py27,pep8` to your `~/.bashrc` +* Run Tox from the root of the swift repo:: - - `. ~/.bashrc` - -* Run Tox from the root of the swift repo: - - - `tox` + tox Remarks: - If you installed using: `cd ~/swift; sudo python setup.py develop`, - you may need to do: `cd ~/swift; sudo chown -R ${USER}:${USER} swift.egg-info` - prior to running tox. + If you installed using ``cd ~/swift; sudo python setup.py develop``, you may + need to do ``cd ~/swift; sudo chown -R ${USER}:${USER} swift.egg-info`` prior + to running tox. -* Optionally, run only specific tox builds: +* By default ``tox`` will run all of the unit test and pep8 checks listed in + the ``tox.ini`` file ``envlist`` option. A subset of the test environments + can be specified on the tox command line or by setting the ``TOXENV`` + environment variable. For example, to run only the pep8 checks and python2.7 + unit tests use:: - - `tox -e pep8,py27` + tox -e pep8,py27 + + or:: + + TOXENV=py27,pep8 tox .. note:: As of tox version 2.0.0, most environment variables are not automatically - passed to the test environment. Swift's `tox.ini` overrides this default + passed to the test environment. Swift's ``tox.ini`` overrides this default behavior so that variable names matching ``SWIFT_*`` and ``*_proxy`` will be - passed, but you may need to run `tox --recreate` for this to take effect + passed, but you may need to run ``tox --recreate`` for this to take effect after upgrading from tox<2.0.0. Conversely, if you do not want those environment variables to be passed to - the test environment then you will need to unset them before calling tox. 
+ the test environment then you will need to unset them before calling ``tox``. - Also, if you ever encounter DistributionNotFound, try to use `tox --recreate` - or remove the `.tox` directory to force tox to recreate the dependency list. + Also, if you ever encounter DistributionNotFound, try to use ``tox + --recreate`` or remove the ``.tox`` directory to force tox to recreate the + dependency list. -The functional tests may be executed against a :doc:`development_saio` or -other running Swift cluster using the command: +Swift's functional tests may be executed against a :doc:`development_saio` or +other running Swift cluster using the command:: -- `tox -e func` + tox -e func The endpoint and authorization credentials to be used by functional tests should be configured in the ``test.conf`` file as described in the section From 87fe86bcf8f061d91a8587e90613eee4e83c41d2 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 12 Mar 2016 06:12:49 +0000 Subject: [PATCH 031/141] Imported Translations from Zanata For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: Iee254bef3513111d339cafb4cb752eb4ad4c3b03 --- swift/locale/de/LC_MESSAGES/swift.po | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/swift/locale/de/LC_MESSAGES/swift.po b/swift/locale/de/LC_MESSAGES/swift.po index 2fe5a5d2e0..eb8b836422 100644 --- a/swift/locale/de/LC_MESSAGES/swift.po +++ b/swift/locale/de/LC_MESSAGES/swift.po @@ -11,13 +11,13 @@ # Monika Wolf , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev187\n" +"Project-Id-Version: swift 2.6.1.dev204\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-09 04:27+0000\n" +"POT-Creation-Date: 2016-03-11 13:27+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-08 09:51+0000\n" +"PO-Revision-Date: 2016-03-11 04:57+0000\n" "Last-Translator: Monika Wolf \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" @@ -1013,6 +1013,10 @@ msgstr "Zeitüberschreitungsausnahme bei %(ip)s:%(port)s/%(device)s" msgid "Trying to %(method)s %(path)s" msgstr "Versuch, %(method)s %(path)s" +#, python-format +msgid "Trying to GET %(full_path)s" +msgstr "Versuch, %(full_path)s mit GET abzurufen" + #, python-format msgid "Trying to get %s status of PUT to %s" msgstr "Es wird versucht, %s-Status von PUT für %s abzurufen." From 1cef9f0e3375a9113985fe60b80650395ffc96ad Mon Sep 17 00:00:00 2001 From: zhangguoqing Date: Sun, 13 Mar 2016 10:51:18 +0000 Subject: [PATCH 032/141] Fix py34 error of indexing 'dict_keys' object In python3, keys() of a dict isn't a list and cannot be indexed. 
Change-Id: I8a5b8e66a69f7096b2e5388bdeb12bb800879158 Closes-Bug: #1550184 --- test/unit/common/middleware/test_recon.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/unit/common/middleware/test_recon.py b/test/unit/common/middleware/test_recon.py index 8bd5bdaa65..f272c84928 100644 --- a/test/unit/common/middleware/test_recon.py +++ b/test/unit/common/middleware/test_recon.py @@ -1340,7 +1340,7 @@ class TestReconMiddleware(unittest.TestCase): os.listdir = fail_os_listdir resp = self.real_app_get_device_info() os.listdir = self.real_listdir - device_path = resp.keys()[0] + device_path = list(resp)[0] self.assertIsNone(resp[device_path]) def test_get_swift_conf_md5(self): From a537684c77a2e91c0cc6e4c4953a7e993f6a7005 Mon Sep 17 00:00:00 2001 From: Brian Cline Date: Mon, 14 Mar 2016 00:17:47 -0500 Subject: [PATCH 033/141] Don't report recon mount/usage status on files Today recon will include normal files in the payload it returns for /recon/unmounted and /recon/diskusage. As a result it can trigger bogus alarms on any operations-side monitoring checking for unmounted disks or disks that show up in diskusage with weird looking stats. This change adds an isdir check for the entries it finds in /srv/node. Change-Id: Iad72e03fdda11ff600b81b4c5d58020cc4b9048e Closes-bug: #1556747 --- swift/common/middleware/recon.py | 6 ++ test/unit/common/middleware/test_recon.py | 68 ++++++++++++++++++++--- 2 files changed, 66 insertions(+), 8 deletions(-) diff --git a/swift/common/middleware/recon.py b/swift/common/middleware/recon.py index b73823888a..0e2dfb4b41 100644 --- a/swift/common/middleware/recon.py +++ b/swift/common/middleware/recon.py @@ -206,6 +206,9 @@ class ReconMiddleware(object): """list unmounted (failed?) 
devices""" mountlist = [] for entry in os.listdir(self.devices): + if not os.path.isdir(os.path.join(self.devices, entry)): + continue + try: mounted = check_mount(self.devices, entry) except OSError as err: @@ -219,6 +222,9 @@ class ReconMiddleware(object): """get disk utilization statistics""" devices = [] for entry in os.listdir(self.devices): + if not os.path.isdir(os.path.join(self.devices, entry)): + continue + try: mounted = check_mount(self.devices, entry) except OSError as err: diff --git a/test/unit/common/middleware/test_recon.py b/test/unit/common/middleware/test_recon.py index 8bd5bdaa65..e9c18ab2ba 100644 --- a/test/unit/common/middleware/test_recon.py +++ b/test/unit/common/middleware/test_recon.py @@ -98,18 +98,25 @@ class OpenAndReadTester(object): class MockOS(object): - def __init__(self, ls_out=None, im_out=False, statvfs_out=None): + def __init__(self, ls_out=None, isdir_out=None, ismount_out=False, + statvfs_out=None): self.ls_output = ls_out - self.ismount_output = im_out + self.isdir_output = isdir_out + self.ismount_output = ismount_out self.statvfs_output = statvfs_out self.listdir_calls = [] - self.statvfs_calls = [] + self.isdir_calls = [] self.ismount_calls = [] + self.statvfs_calls = [] def fake_listdir(self, *args, **kwargs): self.listdir_calls.append((args, kwargs)) return self.ls_output + def fake_isdir(self, *args, **kwargs): + self.isdir_calls.append((args, kwargs)) + return self.isdir_output + def fake_ismount(self, *args, **kwargs): self.ismount_calls.append((args, kwargs)) if isinstance(self.ismount_output, Exception): @@ -164,7 +171,7 @@ class FakeRecon(object): def fake_unmounted(self): return {'unmountedtest': "1"} - def fake_no_unmounted(self): + def fake_unmounted_empty(self): return [] def fake_diskusage(self): @@ -214,9 +221,11 @@ class TestReconSuccess(TestCase): self.mockos = MockOS() self.fakecache = FakeFromCache() self.real_listdir = os.listdir + self.real_isdir = os.path.isdir self.real_ismount = utils.ismount 
self.real_statvfs = os.statvfs os.listdir = self.mockos.fake_listdir + os.path.isdir = self.mockos.fake_isdir utils.ismount = self.mockos.fake_ismount os.statvfs = self.mockos.fake_statvfs self.real_from_cache = self.app._from_recon_cache @@ -241,6 +250,7 @@ class TestReconSuccess(TestCase): def tearDown(self): os.listdir = self.real_listdir + os.path.isdir = self.real_isdir utils.ismount = self.real_ismount os.statvfs = self.real_statvfs del self.mockos @@ -931,39 +941,63 @@ class TestReconSuccess(TestCase): unmounted_resp = [{'device': 'fakeone', 'mounted': False}, {'device': 'faketwo', 'mounted': False}] self.mockos.ls_output = ['fakeone', 'faketwo'] + self.mockos.isdir_output = True self.mockos.ismount_output = False rv = self.app.get_unmounted() self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})]) + self.assertEqual(self.mockos.isdir_calls, + [(('/srv/node/fakeone',), {}), + (('/srv/node/faketwo',), {})]) self.assertEqual(rv, unmounted_resp) - def test_get_unmounted_everything_normal(self): + def test_get_unmounted_excludes_files(self): + unmounted_resp = [] + self.mockos.ls_output = ['somerando.log'] + self.mockos.isdir_output = False + self.mockos.ismount_output = False + rv = self.app.get_unmounted() + self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})]) + self.assertEqual(self.mockos.isdir_calls, + [(('/srv/node/somerando.log',), {})]) + self.assertEqual(rv, unmounted_resp) + + def test_get_unmounted_all_mounted(self): unmounted_resp = [] self.mockos.ls_output = ['fakeone', 'faketwo'] + self.mockos.isdir_output = True self.mockos.ismount_output = True rv = self.app.get_unmounted() self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})]) + self.assertEqual(self.mockos.isdir_calls, + [(('/srv/node/fakeone',), {}), + (('/srv/node/faketwo',), {})]) self.assertEqual(rv, unmounted_resp) def test_get_unmounted_checkmount_fail(self): unmounted_resp = [{'device': 'fakeone', 'mounted': 'brokendrive'}] self.mockos.ls_output = 
['fakeone'] + self.mockos.isdir_output = True self.mockos.ismount_output = OSError('brokendrive') rv = self.app.get_unmounted() self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})]) + self.assertEqual(self.mockos.isdir_calls, + [(('/srv/node/fakeone',), {})]) self.assertEqual(self.mockos.ismount_calls, [(('/srv/node/fakeone',), {})]) self.assertEqual(rv, unmounted_resp) - def test_no_get_unmounted(self): + def test_get_unmounted_no_mounts(self): def fake_checkmount_true(*args): return True unmounted_resp = [] self.mockos.ls_output = [] + self.mockos.isdir_output = False self.mockos.ismount_output = False rv = self.app.get_unmounted() self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})]) + self.assertEqual(self.mockos.isdir_calls, []) self.assertEqual(rv, unmounted_resp) def test_get_diskusage(self): @@ -977,20 +1011,37 @@ class TestReconSuccess(TestCase): du_resp = [{'device': 'canhazdrive1', 'avail': 4150685696, 'mounted': True, 'used': 3890520064, 'size': 8041205760}] self.mockos.ls_output = ['canhazdrive1'] + self.mockos.isdir_output = True self.mockos.statvfs_output = statvfs_content self.mockos.ismount_output = True rv = self.app.get_diskusage() + self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})]) + self.assertEqual(self.mockos.isdir_calls, + [(('/srv/node/canhazdrive1',), {})]) self.assertEqual(self.mockos.statvfs_calls, [(('/srv/node/canhazdrive1',), {})]) self.assertEqual(rv, du_resp) + def test_get_diskusage_excludes_files(self): + du_resp = [] + self.mockos.ls_output = ['somerando.log'] + self.mockos.isdir_output = False + rv = self.app.get_diskusage() + self.assertEqual(self.mockos.isdir_calls, + [(('/srv/node/somerando.log',), {})]) + self.assertEqual(self.mockos.statvfs_calls, []) + self.assertEqual(rv, du_resp) + def test_get_diskusage_checkmount_fail(self): du_resp = [{'device': 'canhazdrive1', 'avail': '', 'mounted': 'brokendrive', 'used': '', 'size': ''}] self.mockos.ls_output = ['canhazdrive1'] + 
self.mockos.isdir_output = True self.mockos.ismount_output = OSError('brokendrive') rv = self.app.get_diskusage() self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})]) + self.assertEqual(self.mockos.isdir_calls, + [(('/srv/node/canhazdrive1',), {})]) self.assertEqual(self.mockos.ismount_calls, [(('/srv/node/canhazdrive1',), {})]) self.assertEqual(rv, du_resp) @@ -1000,6 +1051,7 @@ class TestReconSuccess(TestCase): du_resp = [{'device': 'canhazdrive1', 'avail': '', 'mounted': 'Input/Output Error', 'used': '', 'size': ''}] self.mockos.ls_output = ['canhazdrive1'] + self.mockos.isdir_output = True rv = self.app.get_diskusage() self.assertEqual(rv, du_resp) @@ -1256,9 +1308,9 @@ class TestReconMiddleware(unittest.TestCase): resp = self.app(req.environ, start_response) self.assertEqual(resp, get_unmounted_resp) - def test_recon_no_get_unmounted(self): + def test_recon_get_unmounted_empty(self): get_unmounted_resp = '[]' - self.app.get_unmounted = self.frecon.fake_no_unmounted + self.app.get_unmounted = self.frecon.fake_unmounted_empty req = Request.blank('/recon/unmounted', environ={'REQUEST_METHOD': 'GET'}) resp = ''.join(self.app(req.environ, start_response)) From 9e90d309621584be173b9be7ac91c469a013ab96 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 14 Mar 2016 06:13:37 +0000 Subject: [PATCH 034/141] Imported Translations from Zanata For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: I30d06a1d96d4bd0e2e4972cb8ede7ebc08beae2d --- swift/locale/zh_CN/LC_MESSAGES/swift.po | 154 +++++++++++++++++++++++- 1 file changed, 150 insertions(+), 4 deletions(-) diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po index a779aa406e..7684fa92e2 100644 --- a/swift/locale/zh_CN/LC_MESSAGES/swift.po +++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po @@ -6,16 +6,18 @@ # Pearl Yajing Tan(Seagate Tech) , 2014 # Lucas Palm , 2015. 
#zanata # OpenStack Infra , 2015. #zanata +# Andreas Jaeger , 2016. #zanata +# Linda , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev176\n" +"Project-Id-Version: swift 2.6.1.dev205\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-08 04:09+0000\n" +"POT-Creation-Date: 2016-03-12 17:33+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-08-11 11:22+0000\n" -"Last-Translator: openstackjenkins \n" +"PO-Revision-Date: 2016-03-13 07:28+0000\n" +"Last-Translator: Andreas Jaeger \n" "Language: zh-CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" @@ -50,6 +52,16 @@ msgstr "%(ip)s/%(device)s的回应为未挂载" msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" +#, python-format +msgid "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" +"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " +"(%(rate).2f/sec, %(remaining)s remaining)" +msgstr "" +"%(device)d/%(dtotal)d (%(dpercentage).2f%%) 设备的 %(reconstructed)d/" +"%(total)d (%(percentage).2f%%) 分区已于 %(time).2fs 重构(%(rate).2f/秒,剩" +"余 %(remaining)s)" + #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " @@ -86,6 +98,10 @@ msgstr "%s不存在" msgid "%s is not mounted" msgstr "%s未挂载" +#, python-format +msgid "%s responded as unmounted" +msgstr "%s 响应为未安装" + #, python-format msgid "%s running (%s - %s)" msgstr "%s运行(%s - %s)" @@ -216,11 +232,21 @@ msgstr "客户读取时中断" msgid "Client disconnected without sending enough data" msgstr "客户中断 尚未发送足够" +msgid "Client disconnected without sending last chunk" +msgstr "客户机已断开连接而未发送最后一个数据块" + #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "客户路径%(client)s与对象元数据中存储的路径%(meta)s不符" +msgid "" +"Configuration option internal_client_conf_path not defined. 
Using default " +"configuration, See internal-client.conf-sample for options" +msgstr "" +"未定义配置选项 internal_client_conf_path。正在使用缺省配置。请参阅 internal-" +"client.conf-sample 以了解各个选项" + msgid "Connection refused" msgstr "连接被拒绝" @@ -278,6 +304,10 @@ msgstr "数据下载错误:%s" msgid "Devices pass completed: %.02fs" msgstr "设备通过完成: %.02fs" +#, python-format +msgid "Directory %r does not map to a valid policy (%s)" +msgstr "目录 %r 未映射至有效策略 (%s)" + #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "错误 %(db_file)s: %(validate_sync_to_err)s" @@ -349,6 +379,10 @@ msgstr "磁盘文件错误%(data_file)s关闭失败: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "错误 异常导致客户端中断连接" +#, python-format +msgid "ERROR Exception transferring data to object servers %s" +msgstr "错误:向对象服务器 %s 传输数据时发生异常" + msgid "ERROR Failed to get my own IPs?" msgstr "错误 无法获得我方IPs?" @@ -516,6 +550,10 @@ msgstr "执行同步分区时发生错误" msgid "Error syncing with node: %s" msgstr "执行同步时节点%s发生错误" +#, python-format +msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" +msgstr "尝试重建 %(path)s 策略时出错:#%(policy)d frag#%(frag_index)s" + msgid "Error: An error occurred" msgstr "错误:一个错误发生了" @@ -535,6 +573,9 @@ msgstr "异常出现在top-level账号reaper环" msgid "Exception in top-level replication loop" msgstr "top-level复制圈出现异常" +msgid "Exception in top-levelreconstruction loop" +msgstr " top-levelreconstruction 环中发生异常" + #, python-format msgid "Exception while deleting container %s %s" msgstr "执行删除容器时出现异常 %s %s" @@ -571,6 +612,11 @@ msgstr "跟随CNAME链从%(given_domain)s到%(found_domain)s" msgid "Found configs:" msgstr "找到配置" +msgid "" +"Handoffs first mode still has handoffs remaining. Aborting current " +"replication pass." 
+msgstr "Handoffs 优先方式仍有 handoffs。正在中止当前复制过程。" + msgid "Host unreachable" msgstr "无法连接到主机" @@ -590,6 +636,10 @@ msgstr "X-Container-Sync-To中无效主机%r" msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "不可用的等待输入%(file)s: %(entry)s" +#, python-format +msgid "Invalid response %(resp)s from %(full_path)s" +msgstr "从 %(full_path)s 返回了无效响应 %(resp)s" + #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "无效的回应%(resp)s来自%(ip)s" @@ -624,20 +674,36 @@ msgstr "%r %r的集群节点不存在" msgid "No permission to signal PID %d" msgstr "无权限发送信号PID%d" +#, python-format +msgid "No policy with index %s" +msgstr "没有具备索引 %s 的策略" + #, python-format msgid "No realm key for %r" msgstr "%r权限key不存在" +#, python-format +msgid "No space left on device for %s (%s)" +msgstr "设备上没有可容纳 %s (%s) 的空间" + #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "节点错误极限 %(ip)s:%(port)s (%(device)s)" +#, python-format +msgid "Not enough object servers ack'ed (got %d)" +msgstr "没有足够的对象服务器应答(收到 %d)" + #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "未找到: %(sync_from)r => %(sync_to)r - object %(obj_name)r" +#, python-format +msgid "Nothing reconstructed for %s seconds." +msgstr "过去 %s 秒未重构任何对象。" + #, python-format msgid "Nothing replicated for %s seconds." msgstr "%s秒无复制" @@ -669,10 +735,30 @@ msgstr "" "%(quars)d, 错误总数: %(errors)d, 文件/秒总和:%(frate).2f, bytes/sec总和: " "%(brate).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f" +#, python-format +msgid "" +"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " +"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " +"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " +"%(audit_rate).2f" +msgstr "" +"对象审计 (%(type)s). 
自 %(start_time)s 开始: 本地:%(passes)d 通" +"过,%(quars)d 隔离,%(errors)d 错误,文件/秒:%(frate).2f,字节/秒:" +"%(brate).2f,总时间:%(total).2f,审计时间:%(audit).2f,速率:" +"%(audit_rate).2f" + #, python-format msgid "Object audit stats: %s" msgstr "对象审计统计:%s" +#, python-format +msgid "Object reconstruction complete (once). (%.02f minutes)" +msgstr "对象重构完成(一次)。(%.02f 分钟)" + +#, python-format +msgid "Object reconstruction complete. (%.02f minutes)" +msgstr "对象重构完成。(%.02f 分钟)" + #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "对象复制完成(一次)。(%.02f minutes)" @@ -729,6 +815,14 @@ msgstr "在X-Container-Sync-To中路径是必须的" msgid "Problem cleaning up %s" msgstr "问题清除%s" +#, python-format +msgid "Problem cleaning up %s (%s)" +msgstr "清除 %s (%s) 时发生了问题" + +#, python-format +msgid "Problem writing durable state file %s (%s)" +msgstr "编写可持续状态文件 %s (%s) 时发生了问题" + #, python-format msgid "Profiling Error: %s" msgstr "分析代码时出现错误:%s" @@ -766,6 +860,10 @@ msgstr "正在移除 %s 个对象" msgid "Removing partition: %s" msgstr "移除分区:%s" +#, python-format +msgid "Removing pid file %s with invalid pid" +msgstr "正在移除带有无效 pid 的 pid 文件 %s" + #, python-format msgid "Removing stale pid file %s" msgstr "移除原有pid文件%s" @@ -785,6 +883,9 @@ msgstr "" "返还498从%(meth)s到%(acc)s/%(cont)s/%(obj)s,流量控制(Max \"\n" "\"Sleep) %(e)s" +msgid "Ring change detected. Aborting current reconstruction pass." +msgstr "检测到环更改。正在中止当前重构过程。" + msgid "Ring change detected. Aborting current replication pass." msgstr "Ring改变被检测到。退出现有的复制通过" @@ -792,6 +893,9 @@ msgstr "Ring改变被检测到。退出现有的复制通过" msgid "Running %s once" msgstr "运行%s一次" +msgid "Running object reconstructor in script mode." +msgstr "正以脚本方式运行对象重构程序。" + msgid "Running object replicator in script mode." msgstr "在加密模式下执行对象复制" @@ -831,6 +935,12 @@ msgstr "挂载失败 跳过%s" msgid "Starting %s" msgstr "启动%s" +msgid "Starting object reconstruction pass." +msgstr "正在启动对象重构过程。" + +msgid "Starting object reconstructor in daemon mode." 
+msgstr "正以守护程序方式启动对象重构程序。" + msgid "Starting object replication pass." msgstr "开始通过对象复制" @@ -854,10 +964,22 @@ msgstr "容器(%(total)s)内%(key)s总数不符合协议%(key)s总数(%(sum)s)" msgid "Timeout %(action)s to memcached: %(server)s" msgstr "%(action)s超时 高性能内存对象缓存: %(server)s" +#, python-format +msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" +msgstr "%(ip)s:%(port)s/%(device)s 发生超时异常" + #, python-format msgid "Trying to %(method)s %(path)s" msgstr "尝试执行%(method)s %(path)s" +#, python-format +msgid "Trying to GET %(full_path)s" +msgstr "正尝试获取 %(full_path)s" + +#, python-format +msgid "Trying to get %s status of PUT to %s" +msgstr "正尝试将 PUT 的 %s 状态发送至 %s" + #, python-format msgid "Trying to get final status of PUT to %s" msgstr "尝试执行获取最后的PUT状态%s" @@ -871,6 +993,10 @@ msgstr "执行GET时尝试读取(重新尝试)" msgid "Trying to send to client" msgstr "尝试发送到客户端" +#, python-format +msgid "Trying to sync suffixes with %s" +msgstr "正尝试使后缀与 %s 同步" + #, python-format msgid "Trying to write to %s" msgstr "尝试执行书写%s" @@ -882,10 +1008,22 @@ msgstr "未捕获的异常" msgid "Unable to find %s config section in %s" msgstr "无法在%s中查找到%s设置部分" +#, python-format +msgid "Unable to load internal client from config: %r (%s)" +msgstr "无法从配置装入内部客户机:%r (%s)" + #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "无法查询到%s 保留为no-op" +#, python-format +msgid "Unable to locate config for %s" +msgstr "找不到 %s 的配置" + +#, python-format +msgid "Unable to locate config number %s for %s" +msgstr "找不到 %s 的配置编号 %s" + msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." 
msgstr "无法查询到fallocate, posix_fallocate。保存为no-op" @@ -909,6 +1047,10 @@ msgstr "意外响应:%s" msgid "Unhandled exception" msgstr "未处理的异常" +#, python-format +msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" +msgstr "尝试获取 %(account)r %(container)r %(object)r 时发生未知异常" + #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s更新报告失败" @@ -935,6 +1077,10 @@ msgstr "警告:无法修改内存极限,是否按非root运行?" msgid "Waited %s seconds for %s to die; giving up" msgstr "等待%s秒直到%s停止;放弃" +#, python-format +msgid "Waited %s seconds for %s to die; killing" +msgstr "已消耗 %s 秒等待 %s 终止;正在终止" + msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告:缺失缓存客户端 无法控制流量 " From f87a5487b5224f77261d82a1087d31820c29e8f8 Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Mon, 14 Mar 2016 16:52:50 -0700 Subject: [PATCH 035/141] Make rsync ignore it's own temporary files In situations where rsync may inadvertently be unable to cleanup it's temporary files we shouldn't spread them around the cluster. By asking our rsync subexec to --exclude patterns that match it's own convention for temporary naming we'll only ever transfer real replicated artifacts and never temporary artifacts which should always be ignored until they are fully transfered. Cleanup of stale rsync droppings should be performed by the auditor and will be addressed in a separate change related to lp bug #1554005. 
Closes-Bug: #1553995 Change-Id: Ibe598b339af024d05e4d89c34d696e972d8189ff --- swift/obj/replicator.py | 1 + test/probe/test_object_handoff.py | 31 ++++++++++++++++++++++ test/unit/obj/test_replicator.py | 43 +++++++++++++++++++++++++++++++ 3 files changed, 75 insertions(+) diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py index aa38407d35..2d6a38bd3d 100644 --- a/swift/obj/replicator.py +++ b/swift/obj/replicator.py @@ -227,6 +227,7 @@ class ObjectReplicator(Daemon): '--timeout=%s' % self.rsync_io_timeout, '--contimeout=%s' % self.rsync_io_timeout, '--bwlimit=%s' % self.rsync_bwlimit, + '--exclude=.*.%s' % ''.join('[0-9a-zA-Z]' for i in range(6)) ] if self.rsync_compress and \ job['region'] != node['region']: diff --git a/test/probe/test_object_handoff.py b/test/probe/test_object_handoff.py index f3b02c53cd..a360021b7c 100755 --- a/test/probe/test_object_handoff.py +++ b/test/probe/test_object_handoff.py @@ -19,6 +19,7 @@ from uuid import uuid4 import random from hashlib import md5 from collections import defaultdict +import os from swiftclient import client @@ -82,6 +83,22 @@ class TestObjectHandoff(ReplProbeTest): raise Exception('Direct object GET did not return VERIFY, instead ' 'it returned: %s' % repr(odata)) + # drop a tempfile in the handoff's datadir, like it might have + # had if there was an rsync failure while it was previously a + # primary + handoff_device_path = self.device_dir('object', another_onode) + data_filename = None + for root, dirs, files in os.walk(handoff_device_path): + for filename in files: + if filename.endswith('.data'): + data_filename = filename + temp_filename = '.%s.6MbL6r' % data_filename + temp_filepath = os.path.join(root, temp_filename) + if not data_filename: + self.fail('Did not find any data files on %r' % + handoff_device_path) + open(temp_filepath, 'w') + # Assert container listing (via proxy and directly) has container/obj objs = [o['name'] for o in client.get_container(self.url, self.token, container)[1]] 
@@ -134,6 +151,20 @@ class TestObjectHandoff(ReplProbeTest): raise Exception('Direct object GET did not return VERIFY, instead ' 'it returned: %s' % repr(odata)) + # and that it does *not* have a temporary rsync dropping! + found_data_filename = False + primary_device_path = self.device_dir('object', onode) + for root, dirs, files in os.walk(primary_device_path): + for filename in files: + if filename.endswith('.6MbL6r'): + self.fail('Found unexpected file %s' % + os.path.join(root, filename)) + if filename == data_filename: + found_data_filename = True + self.assertTrue(found_data_filename, + 'Did not find data file %r on %r' % ( + data_filename, primary_device_path)) + # Assert the handoff server no longer has container/obj try: direct_client.direct_get_object( diff --git a/test/unit/obj/test_replicator.py b/test/unit/obj/test_replicator.py index a32fa99806..a3220887ed 100644 --- a/test/unit/obj/test_replicator.py +++ b/test/unit/obj/test_replicator.py @@ -910,6 +910,49 @@ class TestObjectReplicator(unittest.TestCase): jobs = self.replicator.collect_jobs() self.assertEqual(len(jobs), 0) + def test_replicator_skips_rsync_temp_files(self): + # the empty pre-setup dirs aren't that useful to us + device_path = os.path.join(self.devices, 'sda') + rmtree(device_path, ignore_errors=1) + os.mkdir(device_path) + # create a real data file to trigger rsync + df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o', + policy=POLICIES.legacy) + ts = next(self.ts) + with df.create() as w: + w.write('asdf') + w.put({'X-Timestamp': ts.internal}) + w.commit(ts) + # pre-flight and post sync request for both other primaries + expected_replicate_requests = 4 + process_arg_checker = [ + # (return_code, stdout, ) + (0, '', []), + (0, '', []), + ] + stub_body = pickle.dumps({}) + with _mock_process(process_arg_checker) as rsync_log, \ + mock.patch('swift.obj.replicator.whataremyips', + side_effect=_ips), \ + mocked_http_conn(*[200] * expected_replicate_requests, + body=stub_body) as 
conn_log: + self.replicator.replicate() + self.assertEqual(['REPLICATE'] * expected_replicate_requests, + [r['method'] for r in conn_log.requests]) + # expect one rsync to each other primary node + self.assertEqual(2, len(rsync_log)) + expected = '--exclude=.*.[0-9a-zA-Z][0-9a-zA-Z][0-9a-zA-Z]' \ + '[0-9a-zA-Z][0-9a-zA-Z][0-9a-zA-Z]' + for subprocess_info in rsync_log: + rsync_args = subprocess_info['rsync_args'] + for arg in rsync_args: + if arg.startswith('--exclude'): + self.assertEqual(arg, expected) + break + else: + self.fail('Did not find --exclude argument in %r' % + rsync_args) + def test_replicator_removes_zbf(self): # After running xfs_repair, a partition directory could become a # zero-byte file. If this happens, the replicator should clean it From f595a7e70492c2751d8c6f5ab60b5512e63281cf Mon Sep 17 00:00:00 2001 From: Matthew Oliver Date: Fri, 29 Aug 2014 17:14:46 +1000 Subject: [PATCH 036/141] Add concurrent reads option to proxy This change adds 2 new parameters to enable and control concurrent GETs in swift, these are 'concurrent_gets' and 'concurrency_timeout'. 'concurrent_gets' allows you to turn on or off concurrent GETs, when on it will set the GET/HEAD concurrency to replica count. And in the case of EC HEADs it will set it to ndata. The proxy will then serve only the first valid source to respond. This applies to all account, container and object GETs except for EC. For EC only HEAD requests are effected. It achieves this by changing the request sending mechanism to using GreenAsyncPile and green threads with a time out between each request. 'concurrency_timeout' is related to concurrent_gets. And is the amount of time to wait before firing the next thread. A value of 0 will fire at the same time (fully concurrent), setting another value will stagger the firing allowing you the ability to give a node a shorter chance to respond before firing the next. This value is a float and should be somewhere between 0 and node_timeout. 
The default is conn_timeout. Meaning by default it will stagger the firing. DocImpact Implements: blueprint concurrent-reads Change-Id: I789d39472ec48b22415ff9d9821b1eefab7da867 --- doc/source/deployment_guide.rst | 30 ++++ etc/proxy-server.conf-sample | 17 ++- swift/common/utils.py | 19 +++ swift/proxy/controllers/account.py | 4 +- swift/proxy/controllers/base.py | 167 +++++++++++++---------- swift/proxy/controllers/container.py | 4 +- swift/proxy/controllers/obj.py | 13 +- swift/proxy/server.py | 4 + test/unit/common/test_utils.py | 31 +++++ test/unit/proxy/controllers/test_base.py | 48 ++++++- test/unit/proxy/controllers/test_obj.py | 8 +- test/unit/proxy/test_server.py | 82 +++++++++++ 12 files changed, 348 insertions(+), 79 deletions(-) diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 9ed83e4a30..c879c07db4 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -1367,6 +1367,36 @@ swift_owner_headers up to the auth system in use, but usually indicates administrative responsibilities. +sorting_method shuffle Storage nodes can be chosen at + random (shuffle), by using timing + measurements (timing), or by using + an explicit match (affinity). + Using timing measurements may allow + for lower overall latency, while + using affinity allows for finer + control. In both the timing and + affinity cases, equally-sorting nodes + are still randomly chosen to spread + load. +timing_expiry 300 If the "timing" sorting_method is + used, the timings will only be valid + for the number of seconds configured + by timing_expiry. +concurrent_gets off Use replica count number of + threads concurrently during a + GET/HEAD and return with the + first successful response. In + the EC case, this parameter only + effects an EC HEAD as an EC GET + behaves differently. +concurrency_timeout conn_timeout This parameter controls how long + to wait before firing off the + next concurrent_get thread. 
A + value of 0 would be fully concurrent, + any other number will stagger the + firing of the threads. This number + should be between 0 and node_timeout. + The default is conn_timeout (0.5). ============================ =============== ============================= [tempauth] diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index a06e15a9a6..0314980e5a 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -164,13 +164,28 @@ use = egg:swift#proxy # using affinity allows for finer control. In both the timing and # affinity cases, equally-sorting nodes are still randomly chosen to # spread load. -# The valid values for sorting_method are "affinity", "shuffle", and "timing". +# The valid values for sorting_method are "affinity", "shuffle", or "timing". # sorting_method = shuffle # # If the "timing" sorting_method is used, the timings will only be valid for # the number of seconds configured by timing_expiry. # timing_expiry = 300 # +# By default on a GET/HEAD swift will connect to a storage node one at a time +# in a single thread. There are smarts in the order they are hit however. If you +# turn on concurrent_gets below, then replica count threads will be used. +# With addition of the concurrency_timeout option this will allow swift to send +# out GET/HEAD requests to the storage nodes concurrently and answer with the +# first to respond. With an EC policy the parameter only affects HEAD requests. +# concurrent_gets = off +# +# This parameter controls how long to wait before firing off the next +# concurrent_get thread. A value of 0 would be fully concurrent, any other +# number will stagger the firing of the threads. This number should be +# between 0 and node_timeout. The default is whatever you set for the +# conn_timeout parameter. +# concurrency_timeout = 0.5 +# # Set to the number of nodes to contact for a normal request.
You can use # '* replicas' at the end to have it use the number given times the number of # replicas for the ring being used for the request. diff --git a/swift/common/utils.py b/swift/common/utils.py index 9547bf8f6a..e975bf1ad2 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -2471,6 +2471,10 @@ class GreenAsyncPile(object): finally: self._inflight -= 1 + @property + def inflight(self): + return self._inflight + def spawn(self, func, *args, **kwargs): """ Spawn a job in a green thread on the pile. @@ -2479,6 +2483,16 @@ class GreenAsyncPile(object): self._inflight += 1 self._pool.spawn(self._run_func, func, args, kwargs) + def waitfirst(self, timeout): + """ + Wait up to timeout seconds for first result to come in. + + :param timeout: seconds to wait for results + :returns: first item to come back, or None + """ + for result in self._wait(timeout, first_n=1): + return result + def waitall(self, timeout): """ Wait timeout seconds for any results to come in. @@ -2486,11 +2500,16 @@ class GreenAsyncPile(object): :param timeout: seconds to wait for results :returns: list of results accrued in that time """ + return self._wait(timeout) + + def _wait(self, timeout, first_n=None): results = [] try: with GreenAsyncPileWaitallTimeout(timeout): while True: results.append(next(self)) + if first_n and len(results) >= first_n: + break except (GreenAsyncPileWaitallTimeout, StopIteration): pass return results diff --git a/swift/proxy/controllers/account.py b/swift/proxy/controllers/account.py index 25cbc62187..faf4ccdee6 100644 --- a/swift/proxy/controllers/account.py +++ b/swift/proxy/controllers/account.py @@ -60,10 +60,12 @@ class AccountController(Controller): return resp partition = self.app.account_ring.get_part(self.account_name) + concurrency = self.app.account_ring.replica_count \ + if self.app.concurrent_gets else 1 node_iter = self.app.iter_nodes(self.app.account_ring, partition) resp = self.GETorHEAD_base( req, _('Account'), node_iter, partition, - 
req.swift_entity_path.rstrip('/')) + req.swift_entity_path.rstrip('/'), concurrency) if resp.status_int == HTTP_NOT_FOUND: if resp.headers.get('X-Account-Status', '').lower() == 'deleted': resp.status = HTTP_GONE diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index f225bba3ad..3bebd7f52b 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -623,7 +623,8 @@ def bytes_to_skip(record_size, range_start): class ResumingGetter(object): def __init__(self, app, req, server_type, node_iter, partition, path, - backend_headers, client_chunk_size=None, newest=None): + backend_headers, concurrency=1, client_chunk_size=None, + newest=None): self.app = app self.node_iter = node_iter self.server_type = server_type @@ -634,6 +635,7 @@ class ResumingGetter(object): self.skip_bytes = 0 self.used_nodes = [] self.used_source_etag = '' + self.concurrency = concurrency # stuff from request self.req_method = req.method @@ -649,6 +651,7 @@ class ResumingGetter(object): self.reasons = [] self.bodies = [] self.source_headers = [] + self.sources = [] # populated from response headers self.start_byte = self.end_byte = self.length = None @@ -971,88 +974,106 @@ class ResumingGetter(object): else: return None + def _make_node_request(self, node, node_timeout, logger_thread_locals): + self.app.logger.thread_locals = logger_thread_locals + if node in self.used_nodes: + return False + start_node_timing = time.time() + try: + with ConnectionTimeout(self.app.conn_timeout): + conn = http_connect( + node['ip'], node['port'], node['device'], + self.partition, self.req_method, self.path, + headers=self.backend_headers, + query_string=self.req_query_string) + self.app.set_node_timing(node, time.time() - start_node_timing) + + with Timeout(node_timeout): + possible_source = conn.getresponse() + # See NOTE: swift_conn at top of file about this. 
+ possible_source.swift_conn = conn + except (Exception, Timeout): + self.app.exception_occurred( + node, self.server_type, + _('Trying to %(method)s %(path)s') % + {'method': self.req_method, 'path': self.req_path}) + return False + if self.is_good_source(possible_source): + # 404 if we know we don't have a synced copy + if not float(possible_source.getheader('X-PUT-Timestamp', 1)): + self.statuses.append(HTTP_NOT_FOUND) + self.reasons.append('') + self.bodies.append('') + self.source_headers.append([]) + close_swift_conn(possible_source) + else: + if self.used_source_etag: + src_headers = dict( + (k.lower(), v) for k, v in + possible_source.getheaders()) + + if self.used_source_etag != src_headers.get( + 'x-object-sysmeta-ec-etag', + src_headers.get('etag', '')).strip('"'): + self.statuses.append(HTTP_NOT_FOUND) + self.reasons.append('') + self.bodies.append('') + self.source_headers.append([]) + return False + + self.statuses.append(possible_source.status) + self.reasons.append(possible_source.reason) + self.bodies.append(None) + self.source_headers.append(possible_source.getheaders()) + self.sources.append((possible_source, node)) + if not self.newest: # one good source is enough + return True + else: + self.statuses.append(possible_source.status) + self.reasons.append(possible_source.reason) + self.bodies.append(possible_source.read()) + self.source_headers.append(possible_source.getheaders()) + if possible_source.status == HTTP_INSUFFICIENT_STORAGE: + self.app.error_limit(node, _('ERROR Insufficient Storage')) + elif is_server_error(possible_source.status): + self.app.error_occurred( + node, _('ERROR %(status)d %(body)s ' + 'From %(type)s Server') % + {'status': possible_source.status, + 'body': self.bodies[-1][:1024], + 'type': self.server_type}) + return False + def _get_source_and_node(self): self.statuses = [] self.reasons = [] self.bodies = [] self.source_headers = [] - sources = [] + self.sources = [] + + nodes = GreenthreadSafeIterator(self.node_iter) 
node_timeout = self.app.node_timeout if self.server_type == 'Object' and not self.newest: node_timeout = self.app.recoverable_node_timeout - for node in self.node_iter: - if node in self.used_nodes: - continue - start_node_timing = time.time() - try: - with ConnectionTimeout(self.app.conn_timeout): - conn = http_connect( - node['ip'], node['port'], node['device'], - self.partition, self.req_method, self.path, - headers=self.backend_headers, - query_string=self.req_query_string) - self.app.set_node_timing(node, time.time() - start_node_timing) - with Timeout(node_timeout): - possible_source = conn.getresponse() - # See NOTE: swift_conn at top of file about this. - possible_source.swift_conn = conn - except (Exception, Timeout): - self.app.exception_occurred( - node, self.server_type, - _('Trying to %(method)s %(path)s') % - {'method': self.req_method, 'path': self.req_path}) - continue - if self.is_good_source(possible_source): - # 404 if we know we don't have a synced copy - if not float(possible_source.getheader('X-PUT-Timestamp', 1)): - self.statuses.append(HTTP_NOT_FOUND) - self.reasons.append('') - self.bodies.append('') - self.source_headers.append([]) - close_swift_conn(possible_source) - else: - if self.used_source_etag: - src_headers = dict( - (k.lower(), v) for k, v in - possible_source.getheaders()) + pile = GreenAsyncPile(self.concurrency) - if self.used_source_etag != src_headers.get( - 'x-object-sysmeta-ec-etag', - src_headers.get('etag', '')).strip('"'): - self.statuses.append(HTTP_NOT_FOUND) - self.reasons.append('') - self.bodies.append('') - self.source_headers.append([]) - continue + for node in nodes: + pile.spawn(self._make_node_request, node, node_timeout, + self.app.logger.thread_locals) + _timeout = self.app.concurrency_timeout \ + if pile.inflight < self.concurrency else None + if pile.waitfirst(_timeout): + break + else: + # ran out of nodes, see if any stragglers will finish + any(pile) - self.statuses.append(possible_source.status) - 
self.reasons.append(possible_source.reason) - self.bodies.append(None) - self.source_headers.append(possible_source.getheaders()) - sources.append((possible_source, node)) - if not self.newest: # one good source is enough - break - else: - self.statuses.append(possible_source.status) - self.reasons.append(possible_source.reason) - self.bodies.append(possible_source.read()) - self.source_headers.append(possible_source.getheaders()) - if possible_source.status == HTTP_INSUFFICIENT_STORAGE: - self.app.error_limit(node, _('ERROR Insufficient Storage')) - elif is_server_error(possible_source.status): - self.app.error_occurred( - node, _('ERROR %(status)d %(body)s ' - 'From %(type)s Server') % - {'status': possible_source.status, - 'body': self.bodies[-1][:1024], - 'type': self.server_type}) - - if sources: - sources.sort(key=lambda s: source_key(s[0])) - source, node = sources.pop() - for src, _junk in sources: + if self.sources: + self.sources.sort(key=lambda s: source_key(s[0])) + source, node = self.sources.pop() + for src, _junk in self.sources: close_swift_conn(src) self.used_nodes.append(node) src_headers = dict( @@ -1613,7 +1634,7 @@ class Controller(object): self.app.logger.warning('Could not autocreate account %r' % path) def GETorHEAD_base(self, req, server_type, node_iter, partition, path, - client_chunk_size=None): + concurrency=1, client_chunk_size=None): """ Base handler for HTTP GET or HEAD requests. 
@@ -1622,6 +1643,7 @@ class Controller(object): :param node_iter: an iterator to obtain nodes from :param partition: partition :param path: path for the request + :param concurrency: number of requests to run concurrently :param client_chunk_size: chunk size for response body iterator :returns: swob.Response object """ @@ -1630,6 +1652,7 @@ class Controller(object): handler = GetOrHeadHandler(self.app, req, self.server_type, node_iter, partition, path, backend_headers, + concurrency, client_chunk_size=client_chunk_size) res = handler.get_working_response(req) diff --git a/swift/proxy/controllers/container.py b/swift/proxy/controllers/container.py index d5e52618c2..08a51f10d6 100644 --- a/swift/proxy/controllers/container.py +++ b/swift/proxy/controllers/container.py @@ -93,10 +93,12 @@ class ContainerController(Controller): return HTTPNotFound(request=req) part = self.app.container_ring.get_part( self.account_name, self.container_name) + concurrency = self.app.container_ring.replica_count \ + if self.app.concurrent_gets else 1 node_iter = self.app.iter_nodes(self.app.container_ring, part) resp = self.GETorHEAD_base( req, _('Container'), node_iter, part, - req.swift_entity_path) + req.swift_entity_path, concurrency) if 'swift.authorize' in req.environ: req.acl = resp.headers.get('x-container-read') aresp = req.environ['swift.authorize'](req) diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index dea29eab3a..fadca564f5 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -879,9 +879,11 @@ class BaseObjectController(Controller): class ReplicatedObjectController(BaseObjectController): def _get_or_head_response(self, req, node_iter, partition, policy): + concurrency = self.app.get_object_ring(policy.idx).replica_count \ + if self.app.concurrent_gets else 1 resp = self.GETorHEAD_base( req, _('Object'), node_iter, partition, - req.swift_entity_path) + req.swift_entity_path, concurrency) return resp def 
_connect_put_node(self, nodes, part, path, headers, @@ -2000,9 +2002,10 @@ class ECObjectController(BaseObjectController): # no fancy EC decoding here, just one plain old HEAD request to # one object server because all fragments hold all metadata # information about the object. + concurrency = policy.ec_ndata if self.app.concurrent_gets else 1 resp = self.GETorHEAD_base( req, _('Object'), node_iter, partition, - req.swift_entity_path) + req.swift_entity_path, concurrency) else: # GET request orig_range = None range_specs = [] @@ -2011,6 +2014,12 @@ range_specs = self._convert_range(req, policy) safe_iter = GreenthreadSafeIterator(node_iter) + # Sending the request concurrently to all nodes, and responding + # with the first response isn't something useful for EC as all + # nodes contain different fragments. Also EC has implemented its + # own specific implementation of concurrent gets to ec_ndata nodes. + # So we don't need to worry about plumbing and sending a + # concurrency value to ResumingGetter.
with ContextPool(policy.ec_ndata) as pool: pile = GreenAsyncPile(pool) for _junk in range(policy.ec_ndata): diff --git a/swift/proxy/server.py b/swift/proxy/server.py index 1f23e9bb20..f8f4296a25 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -147,6 +147,10 @@ class Application(object): self.node_timings = {} self.timing_expiry = int(conf.get('timing_expiry', 300)) self.sorting_method = conf.get('sorting_method', 'shuffle').lower() + self.concurrent_gets = \ + config_true_value(conf.get('concurrent_gets')) + self.concurrency_timeout = float(conf.get('concurrency_timeout', + self.conn_timeout)) value = conf.get('request_node_count', '2 * replicas').lower().split() if len(value) == 1: rnc_value = int(value[0]) diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 3ebc8f6dc4..14e3aa8696 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -4988,6 +4988,37 @@ class TestGreenAsyncPile(unittest.TestCase): self.assertEqual(pile.waitall(0.5), [0.1, 0.1]) self.assertEqual(completed[0], 2) + def test_waitfirst_only_returns_first(self): + def run_test(name): + eventlet.sleep(0) + completed.append(name) + return name + + completed = [] + pile = utils.GreenAsyncPile(3) + pile.spawn(run_test, 'first') + pile.spawn(run_test, 'second') + pile.spawn(run_test, 'third') + self.assertEqual(pile.waitfirst(0.5), completed[0]) + # 3 still completed, but only the first was returned. 
+ self.assertEqual(3, len(completed)) + + def test_wait_with_firstn(self): + def run_test(name): + eventlet.sleep(0) + completed.append(name) + return name + + for first_n in [None] + list(range(6)): + completed = [] + pile = utils.GreenAsyncPile(10) + for i in range(10): + pile.spawn(run_test, i) + actual = pile._wait(1, first_n) + expected_n = first_n if first_n else 10 + self.assertEqual(completed[:expected_n], actual) + self.assertEqual(10, len(completed)) + def test_pending(self): pile = utils.GreenAsyncPile(3) self.assertEqual(0, pile._pending) diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index 4bc8991d04..330250e2c9 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -28,7 +28,7 @@ from swift.common import exceptions from swift.common.utils import split_path from swift.common.header_key_dict import HeaderKeyDict from swift.common.http import is_success -from swift.common.storage_policy import StoragePolicy +from swift.common.storage_policy import StoragePolicy, POLICIES from test.unit import fake_http_connect, FakeRing, FakeMemcache from swift.proxy import server as proxy_server from swift.common.request_helpers import get_sys_meta_prefix @@ -193,6 +193,52 @@ class TestFuncs(unittest.TestCase): self.assertTrue('swift.account/a' in resp.environ) self.assertEqual(resp.environ['swift.account/a']['status'], 200) + # Run the above tests again, but this time with concurrent_reads + # turned on + policy = next(iter(POLICIES)) + concurrent_get_threads = policy.object_ring.replica_count + for concurrency_timeout in (0, 2): + self.app.concurrency_timeout = concurrency_timeout + req = Request.blank('/v1/a/c/o/with/slashes') + # NOTE: We are using slow_connect of fake_http_connect as using + # a concurrency of 0 when mocking the connection is a little too + # fast for eventlet. Network i/o will make this fine, but mocking + # it seems is too instantaneous. 
+ with patch('swift.proxy.controllers.base.http_connect', + fake_http_connect(200, slow_connect=True)): + resp = base.GETorHEAD_base( + req, 'object', iter(nodes), 'part', '/a/c/o/with/slashes', + concurrency=concurrent_get_threads) + self.assertTrue('swift.object/a/c/o/with/slashes' in resp.environ) + self.assertEqual( + resp.environ['swift.object/a/c/o/with/slashes']['status'], 200) + req = Request.blank('/v1/a/c/o') + with patch('swift.proxy.controllers.base.http_connect', + fake_http_connect(200, slow_connect=True)): + resp = base.GETorHEAD_base( + req, 'object', iter(nodes), 'part', '/a/c/o', + concurrency=concurrent_get_threads) + self.assertTrue('swift.object/a/c/o' in resp.environ) + self.assertEqual(resp.environ['swift.object/a/c/o']['status'], 200) + req = Request.blank('/v1/a/c') + with patch('swift.proxy.controllers.base.http_connect', + fake_http_connect(200, slow_connect=True)): + resp = base.GETorHEAD_base( + req, 'container', iter(nodes), 'part', '/a/c', + concurrency=concurrent_get_threads) + self.assertTrue('swift.container/a/c' in resp.environ) + self.assertEqual(resp.environ['swift.container/a/c']['status'], + 200) + + req = Request.blank('/v1/a') + with patch('swift.proxy.controllers.base.http_connect', + fake_http_connect(200, slow_connect=True)): + resp = base.GETorHEAD_base( + req, 'account', iter(nodes), 'part', '/a', + concurrency=concurrent_get_threads) + self.assertTrue('swift.account/a' in resp.environ) + self.assertEqual(resp.environ['swift.account/a']['status'], 200) + def test_get_info(self): app = FakeApp() # Do a non cached call to account diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index 08a0be9e98..41e180eadc 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -722,9 +722,15 @@ class TestReplicatedObjController(BaseObjectControllerMixin, def test_GET_error(self): req = swift.common.swob.Request.blank('/v1/a/c/o') - with 
set_http_connect(503, 200): + self.app.logger.txn_id = req.environ['swift.trans_id'] = 'my-txn-id' + stdout = BytesIO() + with set_http_connect(503, 200), \ + mock.patch('sys.stdout', stdout): resp = req.get_response(self.app) self.assertEqual(resp.status_int, 200) + for line in stdout.getvalue().splitlines(): + self.assertIn('my-txn-id', line) + self.assertIn('From Object Server', stdout.getvalue()) def test_GET_handoff(self): req = swift.common.swob.Request.blank('/v1/a/c/o') diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 8aba81ffb1..d9cebdc8c2 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -928,6 +928,88 @@ class TestProxyServer(unittest.TestCase): {'region': 2, 'zone': 1, 'ip': '127.0.0.1'}] self.assertEqual(exp_sorted, app_sorted) + def test_node_concurrency(self): + nodes = [{'region': 1, 'zone': 1, 'ip': '127.0.0.1', 'port': 6010, + 'device': 'sda'}, + {'region': 2, 'zone': 2, 'ip': '127.0.0.2', 'port': 6010, + 'device': 'sda'}, + {'region': 3, 'zone': 3, 'ip': '127.0.0.3', 'port': 6010, + 'device': 'sda'}] + timings = {'127.0.0.1': 2, '127.0.0.2': 1, '127.0.0.3': 0} + statuses = {'127.0.0.1': 200, '127.0.0.2': 200, '127.0.0.3': 200} + req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'GET'}) + + def fake_iter_nodes(*arg, **karg): + return iter(nodes) + + class FakeConn(object): + def __init__(self, ip, *args, **kargs): + self.ip = ip + self.args = args + self.kargs = kargs + + def getresponse(self): + def mygetheader(header, *args, **kargs): + if header == "Content-Type": + return "" + else: + return 1 + + resp = mock.Mock() + resp.read.side_effect = ['Response from %s' % self.ip, ''] + resp.getheader = mygetheader + resp.getheaders.return_value = {} + resp.reason = '' + resp.status = statuses[self.ip] + sleep(timings[self.ip]) + return resp + + def myfake_http_connect_raw(ip, *args, **kargs): + conn = FakeConn(ip, *args, **kargs) + return conn + + with 
mock.patch('swift.proxy.server.Application.iter_nodes', + fake_iter_nodes): + with mock.patch('swift.common.bufferedhttp.http_connect_raw', + myfake_http_connect_raw): + app_conf = {'concurrent_gets': 'on', + 'concurrency_timeout': 0} + baseapp = proxy_server.Application(app_conf, + FakeMemcache(), + container_ring=FakeRing(), + account_ring=FakeRing()) + self.assertEqual(baseapp.concurrent_gets, True) + self.assertEqual(baseapp.concurrency_timeout, 0) + baseapp.update_request(req) + resp = baseapp.handle_request(req) + + # Should get 127.0.0.3 as this has a wait of 0 seconds. + self.assertEqual(resp.body, 'Response from 127.0.0.3') + + # lets try again, with 127.0.0.1 with 0 timing but returns an + # error. + timings['127.0.0.1'] = 0 + statuses['127.0.0.1'] = 500 + + # Should still get 127.0.0.3 as this has a wait of 0 seconds + # and a success + baseapp.update_request(req) + resp = baseapp.handle_request(req) + self.assertEqual(resp.body, 'Response from 127.0.0.3') + + # Now lets set the concurrency_timeout + app_conf['concurrency_timeout'] = 2 + baseapp = proxy_server.Application(app_conf, + FakeMemcache(), + container_ring=FakeRing(), + account_ring=FakeRing()) + self.assertEqual(baseapp.concurrency_timeout, 2) + baseapp.update_request(req) + resp = baseapp.handle_request(req) + + # Should get 127.0.0.2 as this has a wait of 1 seconds. + self.assertEqual(resp.body, 'Response from 127.0.0.2') + def test_info_defaults(self): app = proxy_server.Application({}, FakeMemcache(), account_ring=FakeRing(), From ecbcc94989a385efd1a7c60e7c91e3e8898c5b31 Mon Sep 17 00:00:00 2001 From: Kota Tsuyuzaki Date: Thu, 10 Mar 2016 06:42:57 -0800 Subject: [PATCH 037/141] Fix ssync related object-server docs Swift now uses SSYNC verb instead of old REPLICATION verb for ssync protocol. This patch replaces all docs written as REPLICATION into SSYNC and fix a few words for explanation. 
Change-Id: I1253210d4f49749e7d425d6252dd262b650d9548 --- doc/manpages/object-server.conf.5 | 13 +++++++------ doc/source/deployment_guide.rst | 10 +++++----- etc/object-server.conf-sample | 13 +++++++------ swift/obj/server.py | 2 +- test/unit/obj/test_diskfile.py | 2 +- 5 files changed, 21 insertions(+), 19 deletions(-) diff --git a/doc/manpages/object-server.conf.5 b/doc/manpages/object-server.conf.5 index 6e3dea7a09..ac1a8889e3 100644 --- a/doc/manpages/object-server.conf.5 +++ b/doc/manpages/object-server.conf.5 @@ -207,20 +207,21 @@ set to a True value (e.g. "True" or "1"). To handle only non-replication verbs, set to "False". Unless you have a separate replication network, you should not specify any value for "replication_server". .IP "\fBreplication_concurrency\fR" -Set to restrict the number of concurrent incoming REPLICATION requests -Set to 0 for unlimited (the default is 4). Note that REPLICATION is currently an ssync only item. +Set to restrict the number of concurrent incoming SSYNC requests +Set to 0 for unlimited (the default is 4). Note that SSYNC requests are only used +by the object reconstructor or the object replicator when configured to use ssync. .IP "\fBreplication_one_per_device\fR" -Restricts incoming REPLICATION requests to one per device, +Restricts incoming SSYNC requests to one per device, replication_currency above allowing. This can help control I/O to each -device, but you may wish to set this to False to allow multiple REPLICATION +device, but you may wish to set this to False to allow multiple SSYNC requests (up to the above replication_concurrency setting) per device. The default is true. .IP "\fBreplication_lock_timeout\fR" Number of seconds to wait for an existing replication device lock before giving up. The default is 15. .IP "\fBreplication_failure_threshold\fR" .IP "\fBreplication_failure_ratio\fR" -These two settings control when the REPLICATION subrequest handler will -abort an incoming REPLICATION attempt. 
An abort will occur if there are at +These two settings control when the SSYNC subrequest handler will +abort an incoming SSYNC attempt. An abort will occur if there are at least threshold number of failures and the value of failures / successes exceeds the ratio. The defaults of 100 and 1.0 means that at least 100 failures have to occur and there have to be more failures than successes for diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 9ed83e4a30..94d418c660 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -569,15 +569,15 @@ replication_server Configure parameter for cr should not specify any value for "replication_server". replication_concurrency 4 Set to restrict the number of - concurrent incoming REPLICATION + concurrent incoming SSYNC requests; set to 0 for unlimited -replication_one_per_device True Restricts incoming REPLICATION +replication_one_per_device True Restricts incoming SSYNC requests to one per device, replication_currency above allowing. This can help control I/O to each device, but you may wish to set this to False to - allow multiple REPLICATION + allow multiple SSYNC requests (up to the above replication_concurrency setting) per device. @@ -589,9 +589,9 @@ replication_failure_threshold 100 The number of subrequest f replication_failure_ratio is checked replication_failure_ratio 1.0 If the value of failures / - successes of REPLICATION + successes of SSYNC subrequests exceeds this ratio, - the overall REPLICATION request + the overall SSYNC request will be aborted splice no Use splice() for zero-copy object GETs. This requires Linux kernel diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample index 3c7732416e..80731584ee 100644 --- a/etc/object-server.conf-sample +++ b/etc/object-server.conf-sample @@ -118,14 +118,15 @@ use = egg:swift#object # should not specify any value for "replication_server". 
# replication_server = false # -# Set to restrict the number of concurrent incoming REPLICATION requests +# Set to restrict the number of concurrent incoming SSYNC requests # Set to 0 for unlimited -# Note that REPLICATION is currently an ssync only item +# Note that SSYNC requests are only used by the object reconstructor or the +# object replicator when configured to use ssync. # replication_concurrency = 4 # -# Restricts incoming REPLICATION requests to one per device, +# Restricts incoming SSYNC requests to one per device, # replication_currency above allowing. This can help control I/O to each -# device, but you may wish to set this to False to allow multiple REPLICATION +# device, but you may wish to set this to False to allow multiple SSYNC # requests (up to the above replication_concurrency setting) per device. # replication_one_per_device = True # @@ -133,8 +134,8 @@ use = egg:swift#object # giving up. # replication_lock_timeout = 15 # -# These next two settings control when the REPLICATION subrequest handler will -# abort an incoming REPLICATION attempt. An abort will occur if there are at +# These next two settings control when the SSYNC subrequest handler will +# abort an incoming SSYNC attempt. An abort will occur if there are at # least threshold number of failures and the value of failures / successes # exceeds the ratio. The defaults of 100 and 1.0 means that at least 100 # failures have to occur and there have to be more failures than successes for diff --git a/swift/obj/server.py b/swift/obj/server.py index e6fa88f680..e59c9fbc38 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -1108,7 +1108,7 @@ def global_conf_callback(preloaded_app_conf, global_conf): """ Callback for swift.common.wsgi.run_wsgi during the global_conf creation so that we can add our replication_semaphore, used to - limit the number of concurrent REPLICATION_REQUESTS across all + limit the number of concurrent SSYNC_REQUESTS across all workers. 
:param preloaded_app_conf: The preloaded conf for the WSGI app. diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index 09443d22f0..bea8f95beb 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -4398,7 +4398,7 @@ class TestSuffixHashes(unittest.TestCase): * get_hashes(device, partition, suffixes, policy) * invalidate_hash(suffix_dir) - The Manager.get_hashes method (used by the REPLICATION verb) + The Manager.get_hashes method (used by the REPLICATE verb) calls Manager._get_hashes (which may be an alias to the module method get_hashes), which calls hash_suffix, which calls hash_cleanup_listdir. From 6efee0ebb12a8d0c36aad83d4fd592c4e51c5722 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 16 Mar 2016 11:38:33 +0000 Subject: [PATCH 038/141] Make keystone middleware options consistent in docs Bring overview_auth.rst and proxy server man page up to date with changes made in [1] [1] Change-Id: I373734933189c87c4094203b0752dd3762689034 Change-Id: Ia16f0c391e7c357ccb9c13945839dc5647e49a13 --- doc/manpages/proxy-server.conf.5 | 13 ++++++++----- doc/source/overview_auth.rst | 26 +++++++++++++++----------- 2 files changed, 23 insertions(+), 16 deletions(-) diff --git a/doc/manpages/proxy-server.conf.5 b/doc/manpages/proxy-server.conf.5 index 45531685c6..41144a87a1 100644 --- a/doc/manpages/proxy-server.conf.5 +++ b/doc/manpages/proxy-server.conf.5 @@ -275,11 +275,14 @@ there you can change it to: authtoken keystoneauth .PD 0 .RS 10 .IP "paste.filter_factory = keystonemiddleware.auth_token:filter_factory" -.IP "identity_uri = http://keystonehost:35357/" -.IP "auth_uri = http://keystonehost:5000/" -.IP "admin_tenant_name = service" -.IP "admin_user = swift" -.IP "admin_password = password" +.IP "auth_uri = http://keystonehost:5000" +.IP "auth_url = http://keystonehost:35357" +.IP "auth_plugin = password" +.IP "project_domain_id = default" +.IP "user_domain_id = default" +.IP "project_name = service" +.IP 
"username = swift" +.IP "password = password" .IP "" .IP "# delay_auth_decision defaults to False, but leaving it as false will" .IP "# prevent other auth systems, staticweb, tempurl, formpost, and ACLs from" diff --git a/doc/source/overview_auth.rst b/doc/source/overview_auth.rst index 29ac1459e9..aa5a0c61e5 100644 --- a/doc/source/overview_auth.rst +++ b/doc/source/overview_auth.rst @@ -154,11 +154,14 @@ add the configuration for the authtoken middleware:: [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory - identity_uri = http://keystonehost:35357/ - admin_tenant_name = service - admin_user = swift - admin_password = password auth_uri = http://keystonehost:5000/ + auth_url = http://keystonehost:35357/ + auth_plugin = password + project_domain_id = default + user_domain_id = default + project_name = service + username = swift + password = password cache = swift.cache include_service_catalog = False delay_auth_decision = True @@ -166,16 +169,17 @@ add the configuration for the authtoken middleware:: The actual values for these variables will need to be set depending on your situation, but in short: -* ``identity_uri`` points to the Keystone Admin service. This information is - used by the middleware to actually query Keystone about the validity of the - authentication tokens. It is not necessary to append any Keystone API version - number to this URI. -* The admin auth credentials (``admin_user``, ``admin_tenant_name``, - ``admin_password``) will be used to retrieve an admin token. That - token will be used to authorize user tokens behind the scenes. * ``auth_uri`` should point to a Keystone service from which users may retrieve tokens. This value is used in the `WWW-Authenticate` header that auth_token sends with any denial response. +* ``auth_url`` points to the Keystone Admin service. This information is + used by the middleware to actually query Keystone about the validity of the + authentication tokens. 
It is not necessary to append any Keystone API version + number to this URI. +* The auth credentials (``project_domain_id``, ``user_domain_id``, + ``username``, ``project_name``, ``password``) will be used to retrieve an + admin token. That token will be used to authorize user tokens behind the + scenes. * ``cache`` is set to ``swift.cache``. This means that the middleware will get the Swift memcache from the request environment. * ``include_service_catalog`` defaults to ``True`` if not set. This means From 6ea7635f87ce480e712439bf20e0e4d9ca81b352 Mon Sep 17 00:00:00 2001 From: Charles Hsu Date: Thu, 31 Dec 2015 12:17:21 +0800 Subject: [PATCH 039/141] Add a note for functional tests with Keystone. Change-Id: I9da8f6a56ffb06e89b8da8c06b26ebc8452b652d Closes-Bug: #1530254 --- test/sample.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/test/sample.conf b/test/sample.conf index 3cf3609e59..74b9ca9bd2 100644 --- a/test/sample.conf +++ b/test/sample.conf @@ -6,6 +6,7 @@ auth_ssl = no auth_prefix = /auth/ ## sample config for Swift with Keystone v2 API # For keystone v2 change auth_version to 2 and auth_prefix to /v2.0/ +# And "allow_account_management" should not be set to "true" #auth_version = 3 #auth_host = localhost #auth_port = 5000 From 12dd408823df158359e99fb01716f2059140c5c9 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 16 Mar 2016 17:41:30 +0000 Subject: [PATCH 040/141] Put correct Etag and Accept-Ranges in EC 304 and 416 responses When using an EC policy, 304 responses to conditional GETs are missing the Accept-Ranges header and have the wrong ETag value. 412 responses also have the wrong etag. 416 responses to ranged GETs also have the wrong ETag. 
This patch ensures behaviour with EC policy is consistent with replication policy: - 304 and 416 responses have correct etag and Accept-Ranges - 412 responses have correct Etag but no Accept-Ranges Co-Authored-By: Mahati Chamarthy Co-Authored-By: Thiago da Silva Closes-Bug: #1496234 Closes-Bug: #1558197 Closes-Bug: #1558193 Change-Id: Ic21317b9e4f632f0751133a3383eb5487379e11f --- swift/common/swob.py | 2 +- swift/proxy/controllers/obj.py | 20 +++++---- test/functional/tests.py | 54 +++++++++++++++++++------ test/unit/proxy/controllers/test_obj.py | 10 +++-- test/unit/proxy/test_server.py | 54 +++++++++++++++++++------ 5 files changed, 103 insertions(+), 37 deletions(-) diff --git a/swift/common/swob.py b/swift/common/swob.py index 98ee37278e..704212084d 100644 --- a/swift/common/swob.py +++ b/swift/common/swob.py @@ -1299,7 +1299,7 @@ class Response(object): object length and body or app_iter to reset the content_length properties on the request. - It is ok to not call this method, the conditional resposne will be + It is ok to not call this method, the conditional response will be maintained for you when you __call__ the response. 
""" self.response_iter = self._response_iter(self.app_iter, self._body) diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index dea29eab3a..731a6d3aea 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -60,10 +60,10 @@ from swift.common.exceptions import ChunkReadTimeout, \ from swift.common.header_key_dict import HeaderKeyDict from swift.common.http import ( is_informational, is_success, is_client_error, is_server_error, - HTTP_CONTINUE, HTTP_CREATED, HTTP_MULTIPLE_CHOICES, + is_redirection, HTTP_CONTINUE, HTTP_CREATED, HTTP_MULTIPLE_CHOICES, HTTP_INTERNAL_SERVER_ERROR, HTTP_SERVICE_UNAVAILABLE, HTTP_INSUFFICIENT_STORAGE, HTTP_PRECONDITION_FAILED, HTTP_CONFLICT, - HTTP_UNPROCESSABLE_ENTITY) + HTTP_UNPROCESSABLE_ENTITY, HTTP_REQUESTED_RANGE_NOT_SATISFIABLE) from swift.common.storage_policy import (POLICIES, REPL_POLICY, EC_POLICY, ECDriverError, PolicyError) from swift.proxy.controllers.base import Controller, delay_denial, \ @@ -2065,8 +2065,12 @@ class ECObjectController(BaseObjectController): headers=resp_headers, conditional_response=True, app_iter=app_iter) - resp.accept_ranges = 'bytes' - app_iter.kickoff(req, resp) + try: + app_iter.kickoff(req, resp) + except HTTPException as err_resp: + # catch any HTTPException response here so that we can + # process response headers uniformly in _fix_response + resp = err_resp else: statuses = [] reasons = [] @@ -2086,10 +2090,12 @@ class ECObjectController(BaseObjectController): def _fix_response(self, resp): # EC fragment archives each have different bytes, hence different # etags. However, they all have the original object's etag stored in - # sysmeta, so we copy that here so the client gets it. + # sysmeta, so we copy that here (if it exists) so the client gets it. 
+ resp.headers['Etag'] = resp.headers.get('X-Object-Sysmeta-Ec-Etag') + if (is_success(resp.status_int) or is_redirection(resp.status_int) or + resp.status_int == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE): + resp.accept_ranges = 'bytes' if is_success(resp.status_int): - resp.headers['Etag'] = resp.headers.get( - 'X-Object-Sysmeta-Ec-Etag') resp.headers['Content-Length'] = resp.headers.get( 'X-Object-Sysmeta-Ec-Content-Length') resp.fix_conditional_response() diff --git a/test/functional/tests.py b/test/functional/tests.py index 4831c28b45..4dbfb67c5a 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -90,6 +90,14 @@ class Base(unittest2.TestCase): 'Status returned: %d Expected: %s' % (self.env.conn.response.status, status_or_statuses)) + def assert_header(self, header_name, expected_value): + try: + actual_value = self.env.conn.response.getheader(header_name) + except KeyError: + self.fail( + 'Expected header name %r not found in response.' % header_name) + self.assertEqual(expected_value, actual_value) + class Base2(object): def setUp(self): @@ -1640,32 +1648,35 @@ class TestFile(Base): self.assert_status(416) else: self.assertEqual(file_item.read(hdrs=hdrs), data[-i:]) + self.assert_header('etag', file_item.md5) + self.assert_header('accept-ranges', 'bytes') range_string = 'bytes=%d-' % (i) hdrs = {'Range': range_string} - self.assertTrue( - file_item.read(hdrs=hdrs) == data[i - file_length:], + self.assertEqual( + file_item.read(hdrs=hdrs), data[i - file_length:], range_string) range_string = 'bytes=%d-%d' % (file_length + 1000, file_length + 2000) hdrs = {'Range': range_string} self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) self.assert_status(416) + self.assert_header('etag', file_item.md5) + self.assert_header('accept-ranges', 'bytes') range_string = 'bytes=%d-%d' % (file_length - 1000, file_length + 2000) hdrs = {'Range': range_string} - self.assertTrue( - file_item.read(hdrs=hdrs) == data[-1000:], range_string) + 
self.assertEqual(file_item.read(hdrs=hdrs), data[-1000:], range_string) hdrs = {'Range': '0-4'} - self.assertTrue(file_item.read(hdrs=hdrs) == data, range_string) + self.assertEqual(file_item.read(hdrs=hdrs), data, '0-4') # RFC 2616 14.35.1 # "If the entity is shorter than the specified suffix-length, the # entire entity-body is used." range_string = 'bytes=-%d' % (file_length + 10) hdrs = {'Range': range_string} - self.assertTrue(file_item.read(hdrs=hdrs) == data, range_string) + self.assertEqual(file_item.read(hdrs=hdrs), data, range_string) def testMultiRangeGets(self): file_length = 10000 @@ -2536,6 +2547,7 @@ class TestFileComparison(Base): hdrs = {'If-Match': 'bogus'} self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) self.assert_status(412) + self.assert_header('etag', file_item.md5) def testIfMatchMultipleEtags(self): for file_item in self.env.files: @@ -2545,6 +2557,7 @@ class TestFileComparison(Base): hdrs = {'If-Match': '"bogus1", "bogus2", "bogus3"'} self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) self.assert_status(412) + self.assert_header('etag', file_item.md5) def testIfNoneMatch(self): for file_item in self.env.files: @@ -2554,6 +2567,8 @@ class TestFileComparison(Base): hdrs = {'If-None-Match': file_item.md5} self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) self.assert_status(304) + self.assert_header('etag', file_item.md5) + self.assert_header('accept-ranges', 'bytes') def testIfNoneMatchMultipleEtags(self): for file_item in self.env.files: @@ -2564,6 +2579,8 @@ class TestFileComparison(Base): '"bogus1", "bogus2", "%s"' % file_item.md5} self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) self.assert_status(304) + self.assert_header('etag', file_item.md5) + self.assert_header('accept-ranges', 'bytes') def testIfModifiedSince(self): for file_item in self.env.files: @@ -2574,8 +2591,12 @@ class TestFileComparison(Base): hdrs = {'If-Modified-Since': self.env.time_new} self.assertRaises(ResponseError, 
file_item.read, hdrs=hdrs) self.assert_status(304) + self.assert_header('etag', file_item.md5) + self.assert_header('accept-ranges', 'bytes') self.assertRaises(ResponseError, file_item.info, hdrs=hdrs) self.assert_status(304) + self.assert_header('etag', file_item.md5) + self.assert_header('accept-ranges', 'bytes') def testIfUnmodifiedSince(self): for file_item in self.env.files: @@ -2586,8 +2607,10 @@ class TestFileComparison(Base): hdrs = {'If-Unmodified-Since': self.env.time_old_f2} self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) self.assert_status(412) + self.assert_header('etag', file_item.md5) self.assertRaises(ResponseError, file_item.info, hdrs=hdrs) self.assert_status(412) + self.assert_header('etag', file_item.md5) def testIfMatchAndUnmodified(self): for file_item in self.env.files: @@ -2599,33 +2622,38 @@ class TestFileComparison(Base): 'If-Unmodified-Since': self.env.time_new} self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) self.assert_status(412) + self.assert_header('etag', file_item.md5) hdrs = {'If-Match': file_item.md5, 'If-Unmodified-Since': self.env.time_old_f3} self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) self.assert_status(412) + self.assert_header('etag', file_item.md5) def testLastModified(self): file_name = Utils.create_name() content_type = Utils.create_name() - file = self.env.container.file(file_name) - file.content_type = content_type - resp = file.write_random_return_resp(self.env.file_size) + file_item = self.env.container.file(file_name) + file_item.content_type = content_type + resp = file_item.write_random_return_resp(self.env.file_size) put_last_modified = resp.getheader('last-modified') + etag = file_item.md5 - file = self.env.container.file(file_name) - info = file.info() + file_item = self.env.container.file(file_name) + info = file_item.info() self.assertIn('last_modified', info) last_modified = info['last_modified'] self.assertEqual(put_last_modified, info['last_modified']) hdrs = 
{'If-Modified-Since': last_modified} - self.assertRaises(ResponseError, file.read, hdrs=hdrs) + self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) self.assert_status(304) + self.assert_header('etag', etag) + self.assert_header('accept-ranges', 'bytes') hdrs = {'If-Unmodified-Since': last_modified} - self.assertTrue(file.read(hdrs=hdrs)) + self.assertTrue(file_item.read(hdrs=hdrs)) class TestFileComparisonUTF8(Base2, TestFileComparison): diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index 08a0be9e98..56eadcedf3 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -2395,7 +2395,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): self.assertEqual(resp.status_int, 201) def test_GET_with_invalid_ranges(self): - # reall body size is segment_size - 10 (just 1 segment) + # real body size is segment_size - 10 (just 1 segment) segment_size = self.policy.ec_segment_size real_body = ('a' * segment_size)[:-10] @@ -2407,7 +2407,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): segment_size, '%s-' % (segment_size + 10)) def test_COPY_with_invalid_ranges(self): - # reall body size is segment_size - 10 (just 1 segment) + # real body size is segment_size - 10 (just 1 segment) segment_size = self.policy.ec_segment_size real_body = ('a' * segment_size)[:-10] @@ -2420,6 +2420,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): def _test_invalid_ranges(self, method, real_body, segment_size, req_range): # make a request with range starts from more than real size. 
+ body_etag = md5(real_body).hexdigest() req = swift.common.swob.Request.blank( '/v1/a/c/o', method=method, headers={'Destination': 'c1/o', @@ -2430,7 +2431,8 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): node_fragments = zip(*fragment_payloads) self.assertEqual(len(node_fragments), self.replicas()) # sanity - headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body))} + headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body)), + 'X-Object-Sysmeta-Ec-Etag': body_etag} start = int(req_range.split('-')[0]) self.assertTrue(start >= 0) # sanity title, exp = swob.RESPONSE_REASONS[416] @@ -2453,6 +2455,8 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): self.assertEqual(resp.status_int, 416) self.assertEqual(resp.content_length, len(range_not_satisfiable_body)) self.assertEqual(resp.body, range_not_satisfiable_body) + self.assertEqual(resp.etag, body_etag) + self.assertEqual(resp.headers['Accept-Ranges'], 'bytes') if __name__ == '__main__': diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 8aba81ffb1..fc44033dcf 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -39,6 +39,7 @@ from swift.obj import diskfile import re import random from collections import defaultdict +import uuid import mock from eventlet import sleep, spawn, wsgi, listen, Timeout, debug @@ -2273,9 +2274,10 @@ class TestObjectController(unittest.TestCase): self.assertEqual(len(error_lines), 0) # sanity self.assertEqual(len(warn_lines), 0) # sanity - @unpatch_policies - def test_conditional_GET_ec(self): - self.put_container("ec", "ec-con") + def _test_conditional_GET(self, policy): + container_name = uuid.uuid4().hex + object_path = '/v1/a/%s/conditionals' % container_name + self.put_container(policy.name, container_name) obj = 'this object has an etag and is otherwise unimportant' etag = md5(obj).hexdigest() @@ -2285,13 +2287,13 @@ class 
TestObjectController(unittest.TestCase): prosrv = _test_servers[0] sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() - fd.write('PUT /v1/a/ec-con/conditionals HTTP/1.1\r\n' + fd.write('PUT %s HTTP/1.1\r\n' 'Host: localhost\r\n' 'Connection: close\r\n' 'Content-Length: %d\r\n' 'X-Storage-Token: t\r\n' 'Content-Type: application/octet-stream\r\n' - '\r\n%s' % (len(obj), obj)) + '\r\n%s' % (object_path, len(obj), obj)) fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 201' @@ -2300,55 +2302,79 @@ class TestObjectController(unittest.TestCase): for verb, body in (('GET', obj), ('HEAD', '')): # If-Match req = Request.blank( - '/v1/a/ec-con/conditionals', + object_path, environ={'REQUEST_METHOD': verb}, headers={'If-Match': etag}) resp = req.get_response(prosrv) self.assertEqual(resp.status_int, 200) self.assertEqual(resp.body, body) + self.assertEqual(etag, resp.headers.get('etag')) + self.assertEqual('bytes', resp.headers.get('accept-ranges')) req = Request.blank( - '/v1/a/ec-con/conditionals', + object_path, environ={'REQUEST_METHOD': verb}, headers={'If-Match': not_etag}) resp = req.get_response(prosrv) self.assertEqual(resp.status_int, 412) + self.assertEqual(etag, resp.headers.get('etag')) req = Request.blank( - '/v1/a/ec-con/conditionals', + object_path, environ={'REQUEST_METHOD': verb}, headers={'If-Match': "*"}) resp = req.get_response(prosrv) self.assertEqual(resp.status_int, 200) self.assertEqual(resp.body, body) + self.assertEqual(etag, resp.headers.get('etag')) + self.assertEqual('bytes', resp.headers.get('accept-ranges')) # If-None-Match req = Request.blank( - '/v1/a/ec-con/conditionals', + object_path, environ={'REQUEST_METHOD': verb}, headers={'If-None-Match': etag}) resp = req.get_response(prosrv) self.assertEqual(resp.status_int, 304) + self.assertEqual(etag, resp.headers.get('etag')) + self.assertEqual('bytes', resp.headers.get('accept-ranges')) req = Request.blank( - '/v1/a/ec-con/conditionals', + object_path, 
environ={'REQUEST_METHOD': verb}, headers={'If-None-Match': not_etag}) resp = req.get_response(prosrv) self.assertEqual(resp.status_int, 200) self.assertEqual(resp.body, body) + self.assertEqual(etag, resp.headers.get('etag')) + self.assertEqual('bytes', resp.headers.get('accept-ranges')) req = Request.blank( - '/v1/a/ec-con/conditionals', + object_path, environ={'REQUEST_METHOD': verb}, headers={'If-None-Match': "*"}) resp = req.get_response(prosrv) self.assertEqual(resp.status_int, 304) + self.assertEqual(etag, resp.headers.get('etag')) + self.assertEqual('bytes', resp.headers.get('accept-ranges')) + error_lines = prosrv.logger.get_lines_for_level('error') warn_lines = prosrv.logger.get_lines_for_level('warning') self.assertEqual(len(error_lines), 0) # sanity self.assertEqual(len(warn_lines), 0) # sanity + @unpatch_policies + def test_conditional_GET_ec(self): + policy = POLICIES[3] + self.assertEqual('erasure_coding', policy.policy_type) # sanity + self._test_conditional_GET(policy) + + @unpatch_policies + def test_conditional_GET_replication(self): + policy = POLICIES[0] + self.assertEqual('replication', policy.policy_type) # sanity + self._test_conditional_GET(policy) + @unpatch_policies def test_GET_ec_big(self): self.put_container("ec", "ec-con") @@ -6543,7 +6569,7 @@ class TestObjectECRangedGET(unittest.TestCase): str(s) for s in range(431)) assert seg_size * 4 > len(cls.obj) > seg_size * 3, \ "object is wrong number of segments" - + cls.obj_etag = md5(cls.obj).hexdigest() cls.tiny_obj = 'tiny, tiny object' assert len(cls.tiny_obj) < seg_size, "tiny_obj too large" @@ -6691,9 +6717,11 @@ class TestObjectECRangedGET(unittest.TestCase): def test_unsatisfiable(self): # Goes just one byte too far off the end of the object, so it's # unsatisfiable - status, _junk, _junk = self._get_obj( + status, headers, _junk = self._get_obj( "bytes=%d-%d" % (len(self.obj), len(self.obj) + 100)) self.assertEqual(status, 416) + self.assertEqual(self.obj_etag, 
headers.get('Etag')) + self.assertEqual('bytes', headers.get('Accept-Ranges')) def test_off_end(self): # Ranged GET that's mostly off the end of the object, but overlaps From 9f7e947dca83a8b7c5e81493b88669472ef2875d Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 17 Mar 2016 14:45:42 +0000 Subject: [PATCH 041/141] Imported Translations from Zanata For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: I95466266e314cf4a564c7f93cfae7df9df21b60c --- swift/locale/de/LC_MESSAGES/swift.po | 29 ++++++++++++-- swift/locale/fr/LC_MESSAGES/swift.po | 56 ++++++++++++++++++++++++++-- 2 files changed, 78 insertions(+), 7 deletions(-) diff --git a/swift/locale/de/LC_MESSAGES/swift.po b/swift/locale/de/LC_MESSAGES/swift.po index eb8b836422..1af46d4c9d 100644 --- a/swift/locale/de/LC_MESSAGES/swift.po +++ b/swift/locale/de/LC_MESSAGES/swift.po @@ -11,13 +11,13 @@ # Monika Wolf , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev204\n" +"Project-Id-Version: swift 2.6.1.dev218\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-11 13:27+0000\n" +"POT-Creation-Date: 2016-03-16 11:38+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-11 04:57+0000\n" +"PO-Revision-Date: 2016-03-16 12:06+0000\n" "Last-Translator: Monika Wolf \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" @@ -648,6 +648,13 @@ msgstr "CNAME-Kette für %(given_domain)s bis %(found_domain)s wird gefolgt" msgid "Found configs:" msgstr "Gefundene Konfigurationen:" +msgid "" +"Handoffs first mode still has handoffs remaining. Aborting current " +"replication pass." +msgstr "" +"Der Modus 'handoffs_first' ist noch nicht abgeschlossen. Der aktuelle " +"Replikationsdurchgang wird abgebrochen." 
+ msgid "Host unreachable" msgstr "Host nicht erreichbar" @@ -770,6 +777,18 @@ msgstr "" "%(errors)d, Dateien/s insgesamt: %(frate).2f, Bytes/s insgesamt: " "%(brate).2f, Prüfungszeit: %(audit).2f, Geschwindigkeit: %(audit_rate).2f" +#, python-format +msgid "" +"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " +"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " +"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " +"%(audit_rate).2f" +msgstr "" +"Objektprüfung (%(type)s). Seit %(start_time)s: Lokal: %(passes)d übergeben, " +"%(quars)d unter Quarantäne gestellt, %(errors)d Fehler, Dateien/s: " +"%(frate).2f, Bytes/s: %(brate).2f, Zeit insgesamt: %(total).2f, " +"Prüfungszeit: %(audit).2f, Geschwindigkeit: %(audit_rate).2f" + #, python-format msgid "Object audit stats: %s" msgstr "Objektprüfungsstatistik: %s" @@ -846,6 +865,10 @@ msgstr "Problem bei der Bereinigung von %s" msgid "Problem cleaning up %s (%s)" msgstr "Problem bei der Bereinigung von %s (%s)" +#, python-format +msgid "Problem writing durable state file %s (%s)" +msgstr "Problem beim Schreiben der langlebigen Statusdatei %s (%s)" + #, python-format msgid "Profiling Error: %s" msgstr "Fehler bei der Profilerstellung: %s" diff --git a/swift/locale/fr/LC_MESSAGES/swift.po b/swift/locale/fr/LC_MESSAGES/swift.po index 5dcb3d1bcf..9d14d21ae1 100644 --- a/swift/locale/fr/LC_MESSAGES/swift.po +++ b/swift/locale/fr/LC_MESSAGES/swift.po @@ -6,16 +6,17 @@ # Maxime COQUEREL , 2014 # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata +# Gael Rehault , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev176\n" +"Project-Id-Version: swift 2.6.1.dev218\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-08 04:09+0000\n" +"POT-Creation-Date: 2016-03-16 11:38+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-08-11 11:22+0000\n" -"Last-Translator: openstackjenkins \n" +"PO-Revision-Date: 2016-03-16 11:53+0000\n" +"Last-Translator: Gael Rehault \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" @@ -627,6 +628,10 @@ msgstr "Hôte %r non valide dans X-Container-Sync-To" msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Entrée en attente non valide %(file)s : %(entry)s" +#, python-format +msgid "Invalid response %(resp)s from %(full_path)s" +msgstr "Réponse %(resp)s non valide de %(full_path)s" + #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Réponse %(resp)s non valide de %(ip)s" @@ -666,6 +671,10 @@ msgstr "Aucun droit pour signaler le PID %d" msgid "No realm key for %r" msgstr "Aucune clé de domaine pour %r" +#, python-format +msgid "No space left on device for %s (%s)" +msgstr "Plus d'espace disponible sur le périphérique pour %s (%s)" + #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "" @@ -680,6 +689,10 @@ msgstr "" "Introuvable : %(sync_from)r => %(sync_to)r - objet " "%(obj_name)r" +#, python-format +msgid "Nothing reconstructed for %s seconds." +msgstr "Aucun élément reconstruit pendant %s secondes." + #, python-format msgid "Nothing replicated for %s seconds." msgstr "Aucun élément répliqué pendant %s secondes." @@ -718,6 +731,10 @@ msgstr "" msgid "Object audit stats: %s" msgstr "Statistiques de l'audit d'objet : %s" +#, python-format +msgid "Object reconstruction complete. (%.02f minutes)" +msgstr "Reconstruction d'objet terminée. 
(%.02f minutes)" + #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "" @@ -779,6 +796,10 @@ msgstr "Chemin requis dans X-Container-Sync-To" msgid "Problem cleaning up %s" msgstr "Problème lors du nettoyage de %s" +#, python-format +msgid "Problem cleaning up %s (%s)" +msgstr "Problème lors du nettoyage de %s (%s)" + #, python-format msgid "Profiling Error: %s" msgstr "Erreur de profilage : %s" @@ -822,6 +843,10 @@ msgstr "Suppression de %s objets" msgid "Removing partition: %s" msgstr "Suppression partition: %s" +#, python-format +msgid "Removing pid file %s with invalid pid" +msgstr "Suppression du fichier pid %s avec in pid invalide" + #, python-format msgid "Removing stale pid file %s" msgstr "Suppression du fichier PID %s périmé" @@ -841,6 +866,11 @@ msgstr "" "Renvoi de 498 pour %(meth)s jusqu'à %(acc)s/%(cont)s/%(obj)s . Ratelimit " "(Max Sleep) %(e)s" +msgid "Ring change detected. Aborting current reconstruction pass." +msgstr "" +"Changement d'anneau détecté. Abandon de la session de reconstruction en " +"cours." + msgid "Ring change detected. Aborting current replication pass." msgstr "" "Changement d'anneau détecté. Abandon de la session de réplication en cours." @@ -919,6 +949,14 @@ msgstr "Délai d'attente de %(action)s dans memcached : %(server)s" msgid "Trying to %(method)s %(path)s" msgstr "Tentative d'exécution de %(method)s %(path)s" +#, python-format +msgid "Trying to GET %(full_path)s" +msgstr "Tentative de lecture de %(full_path)s" + +#, python-format +msgid "Trying to get %s status of PUT to %s" +msgstr "Tentative d'obtention du statut de l'opération PUT %s sur %s" + #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Tentative d'obtention du statut final de l'opération PUT sur %s" @@ -948,6 +986,10 @@ msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "" "Impossible de localiser %s dans libc. Laissé comme action nulle (no-op)." 
+#, python-format +msgid "Unable to locate config for %s" +msgstr "Impossible de trouver la configuration pour %s" + msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" @@ -973,6 +1015,12 @@ msgstr "Réponse inattendue : %s" msgid "Unhandled exception" msgstr "Exception non prise en charge" +#, python-format +msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" +msgstr "" +"Une exception inconnue s'est produite pendant une opération GET: %(account)r " +"%(container)r %(object)r" + #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Echec du rapport de mise à jour pour %(container)s %(dbfile)s" From c70abba52958a78d21c00a08488d1c2a7c3df554 Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Thu, 17 Mar 2016 11:55:14 -0700 Subject: [PATCH 042/141] Adjust replica count before pulling parts from failed devices When your device count falls below your replica count you can either add devices or reduce the replica count. Trying to reduce your replica count fails about half the time because removing parts from from failed devices temporarily invalidates your _replica2part2dev table with NONE_DEV which can result in an IndexError in _adjust_replica2part2dev_size. If you adjust the replica count first you won't have to worry about tracking unassigned parts from failed devices. 
Closes-Bug: #1558751 Change-Id: I99dc776fd260a2ba68ca77d7b5ed5120d10b06de --- swift/common/ring/builder.py | 4 ++-- test/unit/common/ring/test_builder.py | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py index 0459cd60de..ee25ad7caa 100644 --- a/swift/common/ring/builder.py +++ b/swift/common/ring/builder.py @@ -443,10 +443,10 @@ class RingBuilder(object): self._set_parts_wanted(replica_plan) assign_parts = defaultdict(list) - # gather parts from failed devices - removed_devs = self._gather_parts_from_failed_devices(assign_parts) # gather parts from replica count adjustment self._adjust_replica2part2dev_size(assign_parts) + # gather parts from failed devices + removed_devs = self._gather_parts_from_failed_devices(assign_parts) # gather parts for dispersion (N.B. this only picks up parts that # *must* disperse according to the replica plan) self._gather_parts_for_dispersion(assign_parts, replica_plan) diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py index 454c6a130a..c8c5023a30 100644 --- a/test/unit/common/ring/test_builder.py +++ b/test/unit/common/ring/test_builder.py @@ -1385,6 +1385,21 @@ class TestRingBuilder(unittest.TestCase): rb.rebalance() # this would crash since parts_wanted was not set rb.validate() + def test_reduce_replicas_after_remove_device(self): + rb = ring.RingBuilder(8, 3, 1) + rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 3, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 3, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 3, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) + rb.rebalance() + rb.remove_dev(0) + self.assertRaises(exceptions.RingValidationError, rb.rebalance) + rb.set_replicas(2) + rb.rebalance() + rb.validate() + def test_rebalance_post_upgrade(self): rb = 
ring.RingBuilder(8, 3, 1) # 5 devices: 5 is the smallest number that does not divide 3 * 2^8, From a85149c40e34b3ccd2aa5f3865bb85d2378d6086 Mon Sep 17 00:00:00 2001 From: Paul Dardeau Date: Thu, 17 Mar 2016 22:12:42 +0000 Subject: [PATCH 043/141] Update ring overview for reuse of device ids Change-Id: Ied15706d4cc931b5314001a02457f83e175c5d24 --- doc/source/overview_ring.rst | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/doc/source/overview_ring.rst b/doc/source/overview_ring.rst index b121d37135..a1a72d8508 100644 --- a/doc/source/overview_ring.rst +++ b/doc/source/overview_ring.rst @@ -103,10 +103,14 @@ meta string A general-use field for storing additional information for the ====== ======= ============================================================== Note: The list of devices may contain holes, or indexes set to None, for -devices that have been removed from the cluster. Generally, device ids are not -reused. Also, some devices may be temporarily disabled by setting their weight -to 0.0. To obtain a list of active devices (for uptime polling, for example) -the Python code would look like: ``devices = list(self._iter_devs())`` +devices that have been removed from the cluster. However, device ids are +reused. Device ids are reused to avoid potentially running out of device id +slots when there are available slots (from prior removal of devices). A +consequence of this device id reuse is that the device id (integer value) does +not necessarily correspond with the chronology of when the device was added to +the ring. Also, some devices may be temporarily disabled by setting their +weight to 0.0. 
To obtain a list of active devices (for uptime polling, for +example) the Python code would look like: ``devices = list(self._iter_devs())`` ************************* Partition Assignment List From e1f12960884f9ea52416891a8246a53f9673348b Mon Sep 17 00:00:00 2001 From: Bill Huber Date: Tue, 5 Jan 2016 14:40:50 -0600 Subject: [PATCH 044/141] Re-format the SLO manifest file on new multipart-manifest GET call Currently, the multipart-manifest=get call returns output in json format that is inconsistent with the format that is used for the multipart-manifest=put. This in turn introduces a new call: ?multipart-manifest=get&format=raw Change-Id: I2242943a738f667cbda6363bcb6a017f341e834f Closes-Bug: 1252482 --- swift/common/middleware/slo.py | 55 +++++++++++++- test/functional/tests.py | 33 +++++++++ test/unit/common/middleware/test_slo.py | 99 ++++++++++++++++++++++++- 3 files changed, 182 insertions(+), 5 deletions(-) diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py index a5ab1085b2..a3291dd7fb 100644 --- a/swift/common/middleware/slo.py +++ b/swift/common/middleware/slo.py @@ -149,9 +149,15 @@ A GET request with the query parameter:: ?multipart-manifest=get -Will return the actual manifest file itself. This is generated json and does -not match the data sent from the original multipart-manifest=put. This call's -main purpose is for debugging. +will return a transformed version of the original manifest, containing +additional fields and different key names. + +A GET request with the query parameters:: + + ?multipart-manifest=get&format=raw + +will return the contents of the original manifest as it was sent by the client. +The main purpose for both calls is solely debugging. When the manifest object is uploaded you are more or less guaranteed that every segment in the manifest exists and matched the specifications. 
@@ -573,6 +579,9 @@ class SloGetContext(WSGIContext): # Handle pass-through request for the manifest itself if req.params.get('multipart-manifest') == 'get': + if req.params.get('format') == 'raw': + resp_iter = self.convert_segment_listing( + self._response_headers, resp_iter) new_headers = [] for header, value in self._response_headers: if header.lower() == 'content-type': @@ -606,7 +615,40 @@ class SloGetContext(WSGIContext): req, resp_headers, resp_iter) return response(req.environ, start_response) - def get_or_head_response(self, req, resp_headers, resp_iter): + def convert_segment_listing(self, resp_headers, resp_iter): + """ + Converts the manifest data to match with the format + that was put in through ?multipart-manifest=put + + :param resp_headers: response headers + :param resp_iter: a response iterable + """ + segments = self._get_manifest_read(resp_iter) + + for seg_dict in segments: + seg_dict.pop('content_type', None) + seg_dict.pop('last_modified', None) + seg_dict.pop('sub_slo', None) + seg_dict['path'] = seg_dict.pop('name', None) + seg_dict['size_bytes'] = seg_dict.pop('bytes', None) + seg_dict['etag'] = seg_dict.pop('hash', None) + + json_data = json.dumps(segments) # convert to string + if six.PY3: + json_data = json_data.encode('utf-8') + + new_headers = [] + for header, value in resp_headers: + if header.lower() == 'content-length': + new_headers.append(('Content-Length', + len(json_data))) + else: + new_headers.append((header, value)) + self._response_headers = new_headers + + return [json_data] + + def _get_manifest_read(self, resp_iter): with closing_if_possible(resp_iter): resp_body = ''.join(resp_iter) try: @@ -614,6 +656,11 @@ class SloGetContext(WSGIContext): except ValueError: segments = [] + return segments + + def get_or_head_response(self, req, resp_headers, resp_iter): + segments = self._get_manifest_read(resp_iter) + etag = md5() content_length = 0 for seg_dict in segments: diff --git a/test/functional/tests.py 
b/test/functional/tests.py index 4dbfb67c5a..d51156e7c2 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -3242,6 +3242,39 @@ class TestSlo(Base): self.assertEqual(value[1]['name'], '/%s/seg_b' % self.env.container.name.decode("utf-8")) + def test_slo_get_raw_the_manifest_with_details_from_server(self): + manifest = self.env.container.file("manifest-db") + got_body = manifest.read(parms={'multipart-manifest': 'get', + 'format': 'raw'}) + + self.assertEqual('application/json; charset=utf-8', + manifest.content_type) + try: + value = json.loads(got_body) + except ValueError: + msg = "GET with multipart-manifest=get&format=raw got invalid json" + self.fail(msg) + + self.assertEqual( + set(value[0].keys()), set(('size_bytes', 'etag', 'path'))) + self.assertEqual(len(value), 2) + self.assertEqual(value[0]['size_bytes'], 1024 * 1024) + self.assertEqual(value[0]['etag'], + hashlib.md5('d' * 1024 * 1024).hexdigest()) + self.assertEqual(value[0]['path'], + '/%s/seg_d' % self.env.container.name.decode("utf-8")) + self.assertEqual(value[1]['size_bytes'], 1024 * 1024) + self.assertEqual(value[1]['etag'], + hashlib.md5('b' * 1024 * 1024).hexdigest()) + self.assertEqual(value[1]['path'], + '/%s/seg_b' % self.env.container.name.decode("utf-8")) + + file_item = self.env.container.file("manifest-from-get-raw") + file_item.write(got_body, parms={'multipart-manifest': 'put'}) + + file_contents = file_item.read() + self.assertEqual(2 * 1024 * 1024, len(file_contents)) + def test_slo_head_the_manifest(self): manifest = self.env.container.file("manifest-abcde") got_info = manifest.info(parms={'multipart-manifest': 'get'}) diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py index 34024f1e47..830892a26c 100644 --- a/test/unit/common/middleware/test_slo.py +++ b/test/unit/common/middleware/test_slo.py @@ -1072,6 +1072,103 @@ class TestSloHeadManifest(SloTestCase): self.assertEqual(status, '304 Not Modified') +class 
TestSloGetRawManifest(SloTestCase): + + def setUp(self): + super(TestSloGetRawManifest, self).setUp() + + _bc_manifest_json = json.dumps( + [{'name': '/gettest/b_10', 'hash': md5hex('b' * 10), 'bytes': '10', + 'content_type': 'text/plain', + 'last_modified': '1970-01-01T00:00:00.000000'}, + {'name': '/gettest/c_15', 'hash': md5hex('c' * 15), 'bytes': '15', + 'content_type': 'text/plain', + 'last_modified': '1970-01-01T00:00:00.000000'}, + {'name': '/gettest/d_10', + 'hash': md5hex(md5hex("e" * 5) + md5hex("f" * 5)), 'bytes': '10', + 'content_type': 'application/json;swift_bytes=10', + 'sub_slo': True, + 'last_modified': '1970-01-01T00:00:00.000000'}]) + self.bc_etag = md5hex(_bc_manifest_json) + self.app.register( + 'GET', '/v1/AUTH_test/gettest/manifest-bc', + swob.HTTPOk, {'Content-Type': 'application/json;swift_bytes=35', + 'X-Static-Large-Object': 'true', + 'X-Object-Meta-Plant': 'Ficus', + 'Etag': md5hex(_bc_manifest_json)}, + _bc_manifest_json) + + _bc_manifest_json_ranges = json.dumps( + [{'name': '/gettest/b_10', 'hash': md5hex('b' * 10), 'bytes': '10', + 'last_modified': '1970-01-01T00:00:00.000000', + 'content_type': 'text/plain', 'range': '1-99'}, + {'name': '/gettest/c_15', 'hash': md5hex('c' * 15), 'bytes': '15', + 'last_modified': '1970-01-01T00:00:00.000000', + 'content_type': 'text/plain', 'range': '100-200'}]) + self.app.register( + 'GET', '/v1/AUTH_test/gettest/manifest-bc-r', + swob.HTTPOk, {'Content-Type': 'application/json;swift_bytes=25', + 'X-Static-Large-Object': 'true', + 'X-Object-Meta-Plant': 'Ficus', + 'Etag': md5hex(_bc_manifest_json_ranges)}, + _bc_manifest_json_ranges) + + def test_get_raw_manifest(self): + req = Request.blank( + '/v1/AUTH_test/gettest/manifest-bc' + '?multipart-manifest=get&format=raw', + environ={'REQUEST_METHOD': 'GET', + 'HTTP_ACCEPT': 'application/json'}) + status, headers, body = self.call_slo(req) + + self.assertEqual(status, '200 OK') + self.assertTrue(('Etag', self.bc_etag) in headers, headers) + 
self.assertTrue(('X-Static-Large-Object', 'true') in headers, headers) + self.assertTrue( + ('Content-Type', 'application/json; charset=utf-8') in headers, + headers) + + try: + resp_data = json.loads(body) + except ValueError: + self.fail("Invalid JSON in manifest GET: %r" % body) + + self.assertEqual( + resp_data, + [{'etag': md5hex('b' * 10), 'size_bytes': '10', + 'path': '/gettest/b_10'}, + {'etag': md5hex('c' * 15), 'size_bytes': '15', + 'path': '/gettest/c_15'}, + {'etag': md5hex(md5hex("e" * 5) + md5hex("f" * 5)), + 'size_bytes': '10', + 'path': '/gettest/d_10'}]) + + def test_get_raw_manifest_passthrough_with_ranges(self): + req = Request.blank( + '/v1/AUTH_test/gettest/manifest-bc-r' + '?multipart-manifest=get&format=raw', + environ={'REQUEST_METHOD': 'GET', + 'HTTP_ACCEPT': 'application/json'}) + status, headers, body = self.call_slo(req) + + self.assertEqual(status, '200 OK') + self.assertTrue( + ('Content-Type', 'application/json; charset=utf-8') in headers, + headers) + try: + resp_data = json.loads(body) + except ValueError: + self.fail("Invalid JSON in manifest GET: %r" % body) + + self.assertEqual( + resp_data, + [{'etag': md5hex('b' * 10), 'size_bytes': '10', + 'path': '/gettest/b_10', 'range': '1-99'}, + {'etag': md5hex('c' * 15), 'size_bytes': '15', + 'path': '/gettest/c_15', 'range': '100-200'}], + body) + + class TestSloGetManifest(SloTestCase): def setUp(self): super(TestSloGetManifest, self).setUp() @@ -1777,7 +1874,7 @@ class TestSloGetManifest(SloTestCase): self.assertEqual( body, 'aaaaabbbbbbbbbbcccccccccccccccdddddddddddddddddddd') - def test_get_segment_with_non_ascii_name(self): + def test_get_segment_with_non_ascii_path(self): segment_body = u"a møøse once bit my sister".encode("utf-8") self.app.register( 'GET', u'/v1/AUTH_test/ünicode/öbject-segment'.encode('utf-8'), From f99b785f47f458baa6a5286e6f6e942a6e2745f3 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Fri, 18 Mar 2016 21:30:58 +0100 Subject: [PATCH 045/141] Avoid 
translation problem Today's import failed again for zh-TW since the translator of a string changed the order of the entries and then msgfmt complained with "format specifications in 'msgid' and 'msgstr' for argument 1 are not the same" Use named arguments to allow translators to switch the order of the entries. Logfile: https://jenkins.openstack.org/job/swift-propose-translation-update/179/console Change-Id: Ide7480a8293a8439ce89d0ad22be74f1f0ad930c --- swift/common/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/swift/common/manager.py b/swift/common/manager.py index e67f8a32f7..54f84c5e1b 100644 --- a/swift/common/manager.py +++ b/swift/common/manager.py @@ -561,8 +561,8 @@ class Server(object): safe_kill(pid, sig, 'swift-%s' % self.server) except InvalidPidFileException as e: if kwargs.get('verbose'): - print(_('Removing pid file %s with wrong pid %d') % ( - pid_file, pid)) + print(_('Removing pid file %(pid_file)s with wrong pid ' + '%(pid)d'), {'pid_file': pid_file, 'pid': pid}) remove_file(pid_file) except OSError as e: if e.errno == errno.ESRCH: From 0ad1b0cca1f2538ac8fe3ef27d9e06c102157000 Mon Sep 17 00:00:00 2001 From: oshritf Date: Thu, 17 Mar 2016 16:53:04 +0200 Subject: [PATCH 046/141] Container sync nodes shuffle cleanup Since commit "Update container sync to use internal client" get_object is done using internal_client and not directly on nodes which makes the block of code to shuffle the nodes redundant. 
Change-Id: I45a6dab05f6f87510cf73102b1ed191238209efe --- swift/container/sync.py | 7 +------ test/unit/container/test_sync.py | 3 --- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/swift/container/sync.py b/swift/container/sync.py index 7bb37f9225..1c2260e0f5 100644 --- a/swift/container/sync.py +++ b/swift/container/sync.py @@ -18,7 +18,7 @@ import os import uuid from swift import gettext_ as _ from time import ctime, time -from random import choice, random, shuffle +from random import choice, random from struct import unpack_from from eventlet import sleep, Timeout @@ -463,11 +463,6 @@ class ContainerSync(Daemon): else: # when sync'ing a live object, use ts_meta - this is the time # at which the source object was last modified by a PUT or POST - part, nodes = \ - self.get_object_ring(info['storage_policy_index']). \ - get_nodes(info['account'], info['container'], - row['name']) - shuffle(nodes) exc = None # look up for the newest one headers_out = {'X-Newest': True, diff --git a/test/unit/container/test_sync.py b/test/unit/container/test_sync.py index 42833ed161..9cb56d2d05 100644 --- a/test/unit/container/test_sync.py +++ b/test/unit/container/test_sync.py @@ -854,7 +854,6 @@ class TestContainerSync(unittest.TestCase): def _test_container_sync_row_put(self, realm, realm_key): orig_uuid = sync.uuid - orig_shuffle = sync.shuffle orig_put_object = sync.put_object try: class FakeUUID(object): @@ -862,7 +861,6 @@ class TestContainerSync(unittest.TestCase): hex = 'abcdef' sync.uuid = FakeUUID - sync.shuffle = lambda x: x ts_data = Timestamp(1.1) timestamp = Timestamp(1.2) @@ -1064,7 +1062,6 @@ class TestContainerSync(unittest.TestCase): self.assertLogMessage('error', 'ERROR Syncing') finally: sync.uuid = orig_uuid - sync.shuffle = orig_shuffle sync.put_object = orig_put_object def test_select_http_proxy_None(self): From b685e8513525569ec1e0e4c34377866b6f5c1f7e Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 21 Mar 2016 06:16:52 +0000 
Subject: [PATCH 047/141] Imported Translations from Zanata For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: I3cae432fdcf311bc3654956a1f61ce22ecae0933 --- swift/locale/de/LC_MESSAGES/swift.po | 24 ++-- swift/locale/fr/LC_MESSAGES/swift.po | 135 ++++++++++++++++-- swift/locale/it/LC_MESSAGES/swift.po | 170 ++++++++++++++++++++++- swift/locale/ru/LC_MESSAGES/swift.po | 8 +- swift/locale/swift.pot | 176 ++++++++++++------------ swift/locale/tr_TR/LC_MESSAGES/swift.po | 8 +- swift/locale/zh_TW/LC_MESSAGES/swift.po | 158 ++++++++++++++++++++- 7 files changed, 548 insertions(+), 131 deletions(-) diff --git a/swift/locale/de/LC_MESSAGES/swift.po b/swift/locale/de/LC_MESSAGES/swift.po index 1af46d4c9d..78184becd9 100644 --- a/swift/locale/de/LC_MESSAGES/swift.po +++ b/swift/locale/de/LC_MESSAGES/swift.po @@ -6,18 +6,16 @@ # Andreas Jaeger , 2014 # Ettore Atalan , 2014-2015 # Jonas John , 2015 -# OpenStack Infra , 2015. #zanata -# Tom Cocozzello , 2015. #zanata -# Monika Wolf , 2016. #zanata +# Frank Kloeker , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev218\n" +"Project-Id-Version: swift 2.6.1.dev235\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-16 11:38+0000\n" +"POT-Creation-Date: 2016-03-18 23:11+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-16 12:06+0000\n" +"PO-Revision-Date: 2016-03-20 07:32+0000\n" "Last-Translator: Monika Wolf \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" @@ -498,7 +496,7 @@ msgstr "" "FEHLER beim Synchronisieren von %(file)s Dateien mit dem Knoten %(node)s" msgid "ERROR trying to replicate" -msgstr "FEHLER beim Versuch, zu replizieren" +msgstr "FEHLER beim Versuch zu replizieren" #, python-format msgid "ERROR while trying to clean up %s" @@ -559,10 +557,10 @@ msgid "Error on render profiling results: %s" msgstr "Fehler beim Wiedergeben der Profilerstellungsergebnisse: %s" msgid "Error parsing recon cache file" -msgstr "Fehler beim Analysieren von recon-Cachedatei" +msgstr "Fehler beim Analysieren von recon-Zwischenspeicherdatei" msgid "Error reading recon cache file" -msgstr "Fehler beim Lesen von recon-Cachedatei" +msgstr "Fehler beim Lesen von recon-Zwischenspeicherdatei" msgid "Error reading ringfile" msgstr "Fehler beim Lesen der Ringdatei" @@ -913,12 +911,12 @@ msgid "Removing partition: %s" msgstr "Partition wird entfernt: %s" #, python-format -msgid "Removing pid file %s with invalid pid" -msgstr "PID-Datei %s mit ungültiger PID wird entfernt." +msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" +msgstr "PID-Datei %(pid_file)s mit falscher PID %(pid)d wird entfernt" #, python-format -msgid "Removing pid file %s with wrong pid %d" -msgstr "PID-Datei %s mit falscher PID %d wird entfernt." +msgid "Removing pid file %s with invalid pid" +msgstr "PID-Datei %s mit ungültiger PID wird entfernt." 
#, python-format msgid "Removing stale pid file %s" diff --git a/swift/locale/fr/LC_MESSAGES/swift.po b/swift/locale/fr/LC_MESSAGES/swift.po index 9d14d21ae1..744f584c54 100644 --- a/swift/locale/fr/LC_MESSAGES/swift.po +++ b/swift/locale/fr/LC_MESSAGES/swift.po @@ -6,17 +6,18 @@ # Maxime COQUEREL , 2014 # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata +# Angelique Pillal , 2016. #zanata # Gael Rehault , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev218\n" +"Project-Id-Version: swift 2.6.1.dev235\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-16 11:38+0000\n" +"POT-Creation-Date: 2016-03-18 23:11+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-16 11:53+0000\n" -"Last-Translator: Gael Rehault \n" +"PO-Revision-Date: 2016-03-18 03:55+0000\n" +"Last-Translator: Angelique Pillal \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" @@ -53,6 +54,16 @@ msgstr "%(ip)s/%(device)s démonté (d'après la réponse)" msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" +#, python-format +msgid "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" +"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " +"(%(rate).2f/sec, %(remaining)s remaining)" +msgstr "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions sur %(device)d/" +"%(dtotal)d (%(dpercentage).2f%%) périphériques reconstruites en %(time).2fs " +"(%(rate).2f/sec, %(remaining)s remaining)" + #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " @@ -89,6 +100,10 @@ msgstr "%s n'existe pas" msgid "%s is not mounted" msgstr "%s n'est pas monté" +#, python-format +msgid "%s responded as unmounted" +msgstr "%s ont été identifié(es) comme étant démonté(es)" + #, python-format msgid "%s running (%s - 
%s)" msgstr "%s en cours d'exécution (%s - %s)" @@ -225,6 +240,9 @@ msgstr "Client déconnecté lors de la lecture" msgid "Client disconnected without sending enough data" msgstr "Client déconnecté avant l'envoi de toutes les données requises" +msgid "Client disconnected without sending last chunk" +msgstr "Le client a été déconnecté avant l'envoi du dernier bloc" + #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" @@ -232,11 +250,19 @@ msgstr "" "Le chemin d'accès au client %(client)s ne correspond pas au chemin stocké " "dans les métadonnées d'objet %(meta)s" +msgid "" +"Configuration option internal_client_conf_path not defined. Using default " +"configuration, See internal-client.conf-sample for options" +msgstr "" +"L'option de configuration internal_client_conf_path n'a pas été définie. La " +"configuration par défaut est utilisée. Consultez les options dans internal-" +"client.conf-sample." + msgid "Connection refused" -msgstr "Connexion refusé" +msgstr "Connexion refusée" msgid "Connection timeout" -msgstr "Connexion timeout" +msgstr "Dépassement du délai d'attente de connexion" msgid "Container" msgstr "Containeur" @@ -281,7 +307,7 @@ msgstr "Liaison impossible à %s:%s après une tentative de %s secondes" #, python-format msgid "Could not load %r: %s" -msgstr "Ne peut pas etre charger %r: %s" +msgstr "Impossible de charger %r: %s" #, python-format msgid "Data download error: %s" @@ -291,6 +317,10 @@ msgstr "Erreur de téléchargement des données: %s" msgid "Devices pass completed: %.02fs" msgstr "Session d'audit d'unité terminée : %.02fs" +#, python-format +msgid "Directory %r does not map to a valid policy (%s)" +msgstr "Le répertoire %r n'est pas mappé à une stratégie valide (%s)" + #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERREUR %(db_file)s : %(validate_sync_to_err)s" @@ -373,6 +403,11 @@ msgstr "" msgid "ERROR Exception causing client disconnect" msgstr "ERREUR 
Exception entraînant la déconnexion du client" +#, python-format +msgid "ERROR Exception transferring data to object servers %s" +msgstr "" +"ERREUR Exception lors du transfert de données vers des serveurs d'objets %s" + msgid "ERROR Failed to get my own IPs?" msgstr "ERREUR Obtention impossible de mes propres adresses IP ?" @@ -551,6 +586,12 @@ msgstr "Erreur de synchronisation de la partition" msgid "Error syncing with node: %s" msgstr "Erreur de synchronisation avec le noeud : %s" +#, python-format +msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" +msgstr "" +"Une erreur est survenue lors de la tentative de régénération de %(path)s " +"policy#%(policy)d frag#%(frag_index)s" + msgid "Error: An error occurred" msgstr "Erreur : une erreur s'est produite" @@ -570,6 +611,9 @@ msgstr "Exception dans la boucle de collecteur de compte de niveau supérieur" msgid "Exception in top-level replication loop" msgstr "Exception dans la boucle de réplication de niveau supérieur" +msgid "Exception in top-levelreconstruction loop" +msgstr "Exception dans la boucle de reconstruction de niveau supérieur" + #, python-format msgid "Exception while deleting container %s %s" msgstr "Exception lors de la suppression du conteneur %s %s" @@ -607,7 +651,14 @@ msgstr "" "Suivi de la chaîne CNAME pour %(given_domain)s jusqu'à %(found_domain)s" msgid "Found configs:" -msgstr "Configurations trouvés:" +msgstr "Configurations trouvées :" + +msgid "" +"Handoffs first mode still has handoffs remaining. Aborting current " +"replication pass." +msgstr "" +"Le premier mode de transferts contient d'autres transferts. Abandon de la " +"session de réplication en cours." 
msgid "Host unreachable" msgstr "Hôte inaccessible" @@ -667,6 +718,10 @@ msgstr "Aucun noeud final de cluster pour %r %r" msgid "No permission to signal PID %d" msgstr "Aucun droit pour signaler le PID %d" +#, python-format +msgid "No policy with index %s" +msgstr "Aucune statégie avec un index de type %s" + #, python-format msgid "No realm key for %r" msgstr "Aucune clé de domaine pour %r" @@ -681,6 +736,11 @@ msgstr "" "Noeud marqué avec limite d'erreurs (error_limited) %(ip)s:%(port)s " "(%(device)s)" +#, python-format +msgid "Not enough object servers ack'ed (got %d)" +msgstr "" +"Le nombre de serveurs d'objets reconnus n'est pas suffisant (%d obtenus)" + #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " @@ -727,10 +787,28 @@ msgstr "" "total d'octets/sec : %(brate).2f. Durée d'audit : %(audit).2f. Taux : " "%(audit_rate).2f" +#, python-format +msgid "" +"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " +"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " +"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " +"%(audit_rate).2f" +msgstr "" +"Audit d'objet (%(type)s). Depuis %(start_time)s, localement : %(passes)d " +"succès. %(quars)d en quarantaine. %(errors)d erreurs. Fichiers/sec : " +"%(frate).2f. Octets/sec : %(brate).2f. Durée totale : %(total).2f. Durée " +"d'audit : %(audit).2f. Taux : %(audit_rate).2f" + #, python-format msgid "Object audit stats: %s" msgstr "Statistiques de l'audit d'objet : %s" +#, python-format +msgid "Object reconstruction complete (once). (%.02f minutes)" +msgstr "" +"La reconstruction d'objet en mode Once (une fois) est terminée. (%.02f " +"minutes)" + #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Reconstruction d'objet terminée. 
(%.02f minutes)" @@ -800,6 +878,11 @@ msgstr "Problème lors du nettoyage de %s" msgid "Problem cleaning up %s (%s)" msgstr "Problème lors du nettoyage de %s (%s)" +#, python-format +msgid "Problem writing durable state file %s (%s)" +msgstr "" +"Un problème est survenu lors de l'écriture du fichier d'état durable %s (%s)" + #, python-format msgid "Profiling Error: %s" msgstr "Erreur de profilage : %s" @@ -845,7 +928,7 @@ msgstr "Suppression partition: %s" #, python-format msgid "Removing pid file %s with invalid pid" -msgstr "Suppression du fichier pid %s avec in pid invalide" +msgstr "Suppression du fichier pid %s comportant un pid non valide" #, python-format msgid "Removing stale pid file %s" @@ -879,6 +962,9 @@ msgstr "" msgid "Running %s once" msgstr "Exécution unique de %s" +msgid "Running object reconstructor in script mode." +msgstr "Exécution du reconstructeur d'objet en mode script." + msgid "Running object replicator in script mode." msgstr "Exécution du réplicateur d'objet en mode script." @@ -920,6 +1006,12 @@ msgstr "%s est ignoré car il n'est pas monté" msgid "Starting %s" msgstr "Démarrage %s" +msgid "Starting object reconstruction pass." +msgstr "Démarrage de la session de reconstruction d'objet." + +msgid "Starting object reconstructor in daemon mode." +msgstr "Démarrage du reconstructeur d'objet en mode démon." + msgid "Starting object replication pass." msgstr "Démarrage de la session de réplication d'objet." 
@@ -945,6 +1037,12 @@ msgstr "" msgid "Timeout %(action)s to memcached: %(server)s" msgstr "Délai d'attente de %(action)s dans memcached : %(server)s" +#, python-format +msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" +msgstr "" +"Exception liée à un dépassement de délai concernant %(ip)s:%(port)s/" +"%(device)s" + #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Tentative d'exécution de %(method)s %(path)s" @@ -970,6 +1068,10 @@ msgstr "Tentative de lecture pendant une opération GET (nouvelle tentative)" msgid "Trying to send to client" msgstr "Tentative d'envoi au client" +#, python-format +msgid "Trying to sync suffixes with %s" +msgstr "Tentative de synchronisation de suffixes à l'aide de %s" + #, python-format msgid "Trying to write to %s" msgstr "Tentative d'écriture sur %s" @@ -979,7 +1081,12 @@ msgstr "EXCEPTION NON INTERCEPTEE" #, python-format msgid "Unable to find %s config section in %s" -msgstr "Impossuble de trouvé la section configuration %s dans %s" +msgstr "Impossible de trouver la section de configuration %s dans %s" + +#, python-format +msgid "Unable to load internal client from config: %r (%s)" +msgstr "" +"Impossible de charger le client interne depuis la configuration : %r (%s)" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." @@ -990,6 +1097,10 @@ msgstr "" msgid "Unable to locate config for %s" msgstr "Impossible de trouver la configuration pour %s" +#, python-format +msgid "Unable to locate config number %s for %s" +msgstr "Impossible de trouver la configuration portant le numéro %s pour %s" + msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" @@ -1055,6 +1166,10 @@ msgstr "" msgid "Waited %s seconds for %s to die; giving up" msgstr "Attente de %s secondes pour la fin de %s ; abandon" +#, python-format +msgid "Waited %s seconds for %s to die; killing" +msgstr "Attente de %s secondes pour la fin de %s . 
En cours d'arrêt" + msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Avertissement : impossible d'appliquer Ratelimit sans client memcached" diff --git a/swift/locale/it/LC_MESSAGES/swift.po b/swift/locale/it/LC_MESSAGES/swift.po index 9bc7462e0e..1cccad5c97 100644 --- a/swift/locale/it/LC_MESSAGES/swift.po +++ b/swift/locale/it/LC_MESSAGES/swift.po @@ -5,16 +5,17 @@ # Translators: # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata +# Alessandra , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev176\n" +"Project-Id-Version: swift 2.6.1.dev235\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-08 04:09+0000\n" +"POT-Creation-Date: 2016-03-18 23:11+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-08-11 11:22+0000\n" -"Last-Translator: openstackjenkins \n" +"PO-Revision-Date: 2016-03-19 05:14+0000\n" +"Last-Translator: Alessandra \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" @@ -51,6 +52,16 @@ msgstr "%(ip)s/%(device)s ha risposto come smontato" msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" +#, python-format +msgid "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" +"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " +"(%(rate).2f/sec, %(remaining)s remaining)" +msgstr "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partizioni di %(device)d/" +"%(dtotal)d (%(dpercentage).2f%%) dispositivi ricostruiti in %(time).2fs " +"(%(rate).2f/sec, %(remaining)s rimanenti)" + #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " @@ -87,6 +98,10 @@ msgstr "%s non esiste" msgid "%s is not mounted" msgstr "%s non è montato" +#, python-format +msgid "%s responded as unmounted" +msgstr "%s ha risposto come smontato" + #, python-format 
msgid "%s running (%s - %s)" msgstr "%s in esecuzione (%s - %s)" @@ -221,6 +236,9 @@ msgstr "Client scollegato alla lettura" msgid "Client disconnected without sending enough data" msgstr "Client disconnesso senza inviare dati sufficienti" +msgid "Client disconnected without sending last chunk" +msgstr "Client disconnesso senza inviare l'ultima porzione" + #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" @@ -228,6 +246,14 @@ msgstr "" "Il percorso del client %(client)s non corrisponde al percorso memorizzato " "nei metadati dell'oggetto %(meta)s" +msgid "" +"Configuration option internal_client_conf_path not defined. Using default " +"configuration, See internal-client.conf-sample for options" +msgstr "" +"Opzione di configurazione internal_client_conf_path non definita. Viene " +"utilizzata la configurazione predefinita, vedere l'esempio internal-client." +"conf-sample per le opzioni" + msgid "Connection refused" msgstr "Connessione rifiutata" @@ -289,6 +315,10 @@ msgstr "Errore di download dei dati: %s" msgid "Devices pass completed: %.02fs" msgstr "Trasmissione dei dispositivi completata: %.02fs" +#, python-format +msgid "Directory %r does not map to a valid policy (%s)" +msgstr "La directory %r non è associata ad una politica valida (%s)" + #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERRORE %(db_file)s: %(validate_sync_to_err)s" @@ -367,6 +397,11 @@ msgstr "ERRORE Errore di chiusura DiskFile %(data_file)s: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "ERRORE Eccezione che causa la disconnessione del client" +#, python-format +msgid "ERROR Exception transferring data to object servers %s" +msgstr "" +"ERRORE Eccezione durante il trasferimento di dati nel server degli oggetti %s" + msgid "ERROR Failed to get my own IPs?" msgstr "ERRORE Impossibile ottenere i propri IP?" 
@@ -545,6 +580,12 @@ msgstr "Errore durante la sincronizzazione della partizione" msgid "Error syncing with node: %s" msgstr "Errore durante la sincronizzazione con il nodo: %s" +#, python-format +msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" +msgstr "" +"Errore nel tentativo di ricreare %(path)s policy#%(policy)d frag#" +"%(frag_index)s" + msgid "Error: An error occurred" msgstr "Errore: si è verificato un errore" @@ -564,6 +605,9 @@ msgstr "Eccezione nel loop reaper dell'account di livello superiore" msgid "Exception in top-level replication loop" msgstr "Eccezione nel loop di replica di livello superiore" +msgid "Exception in top-levelreconstruction loop" +msgstr "Eccezione nel loop di ricostruzione di livello superiore" + #, python-format msgid "Exception while deleting container %s %s" msgstr "Eccezione durante l'eliminazione del contenitore %s %s" @@ -603,6 +647,13 @@ msgstr "" msgid "Found configs:" msgstr "Configurazioni trovate:" +msgid "" +"Handoffs first mode still has handoffs remaining. Aborting current " +"replication pass." +msgstr "" +"Nella prima modalità di passaggio ci sono ancora passaggi restanti. " +"Interruzione del passaggio di replica corrente." 
+ msgid "Host unreachable" msgstr "Host non raggiungibile" @@ -622,6 +673,10 @@ msgstr "Host non valido %r in X-Container-Sync-To" msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Voce in sospeso non valida %(file)s: %(entry)s" +#, python-format +msgid "Invalid response %(resp)s from %(full_path)s" +msgstr "Risposta non valida %(resp)s da %(full_path)s" + #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Risposta non valida %(resp)s da %(ip)s" @@ -657,20 +712,36 @@ msgstr "Nessun endpoint del cluster per %r %r" msgid "No permission to signal PID %d" msgstr "Nessuna autorizzazione per la segnalazione del PID %d" +#, python-format +msgid "No policy with index %s" +msgstr "Nessuna politica con indice %s" + #, python-format msgid "No realm key for %r" msgstr "Nessuna chiave dell'area di autenticazione per %r" +#, python-format +msgid "No space left on device for %s (%s)" +msgstr "Nessuno spazio rimasto sul dispositivo per %s (%s)" + #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Errore del nodo limitato %(ip)s:%(port)s (%(device)s)" +#, python-format +msgid "Not enough object servers ack'ed (got %d)" +msgstr "Server degli oggetti riconosciuti non sufficienti (got %d)" + #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "%(sync_from)r => %(sync_to)r non trovato - oggetto %(obj_name)r" +#, python-format +msgid "Nothing reconstructed for %s seconds." +msgstr "Nessun elemento ricostruito per %s secondi." + #, python-format msgid "Nothing replicated for %s seconds." msgstr "Nessun elemento replicato per %s secondi." @@ -703,10 +774,30 @@ msgstr "" "Totale file/sec: %(frate).2f, Totale byte/sec: %(brate).2f, Tempo verifica: " "%(audit).2f, Velocità: %(audit_rate).2f" +#, python-format +msgid "" +"Object audit (%(type)s). 
Since %(start_time)s: Locally: %(passes)d passed, " +"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " +"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " +"%(audit_rate).2f" +msgstr "" +"Verifica oggetto (%(type)s). A partire da %(start_time)s: In locale: " +"%(passes)d passati, %(quars)d in quarantena, %(errors)d errori file/sec: " +"%(frate).2f , byte/sec: %(brate).2f, Tempo totale: %(total).2f, Tempo " +"verifica: %(audit).2f, Velocità: %(audit_rate).2f" + #, python-format msgid "Object audit stats: %s" msgstr "Statistiche verifica oggetto: %s" +#, python-format +msgid "Object reconstruction complete (once). (%.02f minutes)" +msgstr "Ricostruzione dell'oggetto completata (una volta). (%.02f minuti)" + +#, python-format +msgid "Object reconstruction complete. (%.02f minutes)" +msgstr "Ricostruzione dell'oggetto completata. (%.02f minuti)" + #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Replica dell'oggetto completata (una volta). 
(%.02f minuti)" @@ -766,6 +857,14 @@ msgstr "Percorso richiesto in X-Container-Sync-To" msgid "Problem cleaning up %s" msgstr "Problema durante la ripulitura di %s" +#, python-format +msgid "Problem cleaning up %s (%s)" +msgstr "Problema durante la ripulitura di %s (%s)" + +#, python-format +msgid "Problem writing durable state file %s (%s)" +msgstr "Problema durante la scrittura del file di stato duraturo %s (%s)" + #, python-format msgid "Profiling Error: %s" msgstr "Errore di creazione dei profili: %s" @@ -809,6 +908,14 @@ msgstr "Rimozione di oggetti %s" msgid "Removing partition: %s" msgstr "Rimozione della partizione: %s" +#, python-format +msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" +msgstr "Rimozione del file pid %(pid_file)s con pid errato %(pid)d" + +#, python-format +msgid "Removing pid file %s with invalid pid" +msgstr "Rimozione del file pid %s con pid non valido" + #, python-format msgid "Removing stale pid file %s" msgstr "Rimozione del file pid %s obsoleto in corso" @@ -828,6 +935,11 @@ msgstr "" "Viene restituito 498 per %(meth)s a %(acc)s/%(cont)s/%(obj)s . Ratelimit " "(numero massimo sospensioni) %(e)s" +msgid "Ring change detected. Aborting current reconstruction pass." +msgstr "" +"Modifica ring rilevata. Interruzione della trasmissione della ricostruzione " +"corrente." + msgid "Ring change detected. Aborting current replication pass." msgstr "" "Modifica ring rilevata. Interruzione della trasmissione della replica " @@ -837,6 +949,10 @@ msgstr "" msgid "Running %s once" msgstr "Esecuzione di %s una volta" +msgid "Running object reconstructor in script mode." +msgstr "" +"Esecuzione del programma di ricostruzione dell'oggetto in modalità script." + msgid "Running object replicator in script mode." msgstr "Esecuzione del programma di replica dell'oggetto in modalità script." 
@@ -880,6 +996,12 @@ msgstr "%s viene ignorato perché non è montato" msgid "Starting %s" msgstr "Avvio di %s" +msgid "Starting object reconstruction pass." +msgstr "Avvio della trasmissione della ricostruzione dell'oggetto." + +msgid "Starting object reconstructor in daemon mode." +msgstr "Avvio del programma di ricostruzione dell'oggetto in modalità daemon." + msgid "Starting object replication pass." msgstr "Avvio della trasmissione della replica dell'oggetto." @@ -905,10 +1027,22 @@ msgstr "" msgid "Timeout %(action)s to memcached: %(server)s" msgstr "Timeout di %(action)s su memcached: %(server)s" +#, python-format +msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" +msgstr "Eccezione di timeout con %(ip)s:%(port)s/%(device)s" + #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Tentativo di %(method)s %(path)s" +#, python-format +msgid "Trying to GET %(full_path)s" +msgstr "Tentativo di eseguire GET %(full_path)s" + +#, python-format +msgid "Trying to get %s status of PUT to %s" +msgstr "Tentativo di acquisire lo stato %s di PUT su %s" + #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Tentativo di acquisire lo stato finale di PUT su %s" @@ -922,6 +1056,10 @@ msgstr "Tentativo di lettura durante GET (nuovo tentativo)" msgid "Trying to send to client" msgstr "Tentativo di invio al client" +#, python-format +msgid "Trying to sync suffixes with %s" +msgstr "Tentativo di sincronizzazione dei suffissi con %s" + #, python-format msgid "Trying to write to %s" msgstr "Tentativo di scrittura in %s" @@ -933,10 +1071,22 @@ msgstr "ECCEZIONE NON RILEVATA" msgid "Unable to find %s config section in %s" msgstr "Impossibile trovare la sezione di configurazione %s in %s" +#, python-format +msgid "Unable to load internal client from config: %r (%s)" +msgstr "Impossibile caricare il client interno dalla configurazione: %r (%s)" + #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." 
msgstr "Impossibile individuare %s in libc. Lasciato come no-op." +#, python-format +msgid "Unable to locate config for %s" +msgstr "Impossibile individuare la configurazione per %s" + +#, python-format +msgid "Unable to locate config number %s for %s" +msgstr "Impossibile individuare il numero di configurazione %s per %s" + msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" @@ -962,6 +1112,12 @@ msgstr "Risposta imprevista: %s" msgid "Unhandled exception" msgstr "Eccezione non gestita" +#, python-format +msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" +msgstr "" +"Eccezione imprevista nel tentativo di eseguire GET: %(account)r " +"%(container)r %(object)r" + #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Report di aggiornamento non riuscito per %(container)s %(dbfile)s" @@ -998,6 +1154,12 @@ msgstr "" "Sono trascorsi %s secondi in attesa che %s venga interrotto; operazione " "terminata" +#, python-format +msgid "Waited %s seconds for %s to die; killing" +msgstr "" +"Sono trascorsi %s secondi in attesa che %s venga interrotto; operazione " +"terminata" + msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Avvertenza: impossibile eseguire ratelimit senza un client memcached" diff --git a/swift/locale/ru/LC_MESSAGES/swift.po b/swift/locale/ru/LC_MESSAGES/swift.po index 5e99b3d848..61ae557637 100644 --- a/swift/locale/ru/LC_MESSAGES/swift.po +++ b/swift/locale/ru/LC_MESSAGES/swift.po @@ -9,9 +9,9 @@ # Grigory Mokhin , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev203\n" +"Project-Id-Version: swift 2.6.1.dev235\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-11 02:36+0000\n" +"POT-Creation-Date: 2016-03-18 23:11+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -911,10 +911,6 @@ msgstr "Удаление раздела: %s" msgid "Removing pid file %s with invalid pid" msgstr "Удаление pid файла %s с неверным pid-ом" -#, python-format -msgid "Removing pid file %s with wrong pid %d" -msgstr "Удаление pid файла %s с неверным pid-ом %d" - #, python-format msgid "Removing stale pid file %s" msgstr "Удаление устаревшего файла pid %s" diff --git a/swift/locale/swift.pot b/swift/locale/swift.pot index 679d6c5b62..0b9b9347a2 100644 --- a/swift/locale/swift.pot +++ b/swift/locale/swift.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev176\n" +"Project-Id-Version: swift 2.6.1.dev235\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-08 06:35+0000\n" +"POT-Creation-Date: 2016-03-21 06:16+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -63,8 +63,8 @@ msgstr "" msgid "ERROR Could not get account info %s" msgstr "" -#: swift/account/reaper.py:139 swift/common/utils.py:2341 -#: swift/obj/diskfile.py:296 swift/obj/updater.py:88 swift/obj/updater.py:131 +#: swift/account/reaper.py:139 swift/common/utils.py:2342 +#: swift/obj/diskfile.py:359 swift/obj/updater.py:88 swift/obj/updater.py:131 #, python-format msgid "Skipping %s as it is not mounted" msgstr "" @@ -159,8 +159,8 @@ msgstr "" msgid "Exception with objects for container %(container)s for account %(account)s" msgstr "" -#: swift/account/server.py:276 swift/container/server.py:606 -#: swift/obj/server.py:1035 +#: swift/account/server.py:276 swift/container/server.py:607 +#: swift/obj/server.py:1038 #, python-format msgid "ERROR 
__call__ error with %(method)s %(path)s " msgstr "" @@ -181,16 +181,16 @@ msgstr "" msgid "Error in %r with mtime_check_interval: %s" msgstr "" -#: swift/common/db.py:352 +#: swift/common/db.py:353 #, python-format msgid "Quarantined %s to %s due to %s database" msgstr "" -#: swift/common/db.py:407 +#: swift/common/db.py:408 msgid "Broker error trying to rollback locked connection" msgstr "" -#: swift/common/db.py:610 +#: swift/common/db.py:611 #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "" @@ -340,7 +340,7 @@ msgstr "" #: swift/common/manager.py:564 #, python-format -msgid "Removing pid file %s with wrong pid %d" +msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "" #: swift/common/manager.py:571 @@ -408,101 +408,101 @@ msgstr "" msgid "ERROR: An error occurred while retrieving segments" msgstr "" -#: swift/common/utils.py:396 +#: swift/common/utils.py:397 #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "" -#: swift/common/utils.py:590 +#: swift/common/utils.py:591 msgid "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." 
msgstr "" -#: swift/common/utils.py:674 +#: swift/common/utils.py:675 #, python-format msgid "Unable to perform fsync() on directory %s: %s" msgstr "" -#: swift/common/utils.py:1243 +#: swift/common/utils.py:1244 #, python-format msgid "%s: Connection reset by peer" msgstr "" -#: swift/common/utils.py:1245 swift/common/utils.py:1248 +#: swift/common/utils.py:1246 swift/common/utils.py:1249 #, python-format msgid "%s: %s" msgstr "" -#: swift/common/utils.py:1496 +#: swift/common/utils.py:1497 msgid "Connection refused" msgstr "" -#: swift/common/utils.py:1498 +#: swift/common/utils.py:1499 msgid "Host unreachable" msgstr "" -#: swift/common/utils.py:1500 +#: swift/common/utils.py:1501 msgid "Connection timeout" msgstr "" -#: swift/common/utils.py:1778 +#: swift/common/utils.py:1779 msgid "UNCAUGHT EXCEPTION" msgstr "" -#: swift/common/utils.py:1833 +#: swift/common/utils.py:1834 msgid "Error: missing config path argument" msgstr "" -#: swift/common/utils.py:1838 +#: swift/common/utils.py:1839 #, python-format msgid "Error: unable to locate %s" msgstr "" -#: swift/common/utils.py:2199 +#: swift/common/utils.py:2200 #, python-format msgid "Unable to read config from %s" msgstr "" -#: swift/common/utils.py:2205 +#: swift/common/utils.py:2206 #, python-format msgid "Unable to find %s config section in %s" msgstr "" -#: swift/common/utils.py:2571 +#: swift/common/utils.py:2591 #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "" -#: swift/common/utils.py:2576 +#: swift/common/utils.py:2596 #, python-format msgid "No realm key for %r" msgstr "" -#: swift/common/utils.py:2580 +#: swift/common/utils.py:2600 #, python-format msgid "No cluster endpoint for %r %r" msgstr "" -#: swift/common/utils.py:2589 +#: swift/common/utils.py:2609 #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." 
msgstr "" -#: swift/common/utils.py:2593 +#: swift/common/utils.py:2613 msgid "Path required in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2596 +#: swift/common/utils.py:2616 msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2601 +#: swift/common/utils.py:2621 #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2795 +#: swift/common/utils.py:2815 msgid "Exception dumping recon cache" msgstr "" @@ -658,21 +658,21 @@ msgstr "" msgid "ERROR Could not get container info %s" msgstr "" -#: swift/container/server.py:185 +#: swift/container/server.py:186 #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" -#: swift/container/server.py:230 +#: swift/container/server.py:231 #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" -#: swift/container/server.py:239 +#: swift/container/server.py:240 #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " @@ -856,53 +856,53 @@ msgstr "" msgid "ERROR auditing: %s" msgstr "" -#: swift/obj/diskfile.py:306 swift/obj/updater.py:162 +#: swift/obj/diskfile.py:369 swift/obj/updater.py:162 #, python-format msgid "Directory %r does not map to a valid policy (%s)" msgstr "" -#: swift/obj/diskfile.py:803 +#: swift/obj/diskfile.py:853 #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" -#: swift/obj/diskfile.py:913 +#: swift/obj/diskfile.py:973 msgid "Error hashing suffix" msgstr "" -#: swift/obj/diskfile.py:1074 +#: swift/obj/diskfile.py:1134 #, python-format msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" -#: swift/obj/diskfile.py:1327 +#: swift/obj/diskfile.py:1387 #, python-format msgid "Problem 
cleaning up %s" msgstr "" -#: swift/obj/diskfile.py:1672 +#: swift/obj/diskfile.py:1732 #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" -#: swift/obj/diskfile.py:2000 +#: swift/obj/diskfile.py:2060 #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata " "%(meta)s" msgstr "" -#: swift/obj/diskfile.py:2408 +#: swift/obj/diskfile.py:2468 #, python-format msgid "No space left on device for %s (%s)" msgstr "" -#: swift/obj/diskfile.py:2417 +#: swift/obj/diskfile.py:2477 #, python-format msgid "Problem cleaning up %s (%s)" msgstr "" -#: swift/obj/diskfile.py:2420 +#: swift/obj/diskfile.py:2480 #, python-format msgid "Problem writing durable state file %s (%s)" msgstr "" @@ -1116,21 +1116,21 @@ msgstr "" msgid "Object replication complete. (%.02f minutes)" msgstr "" -#: swift/obj/server.py:240 +#: swift/obj/server.py:241 #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d" " response from %(ip)s:%(port)s/%(dev)s" msgstr "" -#: swift/obj/server.py:247 +#: swift/obj/server.py:248 #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for " "async update later)" msgstr "" -#: swift/obj/server.py:282 +#: swift/obj/server.py:284 #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " @@ -1184,162 +1184,162 @@ msgstr "" msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "" -#: swift/proxy/server.py:414 +#: swift/proxy/server.py:418 msgid "ERROR Unhandled exception in request" msgstr "" -#: swift/proxy/server.py:469 +#: swift/proxy/server.py:473 #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "" -#: swift/proxy/server.py:486 swift/proxy/server.py:504 +#: swift/proxy/server.py:490 swift/proxy/server.py:508 #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "" -#: swift/proxy/server.py:527 +#: 
swift/proxy/server.py:531 #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "" -#: swift/proxy/controllers/account.py:65 +#: swift/proxy/controllers/account.py:67 msgid "Account" msgstr "" -#: swift/proxy/controllers/base.py:809 swift/proxy/controllers/base.py:848 -#: swift/proxy/controllers/base.py:940 swift/proxy/controllers/obj.py:339 -#: swift/proxy/controllers/obj.py:882 swift/proxy/controllers/obj.py:931 -#: swift/proxy/controllers/obj.py:945 swift/proxy/controllers/obj.py:1766 -#: swift/proxy/controllers/obj.py:2003 swift/proxy/controllers/obj.py:2129 -#: swift/proxy/controllers/obj.py:2363 +#: swift/proxy/controllers/base.py:813 swift/proxy/controllers/base.py:852 +#: swift/proxy/controllers/base.py:944 swift/proxy/controllers/obj.py:340 +#: swift/proxy/controllers/obj.py:885 swift/proxy/controllers/obj.py:934 +#: swift/proxy/controllers/obj.py:948 swift/proxy/controllers/obj.py:1769 +#: swift/proxy/controllers/obj.py:2007 swift/proxy/controllers/obj.py:2145 +#: swift/proxy/controllers/obj.py:2379 msgid "Object" msgstr "" -#: swift/proxy/controllers/base.py:810 swift/proxy/controllers/base.py:849 +#: swift/proxy/controllers/base.py:814 swift/proxy/controllers/base.py:853 msgid "Trying to read during GET (retrying)" msgstr "" -#: swift/proxy/controllers/base.py:941 +#: swift/proxy/controllers/base.py:945 msgid "Trying to read during GET" msgstr "" -#: swift/proxy/controllers/base.py:945 +#: swift/proxy/controllers/base.py:949 #, python-format msgid "Client did not read from proxy within %ss" msgstr "" -#: swift/proxy/controllers/base.py:950 +#: swift/proxy/controllers/base.py:954 msgid "Client disconnected on read" msgstr "" -#: swift/proxy/controllers/base.py:952 +#: swift/proxy/controllers/base.py:956 msgid "Trying to send to client" msgstr "" -#: swift/proxy/controllers/base.py:1003 swift/proxy/controllers/base.py:1415 +#: swift/proxy/controllers/base.py:998 swift/proxy/controllers/base.py:1437 #, 
python-format msgid "Trying to %(method)s %(path)s" msgstr "" -#: swift/proxy/controllers/base.py:1042 swift/proxy/controllers/base.py:1403 -#: swift/proxy/controllers/obj.py:363 swift/proxy/controllers/obj.py:922 -#: swift/proxy/controllers/obj.py:2121 swift/proxy/controllers/obj.py:2408 +#: swift/proxy/controllers/base.py:1037 swift/proxy/controllers/base.py:1425 +#: swift/proxy/controllers/obj.py:364 swift/proxy/controllers/obj.py:925 +#: swift/proxy/controllers/obj.py:2137 swift/proxy/controllers/obj.py:2424 msgid "ERROR Insufficient Storage" msgstr "" -#: swift/proxy/controllers/base.py:1045 +#: swift/proxy/controllers/base.py:1040 #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "" -#: swift/proxy/controllers/base.py:1406 +#: swift/proxy/controllers/base.py:1428 #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "" -#: swift/proxy/controllers/base.py:1536 +#: swift/proxy/controllers/base.py:1558 #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "" -#: swift/proxy/controllers/container.py:98 +#: swift/proxy/controllers/container.py:100 msgid "Container" msgstr "" -#: swift/proxy/controllers/obj.py:340 +#: swift/proxy/controllers/obj.py:341 #, python-format msgid "Trying to get final status of PUT to %s" msgstr "" -#: swift/proxy/controllers/obj.py:367 swift/proxy/controllers/obj.py:2413 +#: swift/proxy/controllers/obj.py:368 swift/proxy/controllers/obj.py:2429 #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "" -#: swift/proxy/controllers/obj.py:578 +#: swift/proxy/controllers/obj.py:579 #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "" -#: swift/proxy/controllers/obj.py:591 +#: swift/proxy/controllers/obj.py:592 #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" -#: swift/proxy/controllers/obj.py:926 swift/proxy/controllers/obj.py:2124 +#: 
swift/proxy/controllers/obj.py:929 swift/proxy/controllers/obj.py:2140 #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "" -#: swift/proxy/controllers/obj.py:932 swift/proxy/controllers/obj.py:2130 +#: swift/proxy/controllers/obj.py:935 swift/proxy/controllers/obj.py:2146 #, python-format msgid "Expect: 100-continue on %s" msgstr "" -#: swift/proxy/controllers/obj.py:946 swift/proxy/controllers/obj.py:1767 +#: swift/proxy/controllers/obj.py:949 swift/proxy/controllers/obj.py:1770 #, python-format msgid "Trying to write to %s" msgstr "" -#: swift/proxy/controllers/obj.py:997 swift/proxy/controllers/obj.py:2295 +#: swift/proxy/controllers/obj.py:1000 swift/proxy/controllers/obj.py:2311 #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "" -#: swift/proxy/controllers/obj.py:1005 swift/proxy/controllers/obj.py:2301 +#: swift/proxy/controllers/obj.py:1008 swift/proxy/controllers/obj.py:2317 msgid "Client disconnected without sending last chunk" msgstr "" -#: swift/proxy/controllers/obj.py:1010 swift/proxy/controllers/obj.py:2308 +#: swift/proxy/controllers/obj.py:1013 swift/proxy/controllers/obj.py:2324 msgid "ERROR Exception causing client disconnect" msgstr "" -#: swift/proxy/controllers/obj.py:1014 swift/proxy/controllers/obj.py:2312 +#: swift/proxy/controllers/obj.py:1017 swift/proxy/controllers/obj.py:2328 #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "" -#: swift/proxy/controllers/obj.py:1020 swift/proxy/controllers/obj.py:2226 +#: swift/proxy/controllers/obj.py:1023 swift/proxy/controllers/obj.py:2242 msgid "Client disconnected without sending enough data" msgstr "" -#: swift/proxy/controllers/obj.py:1066 +#: swift/proxy/controllers/obj.py:1069 #, python-format msgid "Object servers returned %s mismatched etags" msgstr "" -#: swift/proxy/controllers/obj.py:1070 swift/proxy/controllers/obj.py:2272 -#: swift/proxy/controllers/obj.py:2497 +#: 
swift/proxy/controllers/obj.py:1073 swift/proxy/controllers/obj.py:2288 +#: swift/proxy/controllers/obj.py:2513 msgid "Object PUT" msgstr "" -#: swift/proxy/controllers/obj.py:2265 +#: swift/proxy/controllers/obj.py:2281 #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "" -#: swift/proxy/controllers/obj.py:2364 +#: swift/proxy/controllers/obj.py:2380 #, python-format msgid "Trying to get %s status of PUT to %s" msgstr "" diff --git a/swift/locale/tr_TR/LC_MESSAGES/swift.po b/swift/locale/tr_TR/LC_MESSAGES/swift.po index 9bb5657894..2235019090 100644 --- a/swift/locale/tr_TR/LC_MESSAGES/swift.po +++ b/swift/locale/tr_TR/LC_MESSAGES/swift.po @@ -7,9 +7,9 @@ # OpenStack Infra , 2015. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev176\n" +"Project-Id-Version: swift 2.6.1.dev235\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-08 04:09+0000\n" +"POT-Creation-Date: 2016-03-18 23:11+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -860,10 +860,6 @@ msgstr "Bölüm kaldırılıyor: %s" msgid "Removing pid file %s with invalid pid" msgstr "Geçersiz pid'e sahip pid dosyası %s siliniyor" -#, python-format -msgid "Removing pid file %s with wrong pid %d" -msgstr "%s pid dosyası %d yanlış pid'ine sahip siliniyor" - #, python-format msgid "Removing stale pid file %s" msgstr "Askıdaki pid dosyası siliniyor %s" diff --git a/swift/locale/zh_TW/LC_MESSAGES/swift.po b/swift/locale/zh_TW/LC_MESSAGES/swift.po index 06f0e95201..03c9083403 100644 --- a/swift/locale/zh_TW/LC_MESSAGES/swift.po +++ b/swift/locale/zh_TW/LC_MESSAGES/swift.po @@ -5,16 +5,17 @@ # Translators: # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata +# Jennifer , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev176\n" +"Project-Id-Version: swift 2.6.1.dev235\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-08 04:09+0000\n" +"POT-Creation-Date: 2016-03-18 23:11+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-08-11 11:22+0000\n" -"Last-Translator: openstackjenkins \n" +"PO-Revision-Date: 2016-03-19 12:55+0000\n" +"Last-Translator: Jennifer \n" "Language: zh-TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" @@ -50,6 +51,16 @@ msgstr "%(ip)s/%(device)s 已回應為未裝載" msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" +#, python-format +msgid "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" +"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " +"(%(rate).2f/sec, %(remaining)s remaining)" +msgstr "" +"在 %(time).2fs 內重新建構了 %(device)d/%(dtotal)d (%(dpercentage).2f%%) 個裝" +"置的 %(reconstructed)d/%(total)d (%(percentage).2f%%) 個分割區(%(rate).2f/" +"秒,剩餘 %(remaining)s)" + #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " @@ -86,6 +97,10 @@ msgstr "%s 不存在" msgid "%s is not mounted" msgstr "未裝載 %s" +#, python-format +msgid "%s responded as unmounted" +msgstr "%s 已回應為未裝載" + #, python-format msgid "%s running (%s - %s)" msgstr "%s 在執行中 (%s - %s)" @@ -216,11 +231,21 @@ msgstr "用戶端在讀取時中斷連線" msgid "Client disconnected without sending enough data" msgstr "用戶端已中斷連線,未傳送足夠的資料" +msgid "Client disconnected without sending last chunk" +msgstr "用戶端已中斷連線,未傳送最後一個片段" + #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "用戶端路徑 %(client)s 不符合物件 meta 資料%(meta)s 中儲存的路徑" +msgid "" +"Configuration option internal_client_conf_path not defined. 
Using default " +"configuration, See internal-client.conf-sample for options" +msgstr "" +"為定義配置選項 internal_client_conf_path。將使用預設配置,請參閱 internal-" +"client.conf-sample 以取得選項" + msgid "Connection refused" msgstr "連線遭拒" @@ -278,6 +303,10 @@ msgstr "資料下載錯誤:%s" msgid "Devices pass completed: %.02fs" msgstr "裝置通過已完成:%.02fs" +#, python-format +msgid "Directory %r does not map to a valid policy (%s)" +msgstr "目錄 %r 未對映至有效的原則 (%s)" + #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "錯誤:%(db_file)s:%(validate_sync_to_err)s" @@ -349,6 +378,10 @@ msgstr "錯誤:磁碟檔 %(data_file)s 關閉失敗:%(exc)s:%(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "錯誤:異常狀況造成用戶端中斷連線" +#, python-format +msgid "ERROR Exception transferring data to object servers %s" +msgstr "錯誤:將資料轉送至物件伺服器 %s 時發生異常狀況" + msgid "ERROR Failed to get my own IPs?" msgstr "錯誤:無法取得我自己的 IP?" @@ -517,6 +550,10 @@ msgstr "同步分割區時發生錯誤" msgid "Error syncing with node: %s" msgstr "與節點同步時發生錯誤:%s" +#, python-format +msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" +msgstr "嘗試重建 %(path)s 原則 #%(policy)d 分段 #%(frag_index)s 時發生錯誤" + msgid "Error: An error occurred" msgstr "錯誤:發生錯誤" @@ -536,6 +573,9 @@ msgstr "最上層帳戶 Reaper 迴圈發生異常狀況" msgid "Exception in top-level replication loop" msgstr "最上層抄寫迴圈中發生異常狀況" +msgid "Exception in top-levelreconstruction loop" +msgstr "最上層重新建構迴圈中發生異常狀況" + #, python-format msgid "Exception while deleting container %s %s" msgstr "刪除儲存器 %s %s 時發生異常狀況" @@ -572,6 +612,11 @@ msgstr "遵循 %(given_domain)s 到 %(found_domain)s 的 CNAME 鏈" msgid "Found configs:" msgstr "找到配置:" +msgid "" +"Handoffs first mode still has handoffs remaining. Aborting current " +"replication pass." 
+msgstr "「遞交作業最先」模式仍有剩餘的遞交作業。正在中斷現行抄寫傳遞。" + msgid "Host unreachable" msgstr "無法抵達主機" @@ -591,6 +636,10 @@ msgstr "X-Container-Sync-To 中的主機 %r 無效" msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "無效的擱置項目 %(file)s:%(entry)s" +#, python-format +msgid "Invalid response %(resp)s from %(full_path)s" +msgstr "來自 %(full_path)s 的回應 %(resp)s 無效" + #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "來自 %(ip)s 的回應 %(resp)s 無效" @@ -625,14 +674,26 @@ msgstr "沒有 %r %r 的叢集端點" msgid "No permission to signal PID %d" msgstr "沒有信號 PID %d 的許可權" +#, python-format +msgid "No policy with index %s" +msgstr "沒有具有索引 %s 的原則" + #, python-format msgid "No realm key for %r" msgstr "沒有 %r 的範圍金鑰" +#, python-format +msgid "No space left on device for %s (%s)" +msgstr "裝置上沒有用於 %s 的剩餘空間 (%s)" + #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "節點錯誤限制 %(ip)s:%(port)s (%(device)s)" +#, python-format +msgid "Not enough object servers ack'ed (got %d)" +msgstr "未確認足夠的物件伺服器(已取得 %d)" + #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " @@ -640,6 +701,10 @@ msgid "" msgstr "" "找不到 %(sync_from)r => %(sync_to)r - 物件%(obj_name)r" +#, python-format +msgid "Nothing reconstructed for %s seconds." +msgstr "%s 秒未重新建構任何內容。" + #, python-format msgid "Nothing replicated for %s seconds." msgstr "未抄寫任何項目達 %s 秒。" @@ -669,10 +734,30 @@ msgstr "" "%(quars)d,錯誤總計:%(errors)d,檔案/秒總計:%(frate).2f,位元組/秒總計:" "%(brate).2f,審核時間:%(audit).2f,速率:%(audit_rate).2f" +#, python-format +msgid "" +"Object audit (%(type)s). 
Since %(start_time)s: Locally: %(passes)d passed, " +"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " +"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " +"%(audit_rate).2f" +msgstr "" +"物件審核 (%(type)s)。自 %(start_time)s 以來:本端:%(passes)d 個已通" +"過,%(quars)d 個已隔離,%(errors)d 個錯誤,檔案/秒:%(frate).2f,位元組數/" +"秒:%(brate).2f,時間總計:%(total).2f,審核時間:%(audit).2f,速率:" +"%(audit_rate).2f" + #, python-format msgid "Object audit stats: %s" msgstr "物件審核統計資料:%s" +#, python-format +msgid "Object reconstruction complete (once). (%.02f minutes)" +msgstr "物件重新建構完成(一次性)。(%.02f 分鐘)" + +#, python-format +msgid "Object reconstruction complete. (%.02f minutes)" +msgstr "物件重新建構完成。(%.02f 分鐘)" + #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "物件抄寫完成(一次性)。(%.02f 分鐘)" @@ -731,6 +816,14 @@ msgstr "X-Container-Sync-To 中需要路徑" msgid "Problem cleaning up %s" msgstr "清除 %s 時發生問題" +#, python-format +msgid "Problem cleaning up %s (%s)" +msgstr "清除 %s 時發生問題 (%s)" + +#, python-format +msgid "Problem writing durable state file %s (%s)" +msgstr "寫入可延續狀態檔 %s 時發生問題 (%s)" + #, python-format msgid "Profiling Error: %s" msgstr "側寫錯誤:%s" @@ -768,6 +861,14 @@ msgstr "正在移除 %s 物件" msgid "Removing partition: %s" msgstr "正在移除分割區:%s" +#, python-format +msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" +msgstr "正在移除具有錯誤 PID %(pid)d 的 PID 檔 %(pid_file)s" + +#, python-format +msgid "Removing pid file %s with invalid pid" +msgstr "正在移除具有無效 PID 的 PID 檔 %s" + #, python-format msgid "Removing stale pid file %s" msgstr "正在移除過時 PID 檔案 %s" @@ -787,6 +888,9 @@ msgstr "" "正在將 %(meth)s 的 498 傳回至 %(acc)s/%(cont)s/%(obj)s。Ratelimit(休眠上" "限)%(e)s" +msgid "Ring change detected. Aborting current reconstruction pass." +msgstr "偵測到環變更。正在中斷現行重新建構傳遞。" + msgid "Ring change detected. Aborting current replication pass." 
msgstr "偵測到環變更。正在中斷現行抄寫傳遞。" @@ -794,6 +898,9 @@ msgstr "偵測到環變更。正在中斷現行抄寫傳遞。" msgid "Running %s once" msgstr "正在執行 %s 一次" +msgid "Running object reconstructor in script mode." +msgstr "正在 Script 模式下執行物件重新建構器。" + msgid "Running object replicator in script mode." msgstr "正在 Script 模式下執行物件抄寫器" @@ -835,6 +942,12 @@ msgstr "正在跳過 %s,原因是它未裝載" msgid "Starting %s" msgstr "正在啟動 %s" +msgid "Starting object reconstruction pass." +msgstr "正在啟動物件重新建構傳遞。" + +msgid "Starting object reconstructor in daemon mode." +msgstr "正在常駐程式模式下啟動物件重新建構器。" + msgid "Starting object replication pass." msgstr "正在啟動物件抄寫傳遞。" @@ -859,10 +972,22 @@ msgstr "" msgid "Timeout %(action)s to memcached: %(server)s" msgstr "對 memcached %(server)s 執行%(action)s作業時逾時" +#, python-format +msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" +msgstr "%(ip)s:%(port)s/%(device)s 發生逾時異常狀況" + #, python-format msgid "Trying to %(method)s %(path)s" msgstr "正在嘗試 %(method)s %(path)s" +#, python-format +msgid "Trying to GET %(full_path)s" +msgstr "正在嘗試對 %(full_path)s 執行 GET 動作" + +#, python-format +msgid "Trying to get %s status of PUT to %s" +msgstr "正在嘗試使 PUT 的 %s 狀態為 %s" + #, python-format msgid "Trying to get final status of PUT to %s" msgstr "正在嘗試使 PUT 的最終狀態為 %s" @@ -876,6 +1001,10 @@ msgstr "正在嘗試於 GET 期間讀取(正在重試)" msgid "Trying to send to client" msgstr "正在嘗試傳送至用戶端" +#, python-format +msgid "Trying to sync suffixes with %s" +msgstr "正在嘗試與 %s 同步字尾" + #, python-format msgid "Trying to write to %s" msgstr "正在嘗試寫入至 %s" @@ -887,10 +1016,22 @@ msgstr "未捕捉的異常狀況" msgid "Unable to find %s config section in %s" msgstr "找不到 %s 配置區段(在 %s 中)" +#, python-format +msgid "Unable to load internal client from config: %r (%s)" +msgstr "無法從配置載入內部用戶端:%r (%s)" + #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." 
msgstr "在 libc 中找不到 %s。保留為 no-op。" +#, python-format +msgid "Unable to locate config for %s" +msgstr "找不到 %s 的配置" + +#, python-format +msgid "Unable to locate config number %s for %s" +msgstr "找不到配置號碼 %s(針對 %s)" + msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "在 libc 中找不到 fallocate、posix_fallocate。保留為 no-op。" @@ -914,6 +1055,11 @@ msgstr "非預期的回應:%s" msgid "Unhandled exception" msgstr "無法處理的異常狀況" +#, python-format +msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" +msgstr "" +"嘗試執行 GET 動作時發生不明異常狀況:%(account)r %(container)r %(object)r" + #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s 的更新報告失敗" @@ -941,6 +1087,10 @@ msgstr "警告:無法修改記憶體限制。以非 root 使用者身分執行 msgid "Waited %s seconds for %s to die; giving up" msgstr "已等待 %s 秒以讓 %s 當掉;正在放棄" +#, python-format +msgid "Waited %s seconds for %s to die; killing" +msgstr "已等待 %s 秒以讓 %s 當掉" + msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告:無法在沒有 memcached 用戶端的情況下限制速率" From fd86d5a95d73714365c07cb36bfd1404306142a7 Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Mon, 15 Feb 2016 19:17:01 +0000 Subject: [PATCH 048/141] Skip already checked partitions when auditing objects after a restart The object auditor will save a short status file on each device, containing a list of remaining partitions for auditing. If the auditor is restarted, it will only audit partitions not yet checked. If all partitions on the current device have been checked, it will simply skip this device. Once all partitions on all disks are successfully audited, all status files are removed. 
Closes-Bug: #1183656 Change-Id: Icf1d920d0942ce48f1d3d374ea4d63dbc29ea464 --- swift/obj/auditor.py | 7 +++- swift/obj/diskfile.py | 66 ++++++++++++++++++++++++++++++---- test/unit/obj/test_auditor.py | 16 +++++++-- test/unit/obj/test_diskfile.py | 37 +++++++++++++++++++ 4 files changed, 117 insertions(+), 9 deletions(-) diff --git a/swift/obj/auditor.py b/swift/obj/auditor.py index de1199ea52..b9b1643709 100644 --- a/swift/obj/auditor.py +++ b/swift/obj/auditor.py @@ -95,7 +95,9 @@ class AuditorWorker(object): # can find all diskfile locations regardless of policy -- so for now # just use Policy-0's manager. all_locs = (self.diskfile_router[POLICIES[0]] - .object_audit_location_generator(device_dirs=device_dirs)) + .object_audit_location_generator( + device_dirs=device_dirs, + auditor_type=self.auditor_type)) for location in all_locs: loop_time = time.time() self.failsafe_object_audit(location) @@ -156,6 +158,9 @@ class AuditorWorker(object): self.logger.info( _('Object audit stats: %s') % json.dumps(self.stats_buckets)) + # Unset remaining partitions to not skip them in the next run + diskfile.clear_auditor_status(self.devices, self.auditor_type) + def record_stats(self, obj_size): """ Based on config's object_size_stats will keep track of how many objects diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index 0928a5b688..4e4d3a40c1 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -33,6 +33,7 @@ are also not considered part of the backend API. import six.moves.cPickle as pickle import errno import fcntl +import json import os import time import uuid @@ -263,7 +264,7 @@ class AuditLocation(object): def object_audit_location_generator(devices, mount_check=True, logger=None, - device_dirs=None): + device_dirs=None, auditor_type="ALL"): """ Given a devices path (e.g. "/srv/node"), yield an AuditLocation for all objects stored under that directory if device_dirs isn't set. 
If @@ -277,7 +278,8 @@ def object_audit_location_generator(devices, mount_check=True, logger=None, :param mount_check: flag to check if a mount check should be performed on devices :param logger: a logger object - :device_dirs: a list of directories under devices to traverse + :param device_dirs: a list of directories under devices to traverse + :param auditor_type: either ALL or ZBF """ if not device_dirs: device_dirs = listdir(devices) @@ -307,8 +309,12 @@ def object_audit_location_generator(devices, mount_check=True, logger=None, 'to a valid policy (%s)') % (dir_, e)) continue datadir_path = os.path.join(devices, device, dir_) - partitions = listdir(datadir_path) - for partition in partitions: + + partitions = get_auditor_status(datadir_path, logger, auditor_type) + + for pos, partition in enumerate(partitions): + update_auditor_status(datadir_path, logger, + partitions[pos:], auditor_type) part_path = os.path.join(datadir_path, partition) try: suffixes = listdir(part_path) @@ -329,6 +335,51 @@ def object_audit_location_generator(devices, mount_check=True, logger=None, yield AuditLocation(hsh_path, device, partition, policy) + update_auditor_status(datadir_path, logger, [], auditor_type) + + +def get_auditor_status(datadir_path, logger, auditor_type): + auditor_status = os.path.join( + datadir_path, "auditor_status_%s.json" % auditor_type) + status = {} + try: + with open(auditor_status) as statusfile: + status = statusfile.read() + except (OSError, IOError) as e: + if e.errno != errno.ENOENT and logger: + logger.warning(_('Cannot read %s (%s)') % (auditor_status, e)) + return listdir(datadir_path) + try: + status = json.loads(status) + except ValueError as e: + logger.warning(_('Loading JSON from %s failed (%s)') % ( + auditor_status, e)) + return listdir(datadir_path) + return status['partitions'] + + +def update_auditor_status(datadir_path, logger, partitions, auditor_type): + status = json.dumps({'partitions': partitions}) + auditor_status = os.path.join( + 
datadir_path, "auditor_status_%s.json" % auditor_type) + try: + with open(auditor_status, "wb") as statusfile: + statusfile.write(status) + except (OSError, IOError) as e: + if logger: + logger.warning(_('Cannot write %s (%s)') % (auditor_status, e)) + + +def clear_auditor_status(devices, auditor_type="ALL"): + for device in os.listdir(devices): + for dir_ in os.listdir(os.path.join(devices, device)): + if not dir_.startswith("objects"): + continue + datadir_path = os.path.join(devices, device, dir_) + auditor_status = os.path.join( + datadir_path, "auditor_status_%s.json" % auditor_type) + remove_file(auditor_status) + def strip_self(f): """ @@ -897,14 +948,17 @@ class BaseDiskFileManager(object): policy=policy, use_splice=self.use_splice, pipe_size=self.pipe_size, **kwargs) - def object_audit_location_generator(self, device_dirs=None): + def object_audit_location_generator(self, device_dirs=None, + auditor_type="ALL"): """ Yield an AuditLocation for all objects stored under device_dirs. 
:param device_dirs: directory of target device + :param auditor_type: either ALL or ZBF """ return object_audit_location_generator(self.devices, self.mount_check, - self.logger, device_dirs) + self.logger, device_dirs, + auditor_type) def get_diskfile_from_audit_location(self, audit_location): """ diff --git a/test/unit/obj/test_auditor.py b/test/unit/obj/test_auditor.py index b5db2e55e9..bebff1f41d 100644 --- a/test/unit/obj/test_auditor.py +++ b/test/unit/obj/test_auditor.py @@ -26,7 +26,8 @@ from test.unit import FakeLogger, patch_policies, make_timestamp_iter, \ DEFAULT_TEST_EC_TYPE from swift.obj import auditor from swift.obj.diskfile import DiskFile, write_metadata, invalidate_hash, \ - get_data_dir, DiskFileManager, ECDiskFileManager, AuditLocation + get_data_dir, DiskFileManager, ECDiskFileManager, AuditLocation, \ + clear_auditor_status, get_auditor_status from swift.common.utils import mkdirs, normalize_timestamp, Timestamp from swift.common.storage_policy import ECStoragePolicy, StoragePolicy, \ POLICIES @@ -460,6 +461,7 @@ class TestAuditor(unittest.TestCase): self.auditor.run_audit(**kwargs) self.assertFalse(os.path.isdir(quarantine_path)) del(kwargs['zero_byte_fps']) + clear_auditor_status(self.devices) self.auditor.run_audit(**kwargs) self.assertTrue(os.path.isdir(quarantine_path)) @@ -495,10 +497,20 @@ class TestAuditor(unittest.TestCase): self.setup_bad_zero_byte() kwargs = {'mode': 'once'} kwargs['zero_byte_fps'] = 50 - self.auditor.run_audit(**kwargs) + + called_args = [0] + + def mock_get_auditor_status(path, logger, audit_type): + called_args[0] = audit_type + return get_auditor_status(path, logger, audit_type) + + with mock.patch('swift.obj.diskfile.get_auditor_status', + mock_get_auditor_status): + self.auditor.run_audit(**kwargs) quarantine_path = os.path.join(self.devices, 'sda', 'quarantined', 'objects') self.assertTrue(os.path.isdir(quarantine_path)) + self.assertEqual('ZBF', called_args[0]) def 
test_object_run_fast_track_zero_check_closed(self): rat = [False] diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index 534882bec3..18530678a2 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -359,6 +359,9 @@ class TestObjectAuditLocationGenerator(unittest.TestCase): ] self.assertEqual(locations, expected) + # Reset status file for next run + diskfile.clear_auditor_status(tmpdir) + # now without a logger locations = [(loc.path, loc.device, loc.partition, loc.policy) for loc in diskfile.object_audit_location_generator( @@ -433,6 +436,40 @@ class TestObjectAuditLocationGenerator(unittest.TestCase): with mock.patch('os.listdir', splode_if_endswith("b54")): self.assertRaises(OSError, list_locations, tmpdir) + def test_auditor_status(self): + with temptree([]) as tmpdir: + os.makedirs(os.path.join(tmpdir, "sdf", "objects", "1", "a", "b")) + os.makedirs(os.path.join(tmpdir, "sdf", "objects", "2", "a", "b")) + + # Auditor starts, there are two partitions to check + gen = diskfile.object_audit_location_generator(tmpdir, False) + gen.next() + gen.next() + + # Auditor stopped for some reason without raising StopIterator in + # the generator and restarts There is now only one remaining + # partition to check + gen = diskfile.object_audit_location_generator(tmpdir, False) + gen.next() + + # There are no more remaining partitions + self.assertRaises(StopIteration, gen.next) + + # There are no partitions to check if the auditor restarts another + # time and the status files have not been cleared + gen = diskfile.object_audit_location_generator(tmpdir, False) + self.assertRaises(StopIteration, gen.next) + + # Reset status file + diskfile.clear_auditor_status(tmpdir) + + # If the auditor restarts another time, we expect to + # check two partitions again, because the remaining + # partitions were empty and a new listdir was executed + gen = diskfile.object_audit_location_generator(tmpdir, False) + gen.next() + 
gen.next() + class TestDiskFileRouter(unittest.TestCase): From 365276464ce41070f14fee4314e665dd2934bfa6 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 22 Mar 2016 06:16:34 +0000 Subject: [PATCH 049/141] Imported Translations from Zanata For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: I70db7d29a9859cb47144ac49df8c289d1c2ec3e6 --- swift/locale/it/LC_MESSAGES/swift.po | 13 +++--- swift/locale/ru/LC_MESSAGES/swift.po | 10 +++-- swift/locale/swift.pot | 59 +++++++++++++++++----------- 3 files changed, 51 insertions(+), 31 deletions(-) diff --git a/swift/locale/it/LC_MESSAGES/swift.po b/swift/locale/it/LC_MESSAGES/swift.po index 1cccad5c97..a6932efb9a 100644 --- a/swift/locale/it/LC_MESSAGES/swift.po +++ b/swift/locale/it/LC_MESSAGES/swift.po @@ -6,16 +6,17 @@ # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Alessandra , 2016. #zanata +# Remo Mattei , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev235\n" +"Project-Id-Version: swift 2.6.1.dev244\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-18 23:11+0000\n" +"POT-Creation-Date: 2016-03-22 03:44+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-19 05:14+0000\n" -"Last-Translator: Alessandra \n" +"PO-Revision-Date: 2016-03-22 05:18+0000\n" +"Last-Translator: Remo Mattei \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" @@ -211,7 +212,7 @@ msgid "Beginning pass on account %s" msgstr "Avvio della trasmissione sull'account %s" msgid "Beginning replication run" -msgstr "Avvio dell'esecuzione della replica" +msgstr "Avvio replica" msgid "Broker error trying to rollback locked connection" msgstr "" @@ -1013,7 +1014,7 @@ msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Rsync di %(src)s eseguito correttamente su %(dst)s 
(%(time).03f)" msgid "The file type are forbidden to access!" -msgstr "Non è consentito l'accesso al tipo di file." +msgstr "Non è consentito l'accesso a questo tipo di file!" #, python-format msgid "" diff --git a/swift/locale/ru/LC_MESSAGES/swift.po b/swift/locale/ru/LC_MESSAGES/swift.po index 61ae557637..764e83ba6f 100644 --- a/swift/locale/ru/LC_MESSAGES/swift.po +++ b/swift/locale/ru/LC_MESSAGES/swift.po @@ -9,13 +9,13 @@ # Grigory Mokhin , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev235\n" +"Project-Id-Version: swift 2.6.1.dev244\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-18 23:11+0000\n" +"POT-Creation-Date: 2016-03-22 03:44+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-10 07:41+0000\n" +"PO-Revision-Date: 2016-03-21 07:06+0000\n" "Last-Translator: Grigory Mokhin \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 
0 : n%10>=2 && n" @@ -907,6 +907,10 @@ msgstr "Удаление объектов %s" msgid "Removing partition: %s" msgstr "Удаление раздела: %s" +#, python-format +msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" +msgstr "Удаление файла pid %(pid_file)s с ошибочным pid %(pid)d" + #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Удаление pid файла %s с неверным pid-ом" diff --git a/swift/locale/swift.pot b/swift/locale/swift.pot index 0b9b9347a2..ec3baaa9fb 100644 --- a/swift/locale/swift.pot +++ b/swift/locale/swift.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev235\n" +"Project-Id-Version: swift 2.6.1.dev244\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-21 06:16+0000\n" +"POT-Creation-Date: 2016-03-22 06:16+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -64,7 +64,7 @@ msgid "ERROR Could not get account info %s" msgstr "" #: swift/account/reaper.py:139 swift/common/utils.py:2342 -#: swift/obj/diskfile.py:359 swift/obj/updater.py:88 swift/obj/updater.py:131 +#: swift/obj/diskfile.py:361 swift/obj/updater.py:88 swift/obj/updater.py:131 #, python-format msgid "Skipping %s as it is not mounted" msgstr "" @@ -568,11 +568,11 @@ msgstr "" msgid "Error listing devices" msgstr "" -#: swift/common/middleware/recon.py:259 +#: swift/common/middleware/recon.py:265 msgid "Error reading ringfile" msgstr "" -#: swift/common/middleware/recon.py:273 +#: swift/common/middleware/recon.py:279 msgid "Error reading swift.conf" msgstr "" @@ -813,7 +813,7 @@ msgstr "" msgid "Begin object audit \"%s\" mode (%s%s)" msgstr "" -#: swift/obj/auditor.py:108 +#: swift/obj/auditor.py:110 #, python-format msgid "" "Object audit (%(type)s). 
Since %(start_time)s: Locally: %(passes)d " @@ -822,7 +822,7 @@ msgid "" "%(audit).2f, Rate: %(audit_rate).2f" msgstr "" -#: swift/obj/auditor.py:142 +#: swift/obj/auditor.py:144 #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. " @@ -831,78 +831,93 @@ msgid "" "Rate: %(audit_rate).2f" msgstr "" -#: swift/obj/auditor.py:157 +#: swift/obj/auditor.py:159 #, python-format msgid "Object audit stats: %s" msgstr "" -#: swift/obj/auditor.py:185 +#: swift/obj/auditor.py:190 #, python-format msgid "ERROR Trying to audit %s" msgstr "" -#: swift/obj/auditor.py:222 +#: swift/obj/auditor.py:227 #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" -#: swift/obj/auditor.py:274 +#: swift/obj/auditor.py:279 #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "" -#: swift/obj/auditor.py:345 swift/obj/auditor.py:366 +#: swift/obj/auditor.py:350 swift/obj/auditor.py:371 #, python-format msgid "ERROR auditing: %s" msgstr "" -#: swift/obj/diskfile.py:369 swift/obj/updater.py:162 +#: swift/obj/diskfile.py:371 swift/obj/updater.py:162 #, python-format msgid "Directory %r does not map to a valid policy (%s)" msgstr "" -#: swift/obj/diskfile.py:853 +#: swift/obj/diskfile.py:413 +#, python-format +msgid "Cannot read %s (%s)" +msgstr "" + +#: swift/obj/diskfile.py:418 +#, python-format +msgid "Loading JSON from %s failed (%s)" +msgstr "" + +#: swift/obj/diskfile.py:433 +#, python-format +msgid "Cannot write %s (%s)" +msgstr "" + +#: swift/obj/diskfile.py:904 #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" -#: swift/obj/diskfile.py:973 +#: swift/obj/diskfile.py:1024 msgid "Error hashing suffix" msgstr "" -#: swift/obj/diskfile.py:1134 +#: swift/obj/diskfile.py:1188 #, python-format msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" -#: swift/obj/diskfile.py:1387 +#: 
swift/obj/diskfile.py:1441 #, python-format msgid "Problem cleaning up %s" msgstr "" -#: swift/obj/diskfile.py:1732 +#: swift/obj/diskfile.py:1786 #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" -#: swift/obj/diskfile.py:2060 +#: swift/obj/diskfile.py:2114 #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata " "%(meta)s" msgstr "" -#: swift/obj/diskfile.py:2468 +#: swift/obj/diskfile.py:2522 #, python-format msgid "No space left on device for %s (%s)" msgstr "" -#: swift/obj/diskfile.py:2477 +#: swift/obj/diskfile.py:2531 #, python-format msgid "Problem cleaning up %s (%s)" msgstr "" -#: swift/obj/diskfile.py:2480 +#: swift/obj/diskfile.py:2534 #, python-format msgid "Problem writing durable state file %s (%s)" msgstr "" From 88b575895750c2c00f75a6193cc31bc930e980fd Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Tue, 22 Mar 2016 11:10:41 +0000 Subject: [PATCH 050/141] Remove unused code from container sync Change-Id: Ia44138aadcd30c474f744a9c552220e18302ecc6 --- swift/container/sync.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/swift/container/sync.py b/swift/container/sync.py index 1c2260e0f5..2ade27a5a1 100644 --- a/swift/container/sync.py +++ b/swift/container/sync.py @@ -39,7 +39,6 @@ from swift.common.utils import ( whataremyips, Timestamp, decode_timestamps) from swift.common.daemon import Daemon from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND -from swift.common.storage_policy import POLICIES from swift.common.wsgi import ConfigString @@ -238,15 +237,6 @@ class ContainerSync(Daemon): _('Unable to load internal client from config: %r (%s)') % (internal_client_conf_path, err)) - def get_object_ring(self, policy_idx): - """ - Get the ring object to use based on its policy. 
- - :policy_idx: policy index as defined in swift.conf - :returns: appropriate ring object - """ - return POLICIES.get_object_ring(policy_idx, self.swift_dir) - def run_forever(self, *args, **kwargs): """ Runs container sync scans until stopped. @@ -364,8 +354,6 @@ class ContainerSync(Daemon): row = rows[0] if row['ROWID'] > sync_point1: break - key = hash_path(info['account'], info['container'], - row['name'], raw_digest=True) # This node will only initially sync out one third of the # objects (if 3 replicas, 1/4 if 4, etc.) and will skip # problematic rows as needed in case of faults. From 736de613f1e2a6a460f4499cbd1e9f7f1fc3da68 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Tue, 22 Mar 2016 11:36:32 +0000 Subject: [PATCH 051/141] Docs: Container sync does not require POST-as-COPY Updates docs to remove warnings that container sync only works with object_post_as_copy=True. Since commit e91de49 container sync will also sync POST updates when using object_post_as_copy=False. Change-Id: I5cc3cc6e8f9ba2fef6f896f2b11d2a4e06825f7f --- doc/manpages/proxy-server.conf.5 | 3 +-- doc/source/deployment_guide.rst | 6 +----- doc/source/overview_container_sync.rst | 14 -------------- etc/proxy-server.conf-sample | 3 +-- swift/container/sync.py | 7 ------- 5 files changed, 3 insertions(+), 30 deletions(-) diff --git a/doc/manpages/proxy-server.conf.5 b/doc/manpages/proxy-server.conf.5 index 724aafd70c..6fd9d16ea3 100644 --- a/doc/manpages/proxy-server.conf.5 +++ b/doc/manpages/proxy-server.conf.5 @@ -971,8 +971,7 @@ is false. .IP \fBobject_post_as_copy\fR Set object_post_as_copy = false to turn on fast posts where only the metadata changes are stored as new and the original data file is kept in place. This makes for quicker -posts; but since the container metadata isn't updated in this mode, features like -container sync won't be able to sync posts. The default is True. +posts. The default is True. 
.IP \fBaccount_autocreate\fR If set to 'true' authorized accounts that do not yet exist within the Swift cluster will be automatically created. The default is set to false. diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 94d418c660..590b1f6a27 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -1325,11 +1325,7 @@ object_post_as_copy true Set object_post_as_copy = false the metadata changes are stored anew and the original data file is kept in place. This makes for - quicker posts; but since the - container metadata isn't updated - in this mode, features like - container sync won't be able to - sync posts. + quicker posts. account_autocreate false If set to 'true' authorized accounts that do not yet exist within the Swift cluster will diff --git a/doc/source/overview_container_sync.rst b/doc/source/overview_container_sync.rst index c1255acaff..25772bdf1e 100644 --- a/doc/source/overview_container_sync.rst +++ b/doc/source/overview_container_sync.rst @@ -12,13 +12,6 @@ configure their cluster to allow/accept sync requests to/from other clusters, and the user specifies where to sync their container to along with a secret synchronization key. -.. note:: - - Container sync will sync object POSTs only if the proxy server is set to - use "object_post_as_copy = true" which is the default. So-called fast - object posts, "object_post_as_copy = false" do not update the container - listings and therefore can't be detected for synchronization. - .. note:: If you are using the large objects feature you will need to ensure both @@ -386,13 +379,6 @@ from ``sync-containers``. cluster. Therefore, the container servers must be permitted to initiate outbound connections to the remote proxy servers (or load balancers). -.. note:: - - Container sync will sync object POSTs only if the proxy server is set to - use "object_post_as_copy = true" which is the default. 
So-called fast - object posts, "object_post_as_copy = false" do not update the container - listings and therefore can't be detected for synchronization. - The actual syncing is slightly more complicated to make use of the three (or number-of-replicas) main nodes for a container without each trying to do the exact same work but also without missing work if one node happens to diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index 8267ce4ab6..a746329b55 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -131,8 +131,7 @@ use = egg:swift#proxy # # Set object_post_as_copy = false to turn on fast posts where only the metadata # changes are stored anew and the original data file is kept in place. This -# makes for quicker posts; but since the container metadata isn't updated in -# this mode, features like container sync won't be able to sync posts. +# makes for quicker posts. # object_post_as_copy = true # # If set to 'true' authorized accounts that do not yet exist within the Swift diff --git a/swift/container/sync.py b/swift/container/sync.py index 7bb37f9225..0b3b933ac7 100644 --- a/swift/container/sync.py +++ b/swift/container/sync.py @@ -100,13 +100,6 @@ class ContainerSync(Daemon): If they exist, newer rows since the last sync will trigger PUTs or DELETEs to the other container. - .. note:: - - Container sync will sync object POSTs only if the proxy server is set - to use "object_post_as_copy = true" which is the default. So-called - fast object posts, "object_post_as_copy = false" do not update the - container listings and therefore can't be detected for synchronization. 
- The actual syncing is slightly more complicated to make use of the three (or number-of-replicas) main nodes for a container without each trying to do the exact same work but also without missing work if one node happens to From 2afa3681cb9c8a74cbe23b1de8d4f2e1a06e8f3a Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Tue, 22 Mar 2016 11:51:11 +0000 Subject: [PATCH 052/141] Add .eggs/* to .gitignore After running: python setup.py build_sphinx there is a .eggs directory left in the repo root directory which is not currently ignored by git. Change-Id: Id15811f94046fd8bb22153425bf5cafe6c045453 --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 580518daac..c5e9f93b5d 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ ChangeLog .coverage *.egg *.egg-info +.eggs/* .DS_Store .tox pycscope.* From eecb1f2d54b8737c784952072b7374ceb7647245 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 23 Mar 2016 06:16:13 +0000 Subject: [PATCH 053/141] Imported Translations from Zanata For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: Ia2c2819db372da46538d71a80888a4e27538bdcd --- swift/locale/it/LC_MESSAGES/swift.po | 26 +++- swift/locale/ja/LC_MESSAGES/swift.po | 13 +- swift/locale/pt_BR/LC_MESSAGES/swift.po | 166 +++++++++++++++++++++++- swift/locale/zh_CN/LC_MESSAGES/swift.po | 25 +++- 4 files changed, 210 insertions(+), 20 deletions(-) diff --git a/swift/locale/it/LC_MESSAGES/swift.po b/swift/locale/it/LC_MESSAGES/swift.po index a6932efb9a..e7f30cac7f 100644 --- a/swift/locale/it/LC_MESSAGES/swift.po +++ b/swift/locale/it/LC_MESSAGES/swift.po @@ -9,13 +9,13 @@ # Remo Mattei , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev244\n" +"Project-Id-Version: swift 2.6.1.dev254\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-22 03:44+0000\n" +"POT-Creation-Date: 2016-03-22 19:48+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-22 05:18+0000\n" +"PO-Revision-Date: 2016-03-22 05:31+0000\n" "Last-Translator: Remo Mattei \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" @@ -227,6 +227,14 @@ msgstr "Impossibile accedere al file %s." msgid "Can not load profile data from %s." msgstr "Impossibile caricare i dati del profilo da %s." +#, python-format +msgid "Cannot read %s (%s)" +msgstr "Non e' possibile leggere %s (%s)" + +#, python-format +msgid "Cannot write %s (%s)" +msgstr "Non e' possibile scriver %s (%s)" + #, python-format msgid "Client did not read from proxy within %ss" msgstr "Il client non ha eseguito la lettura dal proxy in %ss" @@ -440,7 +448,7 @@ msgstr "ERRORE Eccezione non gestita nella richiesta" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " -msgstr "ERRORE Errore __call__ con %(method)s %(path)s " +msgstr "ERRORE errore __call__ con %(method)s %(path)s " #, python-format msgid "" @@ -588,7 +596,7 @@ msgstr "" "%(frag_index)s" msgid "Error: An error occurred" -msgstr "Errore: si è verificato un errore" +msgstr "Errore: Si è verificato un errore" msgid "Error: missing config path argument" msgstr "Errore: Argomento path della configurazione mancante" @@ -598,7 +606,7 @@ msgid "Error: unable to locate %s" msgstr "Errore: impossibile individuare %s" msgid "Exception dumping recon cache" -msgstr "Eccezione durante il dump della cache di riconoscimento" +msgstr "Eccezione durante il dump della cache di recon" msgid "Exception in top-level account reaper loop" msgstr "Eccezione nel loop reaper dell'account di livello superiore" @@ -694,6 +702,10 @@ msgstr "" msgid 
"Killing long-running rsync: %s" msgstr "Chiusura rsync ad elaborazione prolungata: %s" +#, python-format +msgid "Loading JSON from %s failed (%s)" +msgstr "Caricamento JSON dal %s fallito (%s)" + msgid "Lockup detected.. killing live coros." msgstr "Blocco rilevato... chiusura dei coros attivi." @@ -926,7 +938,7 @@ msgstr "Esecuzione della replica TERMINATA" #, python-format msgid "Returning 497 because of blacklisting: %s" -msgstr "Viene restituito 497 a causa della blacklist: %s" +msgstr "Viene restituito il codice 497 a causa della blacklist: %s" #, python-format msgid "" diff --git a/swift/locale/ja/LC_MESSAGES/swift.po b/swift/locale/ja/LC_MESSAGES/swift.po index e49d3e63d6..1922a46b16 100644 --- a/swift/locale/ja/LC_MESSAGES/swift.po +++ b/swift/locale/ja/LC_MESSAGES/swift.po @@ -7,16 +7,17 @@ # Akihiro Motoki , 2015. #zanata # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata +# 笹原 昌美 , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev176\n" +"Project-Id-Version: swift 2.6.1.dev254\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-08 04:09+0000\n" +"POT-Creation-Date: 2016-03-22 19:48+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-09-26 09:26+0000\n" -"Last-Translator: Akihiro Motoki \n" +"PO-Revision-Date: 2016-03-23 02:20+0000\n" +"Last-Translator: 笹原 昌美 \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" @@ -889,6 +890,10 @@ msgstr "" msgid "Timeout %(action)s to memcached: %(server)s" msgstr "memcached %(server)s に対する %(action)s がタイムアウトになりました" +#, python-format +msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" +msgstr "%(ip)s:%(port)s/%(device)s のタイムアウト例外" + #, python-format msgid "Trying to %(method)s %(path)s" msgstr "%(method)s %(path)s を試行中" diff --git a/swift/locale/pt_BR/LC_MESSAGES/swift.po b/swift/locale/pt_BR/LC_MESSAGES/swift.po index 
0820c031bd..9cffa44b2a 100644 --- a/swift/locale/pt_BR/LC_MESSAGES/swift.po +++ b/swift/locale/pt_BR/LC_MESSAGES/swift.po @@ -9,16 +9,17 @@ # Volmar Oliveira Junior , 2014 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata +# Carlos Marques , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev176\n" +"Project-Id-Version: swift 2.6.1.dev254\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-08 04:09+0000\n" +"POT-Creation-Date: 2016-03-22 19:48+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-08-11 11:22+0000\n" -"Last-Translator: openstackjenkins \n" +"PO-Revision-Date: 2016-03-22 02:07+0000\n" +"Last-Translator: Carlos Marques \n" "Language: pt-BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" @@ -55,6 +56,16 @@ msgstr "%(ip)s/%(device)s respondeu como desmontado" msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" +#, python-format +msgid "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" +"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " +"(%(rate).2f/sec, %(remaining)s remaining)" +msgstr "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partições de %(device)d/" +"%(dtotal)d (%(dpercentage).2f%%) dispositivos reconstruídos em %(time).2fs " +"(%(rate).2f/sec, %(remaining)s restantes)" + #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " @@ -91,6 +102,10 @@ msgstr "%s não existe" msgid "%s is not mounted" msgstr "%s não está montado" +#, python-format +msgid "%s responded as unmounted" +msgstr "%s respondeu como não montado" + #, python-format msgid "%s running (%s - %s)" msgstr "%s em execução (%s - %s)" @@ -212,6 +227,14 @@ msgstr "Não é possível acessar o arquivo %s." msgid "Can not load profile data from %s." 
msgstr "Não é possível carregar dados do perfil a partir de %s." +#, python-format +msgid "Cannot read %s (%s)" +msgstr "Não é possível ler %s (%s)" + +#, python-format +msgid "Cannot write %s (%s)" +msgstr "Não é possível gravar %s (%s)" + #, python-format msgid "Client did not read from proxy within %ss" msgstr "O cliente não leu no proxy dentro de %ss" @@ -222,6 +245,9 @@ msgstr "Cliente desconectado durante leitura" msgid "Client disconnected without sending enough data" msgstr "Cliente desconecatdo sem ter enviado dados suficientes" +msgid "Client disconnected without sending last chunk" +msgstr "Cliente desconectado sem ter enviado o último chunk" + #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" @@ -229,6 +255,13 @@ msgstr "" "Caminho do cliente %(client)s não corresponde ao caminho armazenado nos " "metadados do objeto %(meta)s" +msgid "" +"Configuration option internal_client_conf_path not defined. Using default " +"configuration, See internal-client.conf-sample for options" +msgstr "" +"Opção de configuração internal_client_conf_path não definida. Usando a " +"configuração padrão. 
Consulte internal-client.conf-sample para obter opções" + msgid "Connection refused" msgstr "Conexão recusada" @@ -288,6 +321,10 @@ msgstr "Erro ao fazer download de dados: %s" msgid "Devices pass completed: %.02fs" msgstr "Dispositivos finalizados: %.02fs" +#, python-format +msgid "Directory %r does not map to a valid policy (%s)" +msgstr "O diretório %r não está mapeado para uma política válida (%s)" + #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERRO %(db_file)s: %(validate_sync_to_err)s" @@ -363,6 +400,10 @@ msgstr "ERROR DiskFile %(data_file)s falha ao fechar: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "ERRO Exceção causando clientes a desconectar" +#, python-format +msgid "ERROR Exception transferring data to object servers %s" +msgstr "ERRO Exceção ao transferir dados para os servidores de objeto %s" + msgid "ERROR Failed to get my own IPs?" msgstr "ERRO Falha ao pegar meu próprio IPs?" @@ -536,6 +577,11 @@ msgstr "Erro ao sincronizar partição" msgid "Error syncing with node: %s" msgstr "Erro ao sincronizar com o nó: %s" +#, python-format +msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" +msgstr "" +"Erro ao tentar reconstruir %(path)s policy#%(policy)d frag#%(frag_index)s" + msgid "Error: An error occurred" msgstr "Erro: Ocorreu um erro" @@ -555,6 +601,9 @@ msgstr "Exceção no loop do removedor da conta de nível superior" msgid "Exception in top-level replication loop" msgstr "Exceção no loop de replicação de nível superior" +msgid "Exception in top-levelreconstruction loop" +msgstr "Exceção no loop de reconstrução de nível superior" + #, python-format msgid "Exception while deleting container %s %s" msgstr "Exceção ao excluir contêiner %s %s" @@ -592,6 +641,13 @@ msgstr "Cadeia CNAME a seguir para %(given_domain)s para%(found_domain)s" msgid "Found configs:" msgstr "Localizados arquivos de configuração:" +msgid "" +"Handoffs first mode still has handoffs 
remaining. Aborting current " +"replication pass." +msgstr "" +"O primeiro modo de handoffs ainda possui handoffs. Interrompendo a aprovação " +"da replicação atual." + msgid "Host unreachable" msgstr "Destino inalcançável" @@ -611,6 +667,10 @@ msgstr "Host inválido %r em X-Container-Sync-To" msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Entrada pendente inválida %(file)s: %(entry)s" +#, python-format +msgid "Invalid response %(resp)s from %(full_path)s" +msgstr "Resposta inválida %(resp)s a partir de %(full_path)s" + #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Resposta inválida %(resp)s a partir de %(ip)s" @@ -627,6 +687,10 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "Eliminando a ressincronização de longa execução: %s" +#, python-format +msgid "Loading JSON from %s failed (%s)" +msgstr "Falha ao carregar JSON a partir do %s (%s)" + msgid "Lockup detected.. killing live coros." msgstr "Bloqueio detectado... eliminando núcleos em tempo real." @@ -646,10 +710,18 @@ msgstr "Nenhum terminal de cluster para %r %r" msgid "No permission to signal PID %d" msgstr "Nenhuma permissão para PID do sinal %d" +#, python-format +msgid "No policy with index %s" +msgstr "Nenhuma política com índice %s" + #, python-format msgid "No realm key for %r" msgstr "Nenhuma chave do domínio para %r" +#, python-format +msgid "No space left on device for %s (%s)" +msgstr "Nenhum espaço deixado no dispositivo para %s (%s)" + #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Erro de nó limitado %(ip)s:%(port)s (%(device)s)" @@ -667,6 +739,10 @@ msgstr "" "Não localizado %(sync_from)r => %(sync_to)r – objeto " "%(obj_name)r" +#, python-format +msgid "Nothing reconstructed for %s seconds." +msgstr "Nada foi reconstruído durante %s segundos." + #, python-format msgid "Nothing replicated for %s seconds." msgstr "Nada foi replicado para %s segundos." 
@@ -699,10 +775,30 @@ msgstr "" "Total de arquivos/seg: %(frate).2f, Total de bytes/seg: %(brate).2f, Tempo " "de auditoria: %(audit).2f, Taxa: %(audit_rate).2f" +#, python-format +msgid "" +"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " +"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " +"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " +"%(audit_rate).2f" +msgstr "" +"Auditoria de objeto (%(type)s). Desde %(start_time)s: Localmente: %(passes)d " +"aprovado, %(quars)d em quarentena, %(errors)d erros, arquivos/s: " +"%(frate).2f, bytes/seg: %(brate).2f, Tempo total: %(total).2f, Tempo de " +"auditoria: %(audit).2f, Taxa: %(audit_rate).2f" + #, python-format msgid "Object audit stats: %s" msgstr "Estatísticas de auditoria do objeto: %s" +#, python-format +msgid "Object reconstruction complete (once). (%.02f minutes)" +msgstr "Reconstrução do objeto concluída (única). (%.02f minutos)" + +#, python-format +msgid "Object reconstruction complete. (%.02f minutes)" +msgstr "Reconstrução do objeto concluída. (%.02f minutos)" + #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Replicação completa do objeto (única). 
(%.02f minutos)" @@ -763,6 +859,14 @@ msgstr "Caminho necessário em X-Container-Sync-To" msgid "Problem cleaning up %s" msgstr "Problema ao limpar %s" +#, python-format +msgid "Problem cleaning up %s (%s)" +msgstr "Problema ao limpar %s (%s)" + +#, python-format +msgid "Problem writing durable state file %s (%s)" +msgstr "Problema ao gravar arquivo de estado durável %s (%s)" + #, python-format msgid "Profiling Error: %s" msgstr "Erro da Criação de Perfil: %s" @@ -805,6 +909,14 @@ msgstr "Removendo %s objetos" msgid "Removing partition: %s" msgstr "Removendo partição: %s" +#, python-format +msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" +msgstr "Removendo arquivo pid %(pid_file)s com pid errado %(pid)d" + +#, python-format +msgid "Removing pid file %s with invalid pid" +msgstr "Removendo o arquivo pid %s com pid inválido" + #, python-format msgid "Removing stale pid file %s" msgstr "Removendo o arquivo pid %s antigo" @@ -824,6 +936,10 @@ msgstr "" "Retornando 498 para %(meth)s para %(acc)s/%(cont)s/%(obj)s. Limite de taxa " "(Suspensão Máxima) %(e)s" +msgid "Ring change detected. Aborting current reconstruction pass." +msgstr "" +"Mudança no anel detectada. Interrompendo a aprovação da recosntrução atual." + msgid "Ring change detected. Aborting current replication pass." msgstr "" "Alteração do anel detectada. Interrompendo a aprovação da replicação atual." @@ -832,6 +948,9 @@ msgstr "" msgid "Running %s once" msgstr "Executando %s uma vez," +msgid "Running object reconstructor in script mode." +msgstr "Executando o reconstrutor do objeto no modo de script." + msgid "Running object replicator in script mode." msgstr "Executando replicador do objeto no modo de script." @@ -875,6 +994,12 @@ msgstr "Pulando %s porque não está montado" msgid "Starting %s" msgstr "Iniciando %s" +msgid "Starting object reconstruction pass." +msgstr "Iniciando a aprovação da reconstrução de objeto." + +msgid "Starting object reconstructor in daemon mode." 
+msgstr "Iniciando o reconstrutor do objeto no modo daemon." + msgid "Starting object replication pass." msgstr "Iniciando a aprovação da replicação de objeto." @@ -900,10 +1025,22 @@ msgstr "" msgid "Timeout %(action)s to memcached: %(server)s" msgstr "Tempo limite %(action)s para memcached: %(server)s" +#, python-format +msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" +msgstr "Exceção de tempo limite com %(ip)s:%(port)s/%(device)s" + #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Tentando %(method)s %(path)s" +#, python-format +msgid "Trying to GET %(full_path)s" +msgstr "Tentando GET %(full_path)s" + +#, python-format +msgid "Trying to get %s status of PUT to %s" +msgstr "Tentando obter o status %s do PUT para o %s" + #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Tentando obter o status final do PUT para o %s" @@ -917,6 +1054,10 @@ msgstr "Tentando ler durante GET (tentando novamente)" msgid "Trying to send to client" msgstr "Tentando enviar para o cliente" +#, python-format +msgid "Trying to sync suffixes with %s" +msgstr "Tentando sincronizar sufixos com %s" + #, python-format msgid "Trying to write to %s" msgstr "Tentando escrever para %s" @@ -928,6 +1069,11 @@ msgstr "EXCEÇÃO NÃO CAPTURADA" msgid "Unable to find %s config section in %s" msgstr "Não é possível localizar %s da seção de configuração em %s" +#, python-format +msgid "Unable to load internal client from config: %r (%s)" +msgstr "" +"Não é possível carregar cliente interno a partir da configuração: %r (%s)" + #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "Não é possível localizar %s em libc. Saindo como um não operacional." @@ -936,6 +1082,10 @@ msgstr "Não é possível localizar %s em libc. Saindo como um não operacional. 
msgid "Unable to locate config for %s" msgstr "Não é possível localizar configuração para %s" +#, python-format +msgid "Unable to locate config number %s for %s" +msgstr "Não é possível localizar o número de configuração %s para %s" + msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" @@ -961,6 +1111,10 @@ msgstr "Resposta inesperada: %s" msgid "Unhandled exception" msgstr "Exceção não-tratada" +#, python-format +msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" +msgstr "Exceção inesperada ao tentar GET: %(account)r %(container)r %(object)r" + #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Atualize o relatório com falha para %(container)s %(dbfile)s" @@ -994,6 +1148,10 @@ msgstr "" msgid "Waited %s seconds for %s to die; giving up" msgstr "Esperou %s segundos para %s eliminar; desistindo" +#, python-format +msgid "Waited %s seconds for %s to die; killing" +msgstr "Esperou %s segundos para %s eliminar; eliminando" + msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Aviso: Não é possível um limite de taxa sem um cliente memcached" diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po index 7684fa92e2..2a6097fd27 100644 --- a/swift/locale/zh_CN/LC_MESSAGES/swift.po +++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po @@ -6,18 +6,17 @@ # Pearl Yajing Tan(Seagate Tech) , 2014 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata -# Andreas Jaeger , 2016. #zanata # Linda , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev205\n" +"Project-Id-Version: swift 2.6.1.dev254\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-12 17:33+0000\n" +"POT-Creation-Date: 2016-03-22 19:48+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-13 07:28+0000\n" -"Last-Translator: Andreas Jaeger \n" +"PO-Revision-Date: 2016-03-22 10:30+0000\n" +"Last-Translator: Linda \n" "Language: zh-CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" @@ -222,6 +221,14 @@ msgstr "无法访问文件%s" msgid "Can not load profile data from %s." msgstr "无法从%s下载分析数据" +#, python-format +msgid "Cannot read %s (%s)" +msgstr "无法读取 %s (%s)" + +#, python-format +msgid "Cannot write %s (%s)" +msgstr "无法写入 %s (%s)" + #, python-format msgid "Client did not read from proxy within %ss" msgstr "客户尚未从代理处读取%ss" @@ -655,6 +662,10 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "终止long-running同步: %s" +#, python-format +msgid "Loading JSON from %s failed (%s)" +msgstr "从 %s 读取 JSON 失败 (%s)" + msgid "Lockup detected.. killing live coros." msgstr "检测到lockup。终止正在执行的coros" @@ -860,6 +871,10 @@ msgstr "正在移除 %s 个对象" msgid "Removing partition: %s" msgstr "移除分区:%s" +#, python-format +msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" +msgstr "移除 pid 文件 %(pid_file)s 失败,pid %(pid)d 不正确" + #, python-format msgid "Removing pid file %s with invalid pid" msgstr "正在移除带有无效 pid 的 pid 文件 %s" From 125d18e0ffac572749da58fad1591d5099b5cda7 Mon Sep 17 00:00:00 2001 From: OSHRITF Date: Wed, 20 Jan 2016 15:55:30 +0200 Subject: [PATCH 054/141] Container-Sync to perform HEAD before PUT object on remote This change adds a remote HEAD object request before each call to sync_row. 
Currently, container-sync-row attempts to replicate the object (using PUT) regardless of the existance of the object on the remote side, thus causing each object to be transferred on the wire several times (depending on the replication factor) An alternative to HEAD is to do a conditional PUT (using, 100-continue). However, this change is more involved and requires upgrade of both the client and server side clusters to work. In the Tokyo design summit it was decided to start with the HEAD approach. Change-Id: I60d982dd2cc79a0f13b0924507cd03d7f9c9d70b Closes-Bug: #1277223 --- swift/common/internal_client.py | 11 +- swift/container/sync.py | 107 ++++++++++++---- test/probe/test_container_sync.py | 20 +++ test/unit/common/test_internal_client.py | 149 +++++++++++++---------- test/unit/container/test_sync.py | 129 ++++++++++++++++++++ 5 files changed, 326 insertions(+), 90 deletions(-) diff --git a/swift/common/internal_client.py b/swift/common/internal_client.py index 2413d9ad6d..d9911326f1 100644 --- a/swift/common/internal_client.py +++ b/swift/common/internal_client.py @@ -769,6 +769,7 @@ class SimpleClient(object): req.get_method = lambda: method conn = urllib2.urlopen(req, timeout=timeout) body = conn.read() + info = conn.info() try: body_data = json.loads(body) except ValueError: @@ -792,13 +793,13 @@ class SimpleClient(object): url, conn.getcode(), sent_content_length, - conn.info()['content-length'], + info['content-length'], trans_start, trans_stop, trans_stop - trans_start, additional_info ))) - return [None, body_data] + return [info, body_data] def retry_request(self, method, **kwargs): retries = kwargs.pop('retries', self.retries) @@ -837,6 +838,12 @@ class SimpleClient(object): contents=contents.read(), **kwargs) +def head_object(url, **kwargs): + """For usage with container sync """ + client = SimpleClient(url=url) + return client.retry_request('HEAD', **kwargs) + + def put_object(url, **kwargs): """For usage with container sync """ client = 
SimpleClient(url=url) diff --git a/swift/container/sync.py b/swift/container/sync.py index 2ade27a5a1..c2fd863eec 100644 --- a/swift/container/sync.py +++ b/swift/container/sync.py @@ -29,7 +29,8 @@ from swift.container.backend import ContainerBroker from swift.container.sync_store import ContainerSyncStore from swift.common.container_sync_realms import ContainerSyncRealms from swift.common.internal_client import ( - delete_object, put_object, InternalClient, UnexpectedResponse) + delete_object, put_object, head_object, + InternalClient, UnexpectedResponse) from swift.common.exceptions import ClientException from swift.common.ring import Ring from swift.common.ring.utils import is_local_device @@ -396,10 +397,84 @@ class ContainerSync(Daemon): self.logger.exception(_('ERROR Syncing %s'), broker if broker else path) + def _update_sync_to_headers(self, name, sync_to, user_key, + realm, realm_key, method, headers): + """ + Updates container sync headers + + :param name: The name of the object + :param sync_to: The URL to the remote container. + :param user_key: The X-Container-Sync-Key to use when sending requests + to the other container. + :param realm: The realm from self.realms_conf, if there is one. + If None, fallback to using the older allowed_sync_hosts + way of syncing. + :param realm_key: The realm key from self.realms_conf, if there + is one. If None, fallback to using the older + allowed_sync_hosts way of syncing. 
+ :param method: HTTP method to create sig with + :param headers: headers to update with container sync headers + """ + if realm and realm_key: + nonce = uuid.uuid4().hex + path = urlparse(sync_to).path + '/' + quote(name) + sig = self.realms_conf.get_sig(method, path, + headers.get('x-timestamp', 0), + nonce, realm_key, + user_key) + headers['x-container-sync-auth'] = '%s %s %s' % (realm, + nonce, + sig) + else: + headers['x-container-sync-key'] = user_key + + def _object_in_remote_container(self, name, sync_to, user_key, + realm, realm_key, timestamp): + """ + Performs head object on remote to eliminate extra remote put and + local get object calls + + :param name: The name of the object in the updated row in the local + database triggering the sync update. + :param sync_to: The URL to the remote container. + :param user_key: The X-Container-Sync-Key to use when sending requests + to the other container. + :param realm: The realm from self.realms_conf, if there is one. + If None, fallback to using the older allowed_sync_hosts + way of syncing. + :param realm_key: The realm key from self.realms_conf, if there + is one. If None, fallback to using the older + allowed_sync_hosts way of syncing. 
+ :param timestamp: last modified date of local object + :returns: True if object already exists in remote + """ + headers = {'x-timestamp': timestamp.internal} + self._update_sync_to_headers(name, sync_to, user_key, realm, + realm_key, 'HEAD', headers) + try: + metadata, _ = head_object(sync_to, name=name, + headers=headers, + proxy=self.select_http_proxy(), + logger=self.logger, + retries=0) + remote_ts = Timestamp(metadata.get('x-timestamp', 0)) + self.logger.debug("remote obj timestamp %s local obj %s" % + (timestamp.internal, remote_ts.internal)) + if timestamp <= remote_ts: + return True + # Object in remote should be updated + return False + except ClientException as http_err: + # Object not in remote + if http_err.http_status == 404: + return False + raise http_err + def container_sync_row(self, row, sync_to, user_key, broker, info, realm, realm_key): """ Sends the update the row indicates to the sync_to container. + Update can be either delete or put. :param row: The updated row in the local database triggering the sync update. 
@@ -427,17 +502,9 @@ class ContainerSync(Daemon): # timestamp of the source tombstone try: headers = {'x-timestamp': ts_data.internal} - if realm and realm_key: - nonce = uuid.uuid4().hex - path = urlparse(sync_to).path + '/' + quote( - row['name']) - sig = self.realms_conf.get_sig( - 'DELETE', path, headers['x-timestamp'], nonce, - realm_key, user_key) - headers['x-container-sync-auth'] = '%s %s %s' % ( - realm, nonce, sig) - else: - headers['x-container-sync-key'] = user_key + self._update_sync_to_headers(row['name'], sync_to, + user_key, realm, realm_key, + 'DELETE', headers) delete_object(sync_to, name=row['name'], headers=headers, proxy=self.select_http_proxy(), logger=self.logger, @@ -451,6 +518,10 @@ class ContainerSync(Daemon): else: # when sync'ing a live object, use ts_meta - this is the time # at which the source object was last modified by a PUT or POST + if self._object_in_remote_container(row['name'], + sync_to, user_key, realm, + realm_key, ts_meta): + return True exc = None # look up for the newest one headers_out = {'X-Newest': True, @@ -485,16 +556,8 @@ class ContainerSync(Daemon): if 'content-type' in headers: headers['content-type'] = clean_content_type( headers['content-type']) - if realm and realm_key: - nonce = uuid.uuid4().hex - path = urlparse(sync_to).path + '/' + quote(row['name']) - sig = self.realms_conf.get_sig( - 'PUT', path, headers['x-timestamp'], nonce, realm_key, - user_key) - headers['x-container-sync-auth'] = '%s %s %s' % ( - realm, nonce, sig) - else: - headers['x-container-sync-key'] = user_key + self._update_sync_to_headers(row['name'], sync_to, user_key, + realm, realm_key, 'PUT', headers) put_object(sync_to, name=row['name'], headers=headers, contents=FileLikeIter(body), proxy=self.select_http_proxy(), logger=self.logger, diff --git a/test/probe/test_container_sync.py b/test/probe/test_container_sync.py index 7282cfd50a..ea98e32383 100644 --- a/test/probe/test_container_sync.py +++ b/test/probe/test_container_sync.py @@ 
-266,6 +266,26 @@ class TestContainerSync(ReplProbeTest): % item) for item in mismatched_headers]) self.fail(msg) + def test_sync_newer_remote(self): + source_container, dest_container = self._setup_synced_containers() + + # upload to source + object_name = 'object-%s' % uuid.uuid4() + client.put_object(self.url, self.token, source_container, object_name, + 'old-source-body') + + # upload to dest with same name + client.put_object(self.url, self.token, dest_container, object_name, + 'new-test-body') + + # cycle container-sync + Manager(['container-sync']).once() + + # verify that the remote object did not change + resp_headers, body = client.get_object(self.url, self.token, + dest_container, object_name) + self.assertEqual(body, 'new-test-body') + if __name__ == "__main__": unittest.main() diff --git a/test/unit/common/test_internal_client.py b/test/unit/common/test_internal_client.py index 834206e55b..68dbc3e18d 100644 --- a/test/unit/common/test_internal_client.py +++ b/test/unit/common/test_internal_client.py @@ -343,6 +343,9 @@ class TestInternalClient(unittest.TestCase): def read(self): return json.dumps(body) + def info(self): + return {} + for timeout in (0.0, 42.0, None): mocked_func = 'swift.common.internal_client.urllib2.urlopen' with mock.patch(mocked_func) as mock_urlopen: @@ -1181,76 +1184,84 @@ class TestGetAuth(unittest.TestCase): 'http://127.0.0.1', 'user', 'key', auth_version=2.0) -mock_time_value = 1401224049.98 - - -def mock_time(): - global mock_time_value - mock_time_value += 1 - return mock_time_value - - class TestSimpleClient(unittest.TestCase): + def _test_get_head(self, request, urlopen, method): + + mock_time_value = [1401224049.98] + + def mock_time(): + # global mock_time_value + mock_time_value[0] += 1 + return mock_time_value[0] + + with mock.patch('swift.common.internal_client.time', mock_time): + # basic request, only url as kwarg + request.return_value.get_type.return_value = "http" + urlopen.return_value.read.return_value = '' + 
urlopen.return_value.getcode.return_value = 200 + urlopen.return_value.info.return_value = {'content-length': '345'} + sc = internal_client.SimpleClient(url='http://127.0.0.1') + logger = FakeLogger() + retval = sc.retry_request( + method, headers={'content-length': '123'}, logger=logger) + self.assertEqual(urlopen.call_count, 1) + request.assert_called_with('http://127.0.0.1?format=json', + headers={'content-length': '123'}, + data=None) + self.assertEqual([{'content-length': '345'}, None], retval) + self.assertEqual(method, request.return_value.get_method()) + self.assertEqual(logger.log_dict['debug'], [( + ('-> 2014-05-27T20:54:11 ' + method + + ' http://127.0.0.1%3Fformat%3Djson 200 ' + '123 345 1401224050.98 1401224051.98 1.0 -',), {})]) + + # Check if JSON is decoded + urlopen.return_value.read.return_value = '{}' + retval = sc.retry_request(method) + self.assertEqual([{'content-length': '345'}, {}], retval) + + # same as above, now with token + sc = internal_client.SimpleClient(url='http://127.0.0.1', + token='token') + retval = sc.retry_request(method) + request.assert_called_with('http://127.0.0.1?format=json', + headers={'X-Auth-Token': 'token'}, + data=None) + self.assertEqual([{'content-length': '345'}, {}], retval) + + # same as above, now with prefix + sc = internal_client.SimpleClient(url='http://127.0.0.1', + token='token') + retval = sc.retry_request(method, prefix="pre_") + request.assert_called_with( + 'http://127.0.0.1?format=json&prefix=pre_', + headers={'X-Auth-Token': 'token'}, data=None) + self.assertEqual([{'content-length': '345'}, {}], retval) + + # same as above, now with container name + retval = sc.retry_request(method, container='cont') + request.assert_called_with('http://127.0.0.1/cont?format=json', + headers={'X-Auth-Token': 'token'}, + data=None) + self.assertEqual([{'content-length': '345'}, {}], retval) + + # same as above, now with object name + retval = sc.retry_request(method, container='cont', name='obj') + 
request.assert_called_with('http://127.0.0.1/cont/obj', + headers={'X-Auth-Token': 'token'}, + data=None) + self.assertEqual([{'content-length': '345'}, {}], retval) + @mock.patch('eventlet.green.urllib2.urlopen') @mock.patch('eventlet.green.urllib2.Request') - @mock.patch('swift.common.internal_client.time', mock_time) def test_get(self, request, urlopen): - # basic GET request, only url as kwarg - request.return_value.get_type.return_value = "http" - urlopen.return_value.read.return_value = '' - urlopen.return_value.getcode.return_value = 200 - urlopen.return_value.info.return_value = {'content-length': '345'} - sc = internal_client.SimpleClient(url='http://127.0.0.1') - logger = FakeLogger() - retval = sc.retry_request( - 'GET', headers={'content-length': '123'}, logger=logger) - self.assertEqual(urlopen.call_count, 1) - request.assert_called_with('http://127.0.0.1?format=json', - headers={'content-length': '123'}, - data=None) - self.assertEqual([None, None], retval) - self.assertEqual('GET', request.return_value.get_method()) - self.assertEqual(logger.log_dict['debug'], [( - ('-> 2014-05-27T20:54:11 GET http://127.0.0.1%3Fformat%3Djson 200 ' - '123 345 1401224050.98 1401224051.98 1.0 -',), {})]) + self._test_get_head(request, urlopen, 'GET') - # Check if JSON is decoded - urlopen.return_value.read.return_value = '{}' - retval = sc.retry_request('GET') - self.assertEqual([None, {}], retval) - - # same as above, now with token - sc = internal_client.SimpleClient(url='http://127.0.0.1', - token='token') - retval = sc.retry_request('GET') - request.assert_called_with('http://127.0.0.1?format=json', - headers={'X-Auth-Token': 'token'}, - data=None) - self.assertEqual([None, {}], retval) - - # same as above, now with prefix - sc = internal_client.SimpleClient(url='http://127.0.0.1', - token='token') - retval = sc.retry_request('GET', prefix="pre_") - request.assert_called_with('http://127.0.0.1?format=json&prefix=pre_', - headers={'X-Auth-Token': 'token'}, - 
data=None) - self.assertEqual([None, {}], retval) - - # same as above, now with container name - retval = sc.retry_request('GET', container='cont') - request.assert_called_with('http://127.0.0.1/cont?format=json', - headers={'X-Auth-Token': 'token'}, - data=None) - self.assertEqual([None, {}], retval) - - # same as above, now with object name - retval = sc.retry_request('GET', container='cont', name='obj') - request.assert_called_with('http://127.0.0.1/cont/obj', - headers={'X-Auth-Token': 'token'}, - data=None) - self.assertEqual([None, {}], retval) + @mock.patch('eventlet.green.urllib2.urlopen') + @mock.patch('eventlet.green.urllib2.Request') + def test_head(self, request, urlopen): + self._test_get_head(request, urlopen, 'HEAD') @mock.patch('eventlet.green.urllib2.urlopen') @mock.patch('eventlet.green.urllib2.Request') @@ -1272,6 +1283,7 @@ class TestSimpleClient(unittest.TestCase): request.return_value.get_type.return_value = "http" mock_resp = mock.MagicMock() mock_resp.read.return_value = '' + mock_resp.info.return_value = {} urlopen.side_effect = [urllib2.URLError(''), mock_resp] sc = internal_client.SimpleClient(url='http://127.0.0.1', retries=1, token='token') @@ -1283,13 +1295,14 @@ class TestSimpleClient(unittest.TestCase): self.assertEqual(urlopen.call_count, 2) request.assert_called_with('http://127.0.0.1?format=json', data=None, headers={'X-Auth-Token': 'token'}) - self.assertEqual([None, None], retval) + self.assertEqual([{}, None], retval) self.assertEqual(sc.attempts, 2) @mock.patch('eventlet.green.urllib2.urlopen') def test_get_with_retries_param(self, mock_urlopen): mock_response = mock.MagicMock() mock_response.read.return_value = '' + mock_response.info.return_value = {} mock_urlopen.side_effect = internal_client.httplib.BadStatusLine('') c = internal_client.SimpleClient(url='http://127.0.0.1', token='token') self.assertEqual(c.retries, 5) @@ -1315,7 +1328,7 @@ class TestSimpleClient(unittest.TestCase): retval = c.retry_request('GET', 
retries=1) self.assertEqual(mock_sleep.call_count, 1) self.assertEqual(mock_urlopen.call_count, 2) - self.assertEqual([None, None], retval) + self.assertEqual([{}, None], retval) @mock.patch('eventlet.green.urllib2.urlopen') def test_request_with_retries_with_HTTPError(self, mock_urlopen): @@ -1380,9 +1393,13 @@ class TestSimpleClient(unittest.TestCase): url = 'https://127.0.0.1:1/a' class FakeConn(object): + def read(self): return 'irrelevant' + def info(self): + return {} + mocked = 'swift.common.internal_client.urllib2.urlopen' # module level methods diff --git a/test/unit/container/test_sync.py b/test/unit/container/test_sync.py index 9cb56d2d05..ef4a4f5a82 100644 --- a/test/unit/container/test_sync.py +++ b/test/unit/container/test_sync.py @@ -855,6 +855,8 @@ class TestContainerSync(unittest.TestCase): def _test_container_sync_row_put(self, realm, realm_key): orig_uuid = sync.uuid orig_put_object = sync.put_object + orig_head_object = sync.head_object + try: class FakeUUID(object): class uuid4(object): @@ -891,6 +893,7 @@ class TestContainerSync(unittest.TestCase): sync.put_object = fake_put_object expected_put_count = 0 + excepted_failure_count = 0 with mock.patch('swift.container.sync.InternalClient'): cs = sync.ContainerSync({}, container_ring=FakeRing(), @@ -911,6 +914,14 @@ class TestContainerSync(unittest.TestCase): # Success as everything says it worked. 
# simulate a row with data at 1.1 and later ctype, meta times created_at = ts_data.internal + '+1388+1388' # last modified = 1.2 + + def fake_object_in_rcontainer(row, sync_to, user_key, + broker, realm, realm_key): + return False + + orig_object_in_rcontainer = cs._object_in_remote_container + cs._object_in_remote_container = fake_object_in_rcontainer + self.assertTrue(cs.container_sync_row( {'deleted': False, 'name': 'object', @@ -935,6 +946,7 @@ class TestContainerSync(unittest.TestCase): iter('contents')) cs.swift.get_object = fake_get_object + # Success as everything says it worked, also checks 'date' and # 'last-modified' headers are removed and that 'etag' header is # stripped of double quotes. @@ -980,6 +992,7 @@ class TestContainerSync(unittest.TestCase): {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) self.assertEqual(cs.container_puts, expected_put_count) + excepted_failure_count += 1 self.assertEqual(len(exc), 1) self.assertEqual(str(exc[-1]), 'test exception') @@ -1003,6 +1016,7 @@ class TestContainerSync(unittest.TestCase): {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) self.assertEqual(cs.container_puts, expected_put_count) + excepted_failure_count += 1 self.assertEqual(len(exc), 1) self.assertEqual(str(exc[-1]), 'test client exception') @@ -1029,6 +1043,8 @@ class TestContainerSync(unittest.TestCase): {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) self.assertEqual(cs.container_puts, expected_put_count) + excepted_failure_count += 1 + self.assertEqual(cs.container_failures, excepted_failure_count) self.assertLogMessage('info', 'Unauth') def fake_put_object(*args, **kwargs): @@ -1044,6 +1060,8 @@ class TestContainerSync(unittest.TestCase): {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) self.assertEqual(cs.container_puts, expected_put_count) + excepted_failure_count += 1 + self.assertEqual(cs.container_failures, 
excepted_failure_count) self.assertLogMessage('info', 'Not found', 1) def fake_put_object(*args, **kwargs): @@ -1059,10 +1077,121 @@ class TestContainerSync(unittest.TestCase): {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) self.assertEqual(cs.container_puts, expected_put_count) + excepted_failure_count += 1 + self.assertEqual(cs.container_failures, excepted_failure_count) self.assertLogMessage('error', 'ERROR Syncing') + + # Test the following cases: + # remote has the same date and a put doesn't take place + # remote has more up to date copy and a put doesn't take place + # head_object returns ClientException(404) and a put takes place + # head_object returns other ClientException put doesn't take place + # and we get failure + # head_object returns other Exception put does not take place + # and we get failure + # remote returns old copy and a put takes place + test_row = {'deleted': False, + 'name': 'object', + 'created_at': timestamp.internal, + 'etag': '1111'} + test_info = {'account': 'a', + 'container': 'c', + 'storage_policy_index': 0} + + actual_puts = [] + + def fake_put_object(*args, **kwargs): + actual_puts.append((args, kwargs)) + + def fake_head_object(*args, **kwargs): + return ({'x-timestamp': '1.2'}, '') + + sync.put_object = fake_put_object + sync.head_object = fake_head_object + cs._object_in_remote_container = orig_object_in_rcontainer + self.assertTrue(cs.container_sync_row( + test_row, 'http://sync/to/path', + 'key', FakeContainerBroker('broker'), + test_info, + realm, realm_key)) + # No additional put has taken place + self.assertEqual(len(actual_puts), 0) + # No additional errors + self.assertEqual(cs.container_failures, excepted_failure_count) + + def fake_head_object(*args, **kwargs): + return ({'x-timestamp': '1.3'}, '') + + sync.head_object = fake_head_object + self.assertTrue(cs.container_sync_row( + test_row, 'http://sync/to/path', + 'key', FakeContainerBroker('broker'), + test_info, + realm, 
realm_key)) + # No additional put has taken place + self.assertEqual(len(actual_puts), 0) + # No additional errors + self.assertEqual(cs.container_failures, excepted_failure_count) + + actual_puts = [] + + def fake_head_object(*args, **kwargs): + raise ClientException('test client exception', http_status=404) + + sync.head_object = fake_head_object + self.assertTrue(cs.container_sync_row( + test_row, 'http://sync/to/path', + 'key', FakeContainerBroker('broker'), + test_info, realm, realm_key)) + # Additional put has taken place + self.assertEqual(len(actual_puts), 1) + # No additional errors + self.assertEqual(cs.container_failures, excepted_failure_count) + + def fake_head_object(*args, **kwargs): + raise ClientException('test client exception', http_status=401) + + sync.head_object = fake_head_object + self.assertFalse(cs.container_sync_row( + test_row, 'http://sync/to/path', + 'key', FakeContainerBroker('broker'), + test_info, realm, realm_key)) + # No additional put has taken place, failures increased + self.assertEqual(len(actual_puts), 1) + excepted_failure_count += 1 + self.assertEqual(cs.container_failures, excepted_failure_count) + + def fake_head_object(*args, **kwargs): + raise Exception() + + sync.head_object = fake_head_object + self.assertFalse(cs.container_sync_row( + test_row, + 'http://sync/to/path', + 'key', FakeContainerBroker('broker'), + test_info, realm, realm_key)) + # No additional put has taken place, failures increased + self.assertEqual(len(actual_puts), 1) + excepted_failure_count += 1 + self.assertEqual(cs.container_failures, excepted_failure_count) + + def fake_head_object(*args, **kwargs): + return ({'x-timestamp': '1.1'}, '') + + sync.head_object = fake_head_object + self.assertTrue(cs.container_sync_row( + test_row, 'http://sync/to/path', + 'key', FakeContainerBroker('broker'), + test_info, realm, realm_key)) + # Additional put has taken place + self.assertEqual(len(actual_puts), 2) + # No additional errors + 
self.assertEqual(cs.container_failures, excepted_failure_count) + finally: sync.uuid = orig_uuid sync.put_object = orig_put_object + sync.head_object = orig_head_object def test_select_http_proxy_None(self): From 1d03803a85ca50272071725518c7110e1b2dacb1 Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Tue, 15 Mar 2016 17:09:21 -0700 Subject: [PATCH 055/141] Auditor will clean up stale rsync tempfiles DiskFile already fills in the _ondisk_info attribute when it tries to open a diskfile - even if the DiskFile's fileset is not valid or deleted. During this process the rsync tempfiles would be discovered and logged, but no-one would attempt to clean them up - even if they were really old. Instead of logging and ignoring unexpected files when validate a DiskFile fileset we'll add unexpected files to the unexpected key in the _ondisk_info attribute. With a little bit of re-organization in the auditor's object_audit method to get things into a single return path we can add an unconditional check for unexpected files and remove those that are "old enough". Since the replicator will kill any rsync processes that are running longer than the configured rsync_timeout we know that any rsync tempfiles older than this can be deleted. Split unlink_older_than in common.utils into two functions to allow an explicit list of previously discovered paths to be passed in to avoid an extra listdir. Since the getmtime handling already ignores OSError there's less concern of race condition where a previous discovered unexpected file is reaped by rsync while we're attempting to clean it up. Update some doc on the new config option. 
Closes-Bug: #1554005 Change-Id: Id67681cb77f605e3491b8afcb9c69d769e154283 --- doc/manpages/object-server.conf.5 | 3 + doc/source/deployment_guide.rst | 5 + etc/object-server.conf-sample | 7 ++ swift/common/utils.py | 17 ++- swift/obj/auditor.py | 74 ++++++++++--- swift/obj/diskfile.py | 14 ++- swift/obj/replicator.py | 4 +- test/unit/common/test_utils.py | 81 ++++++++++++++ test/unit/obj/test_auditor.py | 176 ++++++++++++++++++++++++++++-- 9 files changed, 344 insertions(+), 37 deletions(-) diff --git a/doc/manpages/object-server.conf.5 b/doc/manpages/object-server.conf.5 index ac1a8889e3..2e58de32fb 100644 --- a/doc/manpages/object-server.conf.5 +++ b/doc/manpages/object-server.conf.5 @@ -499,6 +499,9 @@ and ensure that swift has read/write. The default is /var/cache/swift. Takes a comma separated list of ints. If set, the object auditor will increment a counter for every object whose size is <= to the given break points and report the result after a full scan. +.IP \fBrsync_tempfile_timeout\fR +Time elapsed in seconds before rsync tempfiles will be unlinked. Config value of "auto" +will try to use object-replicator's rsync_timeout + 900 or fall-back to 86400 (1 day). .RE diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 9743aef8b2..ec86c90c93 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -738,6 +738,11 @@ concurrency 1 The number of parallel processes zero_byte_files_per_second 50 object_size_stats recon_cache_path /var/cache/swift Path to recon cache +rsync_tempfile_timeout auto Time elapsed in seconds before rsync + tempfiles will be unlinked. Config value + of "auto" try to use object-replicator's + rsync_timeout + 900 or fallback to 86400 + (1 day). 
=========================== =================== ========================================== ------------------------------ diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample index 80731584ee..e01193bff2 100644 --- a/etc/object-server.conf-sample +++ b/etc/object-server.conf-sample @@ -306,6 +306,13 @@ use = egg:swift#recon # points and report the result after a full scan. # object_size_stats = +# The auditor will cleanup old rsync tempfiles after they are "old +# enough" to delete. You can configure the time elapsed in seconds +# before rsync tempfiles will be unlinked, or the default value of +# "auto" try to use object-replicator's rsync_timeout + 900 and fallback +# to 86400 (1 day). +# rsync_tempfile_timeout = auto + # Note: Put it at the beginning of the pipleline to profile all middleware. But # it is safer to put this after healthcheck. [filter:xprofile] diff --git a/swift/common/utils.py b/swift/common/utils.py index e975bf1ad2..210dd9f4e0 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -2122,10 +2122,21 @@ def unlink_older_than(path, mtime): Remove any file in a given path that that was last modified before mtime. :param path: path to remove file from - :mtime: timestamp of oldest file to keep + :param mtime: timestamp of oldest file to keep """ - for fname in listdir(path): - fpath = os.path.join(path, fname) + filepaths = map(functools.partial(os.path.join, path), listdir(path)) + return unlink_paths_older_than(filepaths, mtime) + + +def unlink_paths_older_than(filepaths, mtime): + """ + Remove any files from the given list that that were + last modified before mtime. 
+ + :param filepaths: a list of strings, the full paths of files to check + :param mtime: timestamp of oldest file to keep + """ + for fpath in filepaths: try: if os.path.getmtime(fpath) < mtime: os.unlink(fpath) diff --git a/swift/obj/auditor.py b/swift/obj/auditor.py index db6e4f4988..3b5f2de785 100644 --- a/swift/obj/auditor.py +++ b/swift/obj/auditor.py @@ -18,18 +18,23 @@ import os import sys import time import signal +import re from random import shuffle from swift import gettext_ as _ from contextlib import closing from eventlet import Timeout -from swift.obj import diskfile -from swift.common.utils import get_logger, ratelimit_sleep, dump_recon_cache, \ - list_from_csv, listdir +from swift.obj import diskfile, replicator +from swift.common.utils import ( + get_logger, ratelimit_sleep, dump_recon_cache, list_from_csv, listdir, + unlink_paths_older_than, readconf, config_auto_int_value) from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist from swift.common.daemon import Daemon from swift.common.storage_policy import POLICIES +# This matches rsync tempfiles, like "..data.Xy095a" +RE_RSYNC_TEMPFILE = re.compile(r'^\..*\.([a-zA-Z0-9_]){6}$') + class AuditorWorker(object): """Walk through file system to audit objects""" @@ -42,6 +47,27 @@ class AuditorWorker(object): self.max_files_per_second = float(conf.get('files_per_second', 20)) self.max_bytes_per_second = float(conf.get('bytes_per_second', 10000000)) + try: + # ideally unless ops overrides the rsync_tempfile_timeout in the + # auditor section we can base our behavior on whatever they + # configure for their replicator + replicator_config = readconf(self.conf['__file__'], + 'object-replicator') + except (KeyError, SystemExit): + # if we can't parse the real config (generally a KeyError on + # __file__, or SystemExit on no object-replicator section) we use + # a very conservative default + default = 86400 + else: + replicator_rsync_timeout = int(replicator_config.get( + 'rsync_timeout', 
replicator.DEFAULT_RSYNC_TIMEOUT)) + # Here we can do some light math for ops and use the *replicator's* + # rsync_timeout (plus 15 mins to avoid deleting local tempfiles + # before the remote replicator kills it's rsync) + default = replicator_rsync_timeout + 900 + self.rsync_tempfile_timeout = config_auto_int_value( + self.conf.get('rsync_tempfile_timeout'), default) + self.auditor_type = 'ALL' self.zero_byte_only_at_fps = zero_byte_only_at_fps if self.zero_byte_only_at_fps: @@ -200,34 +226,46 @@ class AuditorWorker(object): raise DiskFileQuarantined(msg) diskfile_mgr = self.diskfile_router[location.policy] + # this method doesn't normally raise errors, even if the audit + # location does not exist; if this raises an unexpected error it + # will get logged in failsafe + df = diskfile_mgr.get_diskfile_from_audit_location(location) + reader = None try: - df = diskfile_mgr.get_diskfile_from_audit_location(location) with df.open(): metadata = df.get_metadata() obj_size = int(metadata['Content-Length']) if self.stats_sizes: self.record_stats(obj_size) - if self.zero_byte_only_at_fps and obj_size: - self.passes += 1 - return - reader = df.reader(_quarantine_hook=raise_dfq) - with closing(reader): - for chunk in reader: - chunk_len = len(chunk) - self.bytes_running_time = ratelimit_sleep( - self.bytes_running_time, - self.max_bytes_per_second, - incr_by=chunk_len) - self.bytes_processed += chunk_len - self.total_bytes_processed += chunk_len + if obj_size and not self.zero_byte_only_at_fps: + reader = df.reader(_quarantine_hook=raise_dfq) + if reader: + with closing(reader): + for chunk in reader: + chunk_len = len(chunk) + self.bytes_running_time = ratelimit_sleep( + self.bytes_running_time, + self.max_bytes_per_second, + incr_by=chunk_len) + self.bytes_processed += chunk_len + self.total_bytes_processed += chunk_len except DiskFileNotExist: - return + pass except DiskFileQuarantined as err: self.quarantines += 1 self.logger.error(_('ERROR Object %(obj)s failed audit 
and was' ' quarantined: %(err)s'), {'obj': location, 'err': err}) self.passes += 1 + # _ondisk_info attr is initialized to None and filled in by open + ondisk_info_dict = df._ondisk_info or {} + if 'unexpected' in ondisk_info_dict: + is_rsync_tempfile = lambda fpath: RE_RSYNC_TEMPFILE.match( + os.path.basename(fpath)) + rsync_tempfile_paths = filter(is_rsync_tempfile, + ondisk_info_dict['unexpected']) + mtime = time.time() - self.rsync_tempfile_timeout + unlink_paths_older_than(rsync_tempfile_paths, mtime) class ObjectAuditor(Daemon): diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index 881371b07c..7ea2f25109 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -741,7 +741,10 @@ class BaseDiskFileManager(object): # dicts for the files having that extension. The file_info dicts are of # the form returned by parse_on_disk_filename, with the filename added. # Each list is sorted in reverse timestamp order. - # + + # the results dict is used to collect results of file filtering + results = {} + # The exts dict will be modified during subsequent processing as files # are removed to be discarded or ignored. exts = defaultdict(list) @@ -752,16 +755,15 @@ class BaseDiskFileManager(object): file_info['filename'] = afile exts[file_info['ext']].append(file_info) except DiskFileError as e: - self.logger.warning('Unexpected file %s: %s' % - (os.path.join(datadir or '', afile), e)) + file_path = os.path.join(datadir or '', afile) + self.logger.warning('Unexpected file %s: %s', + file_path, e) + results.setdefault('unexpected', []).append(file_path) for ext in exts: # For each extension sort files into reverse chronological order. 
exts[ext] = sorted( exts[ext], key=lambda info: info['timestamp'], reverse=True) - # the results dict is used to collect results of file filtering - results = {} - if exts.get('.ts'): # non-tombstones older than or equal to latest tombstone are # obsolete diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py index aa38407d35..ff89b3213a 100644 --- a/swift/obj/replicator.py +++ b/swift/obj/replicator.py @@ -41,6 +41,7 @@ from swift.obj import ssync_sender from swift.obj.diskfile import DiskFileManager, get_data_dir, get_tmp_dir from swift.common.storage_policy import POLICIES, REPL_POLICY +DEFAULT_RSYNC_TIMEOUT = 900 hubs.use_hub(get_hub()) @@ -76,7 +77,8 @@ class ObjectReplicator(Daemon): self.partition_times = [] self.interval = int(conf.get('interval') or conf.get('run_pause') or 30) - self.rsync_timeout = int(conf.get('rsync_timeout', 900)) + self.rsync_timeout = int(conf.get('rsync_timeout', + DEFAULT_RSYNC_TIMEOUT)) self.rsync_io_timeout = conf.get('rsync_io_timeout', '30') self.rsync_bwlimit = conf.get('rsync_bwlimit', '0') self.rsync_compress = config_true_value( diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 14e3aa8696..409990ad4a 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -18,6 +18,7 @@ from __future__ import print_function from test.unit import temptree import ctypes +import contextlib import errno import eventlet import eventlet.event @@ -3422,6 +3423,86 @@ class ResellerConfReader(unittest.TestCase): self.assertEqual('pre2_group', options['PRE2_'].get('require_group')) +class TestUnlinkOlder(unittest.TestCase): + + def setUp(self): + self.tempdir = mkdtemp() + self.mtime = {} + + def tearDown(self): + rmtree(self.tempdir, ignore_errors=True) + + def touch(self, fpath, mtime=None): + self.mtime[fpath] = mtime or time.time() + open(fpath, 'w') + + @contextlib.contextmanager + def high_resolution_getmtime(self): + orig_getmtime = os.path.getmtime + + def 
mock_getmtime(fpath): + mtime = self.mtime.get(fpath) + if mtime is None: + mtime = orig_getmtime(fpath) + return mtime + + with mock.patch('os.path.getmtime', mock_getmtime): + yield + + def test_unlink_older_than_path_not_exists(self): + path = os.path.join(self.tempdir, 'does-not-exist') + # just make sure it doesn't blow up + utils.unlink_older_than(path, time.time()) + + def test_unlink_older_than_file(self): + path = os.path.join(self.tempdir, 'some-file') + self.touch(path) + with self.assertRaises(OSError) as ctx: + utils.unlink_older_than(path, time.time()) + self.assertEqual(ctx.exception.errno, errno.ENOTDIR) + + def test_unlink_older_than_now(self): + self.touch(os.path.join(self.tempdir, 'test')) + with self.high_resolution_getmtime(): + utils.unlink_older_than(self.tempdir, time.time()) + self.assertEqual([], os.listdir(self.tempdir)) + + def test_unlink_not_old_enough(self): + start = time.time() + self.touch(os.path.join(self.tempdir, 'test')) + with self.high_resolution_getmtime(): + utils.unlink_older_than(self.tempdir, start) + self.assertEqual(['test'], os.listdir(self.tempdir)) + + def test_unlink_mixed(self): + self.touch(os.path.join(self.tempdir, 'first')) + cutoff = time.time() + self.touch(os.path.join(self.tempdir, 'second')) + with self.high_resolution_getmtime(): + utils.unlink_older_than(self.tempdir, cutoff) + self.assertEqual(['second'], os.listdir(self.tempdir)) + + def test_unlink_paths(self): + paths = [] + for item in ('first', 'second', 'third'): + path = os.path.join(self.tempdir, item) + self.touch(path) + paths.append(path) + # don't unlink everyone + with self.high_resolution_getmtime(): + utils.unlink_paths_older_than(paths[:2], time.time()) + self.assertEqual(['third'], os.listdir(self.tempdir)) + + def test_unlink_empty_paths(self): + # just make sure it doesn't blow up + utils.unlink_paths_older_than([], time.time()) + + def test_unlink_not_exists_paths(self): + path = os.path.join(self.tempdir, 'does-not-exist') + # 
just make sure it doesn't blow up + utils.unlink_paths_older_than([path], time.time()) + + class TestSwiftInfo(unittest.TestCase): def tearDown(self): diff --git a/test/unit/obj/test_auditor.py b/test/unit/obj/test_auditor.py index fed9c4b211..db37e49ab1 100644 --- a/test/unit/obj/test_auditor.py +++ b/test/unit/obj/test_auditor.py @@ -22,15 +22,18 @@ import string from shutil import rmtree from hashlib import md5 from tempfile import mkdtemp -from test.unit import FakeLogger, patch_policies, make_timestamp_iter, \ - DEFAULT_TEST_EC_TYPE -from swift.obj import auditor -from swift.obj.diskfile import DiskFile, write_metadata, invalidate_hash, \ - get_data_dir, DiskFileManager, ECDiskFileManager, AuditLocation, \ - clear_auditor_status, get_auditor_status -from swift.common.utils import mkdirs, normalize_timestamp, Timestamp -from swift.common.storage_policy import ECStoragePolicy, StoragePolicy, \ - POLICIES +import textwrap +from test.unit import (FakeLogger, patch_policies, make_timestamp_iter, + DEFAULT_TEST_EC_TYPE) +from swift.obj import auditor, replicator +from swift.obj.diskfile import ( + DiskFile, write_metadata, invalidate_hash, get_data_dir, + DiskFileManager, ECDiskFileManager, AuditLocation, clear_auditor_status, + get_auditor_status) +from swift.common.utils import ( + mkdirs, normalize_timestamp, Timestamp, readconf) +from swift.common.storage_policy import ( + ECStoragePolicy, StoragePolicy, POLICIES) _mocked_policies = [ @@ -275,6 +278,161 @@ class TestAuditor(unittest.TestCase): policy=POLICIES.legacy)) self.assertEqual(auditor_worker.errors, 1) + def test_audit_location_gets_quarantined(self): + auditor_worker = auditor.AuditorWorker(self.conf, self.logger, + self.rcache, self.devices) + + location = AuditLocation(self.disk_file._datadir, 'sda', '0', + policy=self.disk_file.policy) + + # instead of a datadir, we'll make a file! 
+ mkdirs(os.path.dirname(self.disk_file._datadir)) + open(self.disk_file._datadir, 'w') + + # after we turn the crank ... + auditor_worker.object_audit(location) + + # ... it should get quarantined + self.assertFalse(os.path.exists(self.disk_file._datadir)) + self.assertEqual(1, auditor_worker.quarantines) + + def test_rsync_tempfile_timeout_auto_option(self): + # if we don't have access to the replicator config section we'll use + # our default + auditor_worker = auditor.AuditorWorker(self.conf, self.logger, + self.rcache, self.devices) + self.assertEqual(auditor_worker.rsync_tempfile_timeout, 86400) + # if the rsync_tempfile_timeout option is set explicitly we use that + self.conf['rsync_tempfile_timeout'] = '1800' + auditor_worker = auditor.AuditorWorker(self.conf, self.logger, + self.rcache, self.devices) + self.assertEqual(auditor_worker.rsync_tempfile_timeout, 1800) + # if we have a real config we can be a little smarter + config_path = os.path.join(self.testdir, 'objserver.conf') + stub_config = """ + [object-auditor] + rsync_tempfile_timeout = auto + """ + with open(config_path, 'w') as f: + f.write(textwrap.dedent(stub_config)) + # the Daemon loader will hand the object-auditor config to the + # auditor who will build the workers from it + conf = readconf(config_path, 'object-auditor') + auditor_worker = auditor.AuditorWorker(conf, self.logger, + self.rcache, self.devices) + # if there is no object-replicator section we still have to fall back + # to default because we can't parse the config for that section! 
+ self.assertEqual(auditor_worker.rsync_tempfile_timeout, 86400) + stub_config = """ + [object-replicator] + [object-auditor] + rsync_tempfile_timeout = auto + """ + with open(os.path.join(self.testdir, 'objserver.conf'), 'w') as f: + f.write(textwrap.dedent(stub_config)) + conf = readconf(config_path, 'object-auditor') + auditor_worker = auditor.AuditorWorker(conf, self.logger, + self.rcache, self.devices) + # if the object-replicator section will parse but does not override + # the default rsync_timeout we assume the default rsync_timeout value + # and add 15mins + self.assertEqual(auditor_worker.rsync_tempfile_timeout, + replicator.DEFAULT_RSYNC_TIMEOUT + 900) + stub_config = """ + [DEFAULT] + reclaim_age = 1209600 + [object-replicator] + rsync_timeout = 3600 + [object-auditor] + rsync_tempfile_timeout = auto + """ + with open(os.path.join(self.testdir, 'objserver.conf'), 'w') as f: + f.write(textwrap.dedent(stub_config)) + conf = readconf(config_path, 'object-auditor') + auditor_worker = auditor.AuditorWorker(conf, self.logger, + self.rcache, self.devices) + # if there is an object-replicator section with a rsync_timeout + # configured we'll use that value (3600) + 900 + self.assertEqual(auditor_worker.rsync_tempfile_timeout, 3600 + 900) + + def test_inprogress_rsync_tempfiles_get_cleaned_up(self): + auditor_worker = auditor.AuditorWorker(self.conf, self.logger, + self.rcache, self.devices) + + location = AuditLocation(self.disk_file._datadir, 'sda', '0', + policy=self.disk_file.policy) + + data = 'VERIFY' + etag = md5() + timestamp = str(normalize_timestamp(time.time())) + with self.disk_file.create() as writer: + writer.write(data) + etag.update(data) + metadata = { + 'ETag': etag.hexdigest(), + 'X-Timestamp': timestamp, + 'Content-Length': str(os.fstat(writer._fd).st_size), + } + writer.put(metadata) + writer.commit(Timestamp(timestamp)) + + datafilename = None + datadir_files = os.listdir(self.disk_file._datadir) + for filename in datadir_files: + if 
filename.endswith('.data'): + datafilename = filename + break + else: + self.fail('Did not find .data file in %r: %r' % + (self.disk_file._datadir, datadir_files)) + rsynctempfile_path = os.path.join(self.disk_file._datadir, + '.%s.9ILVBL' % datafilename) + open(rsynctempfile_path, 'w') + # sanity check we have an extra file + rsync_files = os.listdir(self.disk_file._datadir) + self.assertEqual(len(datadir_files) + 1, len(rsync_files)) + + # and after we turn the crank ... + auditor_worker.object_audit(location) + + # ... we've still got the rsync file + self.assertEqual(rsync_files, os.listdir(self.disk_file._datadir)) + + # and we'll keep it - depending on the rsync_tempfile_timeout + self.assertEqual(auditor_worker.rsync_tempfile_timeout, 86400) + self.conf['rsync_tempfile_timeout'] = '3600' + auditor_worker = auditor.AuditorWorker(self.conf, self.logger, + self.rcache, self.devices) + self.assertEqual(auditor_worker.rsync_tempfile_timeout, 3600) + now = time.time() + 1900 + with mock.patch('swift.obj.auditor.time.time', + return_value=now): + auditor_worker.object_audit(location) + self.assertEqual(rsync_files, os.listdir(self.disk_file._datadir)) + + # but *tomorrow* when we run + tomorrow = time.time() + 86400 + with mock.patch('swift.obj.auditor.time.time', + return_value=tomorrow): + auditor_worker.object_audit(location) + + # ... we'll totally clean that stuff up! 
+ self.assertEqual(datadir_files, os.listdir(self.disk_file._datadir)) + + # but if we have some random crazy file in there + random_crazy_file_path = os.path.join(self.disk_file._datadir, + '.random.crazy.file') + open(random_crazy_file_path, 'w') + + tomorrow = time.time() + 86400 + with mock.patch('swift.obj.auditor.time.time', + return_value=tomorrow): + auditor_worker.object_audit(location) + + # that's someone elses problem + self.assertIn(os.path.basename(random_crazy_file_path), + os.listdir(self.disk_file._datadir)) + def test_generic_exception_handling(self): auditor_worker = auditor.AuditorWorker(self.conf, self.logger, self.rcache, self.devices) From 51bea3943f9eadb179dc7327d42dcb9062a3a8e5 Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Mon, 21 Mar 2016 09:52:23 +0000 Subject: [PATCH 056/141] Ignore files in the devices directory when auditing objects The object auditor raises an exception if there are some files in /srv/node (or any other defined "devices" directory). This change simply skips any file in the devices directory when generating locations for the object auditor. 
Change-Id: I934594994adc577799723edb6c5648685682a9e7 --- swift/obj/diskfile.py | 10 +++++++++- test/unit/obj/test_diskfile.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index 23b94aa2fb..6b27bd9729 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -296,7 +296,15 @@ def object_audit_location_generator(devices, mount_check=True, logger=None, _('Skipping %s as it is not mounted'), device) continue # loop through object dirs for all policies - for dir_ in os.listdir(os.path.join(devices, device)): + device_dir = os.path.join(devices, device) + try: + dirs = os.listdir(device_dir) + except OSError as e: + if logger: + logger.debug( + _('Skipping %s: %s') % (device_dir, e.strerror)) + continue + for dir_ in dirs: if not dir_.startswith(DATADIR_BASE): continue try: diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index 9829eb868e..cce150432d 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -402,6 +402,36 @@ class TestObjectAuditLocationGenerator(unittest.TestCase): 'Skipping %s as it is not mounted', 'sdq') + def test_skipping_files(self): + with temptree([]) as tmpdir: + os.makedirs(os.path.join(tmpdir, "sdp", "objects", + "2607", "df3", + "ec2871fe724411f91787462f97d30df3")) + with open(os.path.join(tmpdir, "garbage"), "wb") as fh: + fh.write('') + + locations = [ + (loc.path, loc.device, loc.partition, loc.policy) + for loc in diskfile.object_audit_location_generator( + devices=tmpdir, mount_check=False)] + + self.assertEqual( + locations, + [(os.path.join(tmpdir, "sdp", "objects", + "2607", "df3", + "ec2871fe724411f91787462f97d30df3"), + "sdp", "2607", POLICIES[0])]) + + # Do it again, this time with a logger. 
+ ml = mock.MagicMock() + locations = [ + (loc.path, loc.device, loc.partition, loc.policy) + for loc in diskfile.object_audit_location_generator( + devices=tmpdir, mount_check=False, logger=ml)] + ml.debug.assert_called_once_with( + 'Skipping %s: Not a directory' % + os.path.join(tmpdir, "garbage")) + def test_only_catch_expected_errors(self): # Crazy exceptions should still escape object_audit_location_generator # so that errors get logged and a human can see what's going wrong; From 5d00ce9e3a1f5e32ae91c78b6bdfd953658ab984 Mon Sep 17 00:00:00 2001 From: John Dickinson Date: Mon, 21 Mar 2016 22:03:34 -0700 Subject: [PATCH 057/141] 2.7.0 authors and changelog updates Change-Id: I16ad0c61b048921ca01fa96862ae7eea0eec6017 --- .mailmap | 8 +++ AUTHORS | 20 +++++++- CHANGELOG | 148 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 174 insertions(+), 2 deletions(-) diff --git a/.mailmap b/.mailmap index 6827a2d4a3..b640039bfb 100644 --- a/.mailmap +++ b/.mailmap @@ -93,3 +93,11 @@ Richard Hawkins Ondrej Novy Peter Lisak Ke Liang +Daisuke Morita +Andreas Jaeger +Hugo Kuo +Gage Hugo +Oshrit Feder +Larry Rensing +Ben Keller +Chaozhe Chen diff --git a/AUTHORS b/AUTHORS index f3225dff2a..f299597cdb 100644 --- a/AUTHORS +++ b/AUTHORS @@ -13,7 +13,7 @@ Jay Payne (letterj@gmail.com) Will Reese (wreese@gmail.com) Chuck Thier (cthier@gmail.com) -CORE Emeritus +Core Emeritus ------------- Chmouel Boudjnah (chmouel@enovance.com) Florian Hines (syn@ronin.io) @@ -33,6 +33,7 @@ Joe Arnold (joe@swiftstack.com) Ionuț Arțăriși (iartarisi@suse.cz) Minwoo Bae (minwoob@us.ibm.com) Bob Ball (bob.ball@citrix.com) +Christopher Bartz (bartz@dkrz.de) Christian Berendt (berendt@b1-systems.de) Luis de Bethencourt (luis@debethencourt.com) Keshava Bharadwaj (kb.sankethi@gmail.com) @@ -54,6 +55,7 @@ Emmanuel Cazenave (contact@emcaz.fr) Mahati Chamarthy (mahati.chamarthy@gmail.com) Zap Chang (zapchang@gmail.com) François Charlier (francois.charlier@enovance.com) +Chaozhe Chen 
(chaozhe.chen@easystack.cn) Ray Chen (oldsharp@163.com) Harshit Chitalia (harshit@acelio.com) Brian Cline (bcline@softlayer.com) @@ -61,6 +63,7 @@ Alistair Coles (alistair.coles@hpe.com) Clément Contini (ccontini@cloudops.com) Brian Curtin (brian.curtin@rackspace.com) Thiago da Silva (thiago@redhat.com) +dangming (dangming@unitedstack.com) Julien Danjou (julien@danjou.info) Paul Dardeau (paul.dardeau@intel.com) Zack M. Davis (zdavis@swiftstack.com) @@ -86,9 +89,11 @@ Filippo Giunchedi (fgiunchedi@wikimedia.org) Mark Gius (launchpad@markgius.com) David Goetz (david.goetz@rackspace.com) Tushar Gohad (tushar.gohad@intel.com) +Thomas Goirand (thomas@goirand.fr) Jonathan Gonzalez V (jonathan.abdiel@gmail.com) Joe Gordon (jogo@cloudscaling.com) ChangBo Guo(gcb) (eric.guo@easystack.cn) +Ankur Gupta (ankur.gupta@intel.com) David Hadas (davidh@il.ibm.com) Andrew Hale (andy@wwwdata.eu) Soren Hansen (soren@linux2go.dk) @@ -106,6 +111,7 @@ Charles Hsu (charles0126@gmail.com) Joanna H. Huang (joanna.huitzu.huang@gmail.com) Kun Huang (gareth@unitedstack.com) Bill Huber (wbhuber@us.ibm.com) +Gage Hugo (gh159m@att.com) Matthieu Huin (mhu@enovance.com) Hodong Hwang (hodong.hwang@kt.com) Motonobu Ichimura (motonobu@gmail.com) @@ -127,6 +133,7 @@ Ilya Kharin (ikharin@mirantis.com) Dae S. 
Kim (dae@velatum.com) Nathan Kinder (nkinder@redhat.com) Eugene Kirpichov (ekirpichov@gmail.com) +Ben Keller (bjkeller@us.ibm.com) Leah Klearman (lklrmn@gmail.com) Martin Kletzander (mkletzan@redhat.com) Jaivish Kothari (jaivish.kothari@nectechnologies.in) @@ -134,6 +141,7 @@ Steve Kowalik (steven@wedontsleep.org) Sergey Kraynev (skraynev@mirantis.com) Sushil Kumar (sushil.kumar2@globallogic.com) Madhuri Kumari (madhuri.rai07@gmail.com) +Hugo Kuo (tonytkdk@gmail.com) Steven Lang (Steven.Lang@hgst.com) Gonéri Le Bouder (goneri.lebouder@enovance.com) Romain Le Disez (romain.ledisez@ovh.net) @@ -143,6 +151,8 @@ Thomas Leaman (thomas.leaman@hp.com) Eohyung Lee (liquidnuker@gmail.com) Zhao Lei (zhaolei@cn.fujitsu.com) Jamie Lennox (jlennox@redhat.com) +Cheng Li (shcli@cn.ibm.com) +Mingyu Li (li.mingyu@99cloud.net) Tong Li (litong01@us.ibm.com) Ke Liang (ke.liang@easystack.cn) Peter Lisak (peter.lisak@firma.seznam.cz) @@ -161,6 +171,7 @@ Juan J. Martinez (juan@memset.com) Marcelo Martins (btorch@gmail.com) Nakagawa Masaaki (nakagawamsa@nttdata.co.jp) Dolph Mathews (dolph.mathews@gmail.com) +Tomas Matlocha (tomas.matlocha@firma.seznam.cz) Kenichiro Matsuda (matsuda_kenichi@jp.fujitsu.com) Michael Matur (michael.matur@gmail.com) Donagh McCabe (donagh.mccabe@hpe.com) @@ -171,7 +182,7 @@ Samuel Merritt (sam@swiftstack.com) Stephen Milton (milton@isomedia.com) Jola Mirecka (jola.mirecka@hp.com) Kazuhiro Miyahara (miyahara.kazuhiro@lab.ntt.co.jp) -Daisuke Morita (morita.daisuke@lab.ntt.co.jp) +Daisuke Morita (morita.daisuke@ntti3.com) Dirk Mueller (dirk@dmllr.de) Takashi Natsume (natsume.takashi@lab.ntt.co.jp) Russ Nelson (russ@crynwr.com) @@ -198,11 +209,13 @@ Sivasathurappan Radhakrishnan (siva.radhakrishnan@intel.com) Sarvesh Ranjan (saranjan@cisco.com) Falk Reimann (falk.reimann@sap.com) Brian Reitz (brian.reitz@oracle.com) +Qiaowei Ren (qiaowei.ren@intel.com) Felipe Reyes (freyes@tty.cl) Janie Richling (jrichli@us.ibm.com) Matt Riedemann (mriedem@us.ibm.com) Li Riqiang 
(lrqrun@gmail.com) Rafael Rivero (rafael@cloudscaling.com) +Larry Rensing (lr699s@att.com) Victor Rodionov (victor.rodionov@nexenta.com) Eran Rom (eranr@il.ibm.com) Aaron Rosen (arosen@nicira.com) @@ -211,6 +224,7 @@ Hamdi Roumani (roumani@ca.ibm.com) Shilla Saebi (shilla.saebi@gmail.com) Atsushi Sakai (sakaia@jp.fujitsu.com) Cristian A Sanchez (cristian.a.sanchez@intel.com) +Olga Saprycheva (osapryc@us.ibm.com) Christian Schwede (cschwede@redhat.com) Mark Seger (mark.seger@hpe.com) Azhagu Selvan SP (tamizhgeek@gmail.com) @@ -223,6 +237,7 @@ Michael Shuler (mshuler@gmail.com) David Moreau Simard (dmsimard@iweb.com) Scott Simpson (sasimpson@gmail.com) Pradeep Kumar Singh (pradeep.singh@nectechnologies.in) +Sarafraj Singh (Sarafraj.Singh@intel.com) Liu Siqi (meizu647@gmail.com) Adrian Smith (adrian_f_smith@dell.com) Jon Snitow (otherjon@swiftstack.com) @@ -259,6 +274,7 @@ Yaguang Wang (yaguang.wang@intel.com) Chris Wedgwood (cw@f00f.org) Conrad Weidenkeller (conrad.weidenkeller@rackspace.com) Doug Weimer (dweimer@gmail.com) +Andrew Welleck (awellec@us.ibm.com) Wu Wenxiang (wu.wenxiang@99cloud.net) Cory Wright (cory.wright@rackspace.com) Ye Jia Xu (xyj.asmy@gmail.com) diff --git a/CHANGELOG b/CHANGELOG index c1b335d548..799382b38e 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,3 +1,151 @@ +swift (2.7.0, OpenStack Mitaka) + + * Bump PyECLib requirement to >= 1.2.0 + + * Update container on fast-POST + + "Fast-POST" is the mode where `object_post_as_copy` is set to + `False` in the proxy server config. This mode now allows for + fast, efficient updates of metadata without needing to fully + recopy the contents of the object. While the default still is + `object_post_as_copy` as True, the plan is to change the default + to False and then deprecate post-as-copy functionality in later + releases. Fast-POST now supports container-sync functionality. + + * Add concurrent reads option to proxy. 
+ + This change adds 2 new parameters to enable and control concurrent + GETs in Swift, these are `concurrent_gets` and `concurrency_timeout`. + + `concurrent_gets` allows you to turn on or off concurrent + GETs; when on, it will set the GET/HEAD concurrency to the + replica count. And in the case of EC HEADs it will set it to + ndata. The proxy will then serve only the first valid source to + respond. This applies to all account, container, and replicated + object GETs and HEADs. For EC only HEAD requests are affected. + The default for `concurrent_gets` is off. + + `concurrency_timeout` is related to `concurrent_gets` and is + the amount of time to wait before firing the next thread. A + value of 0 will fire at the same time (fully concurrent), but + setting another value will stagger the firing allowing you the + ability to give a node a short chance to respond before firing + the next. This value is a float and should be somewhere between + 0 and `node_timeout`. The default is `conn_timeout`, meaning by + default it will stagger the firing. + + * Added an operational procedures guide to the docs. It can be + found at http://swift.openstack.org/ops_runbook/index.html and + includes information on detecting and handling day-to-day + operational issues in a Swift cluster. + + * Make `handoffs_first` a more useful mode for the object replicator. + + The `handoffs_first` replication mode is used during periods of + problematic cluster behavior (e.g. full disks) when replication + needs to quickly drain partitions from a handoff node and move + them to a primary node. + + Previously, `handoffs_first` would sort that handoff work before + "normal" replication jobs, but the normal replication work could + take quite some time and result in handoffs not being drained + quickly enough. 
+ + In order to focus on getting handoff partitions off the node + `handoffs_first` mode will now abort the current replication + sweep before attempting any primary suffix syncing if any of the + handoff partitions were not removed for any reason - and start + over with replication of handoffs jobs as the highest priority. + + Note that `handoffs_first` being enabled will emit a warning on + start up, even if no handoff jobs fail, because of the negative + impact it can have during normal operations by dog-piling on a + node that was temporarily unavailable. + + * By default, inbound `X-Timestamp` headers are now disallowed + (except when in an authorized container-sync request). This + header is useful for allowing data migration from other storage + systems to Swift and keeping the original timestamp of the data. + If you have this migration use case (or any other requirement on + allowing the clients to set an object's timestamp), set the + `shunt_inbound_x_timestamp` config variable to False in the + gatekeeper middleware config section of the proxy server config. + + * Requesting a SLO manifest file with the query parameters + "?multipart-manifest=get&format=raw" will return the contents of + the manifest in the format as was originally sent by the client. + The "format=raw" is new. + + * Static web page listings can now be rendered with a custom + label. By default listings are rendered with a label of: + "Listing of /v1/<account>/<container>/<path>". This change adds + a new custom metadata key/value pair + `X-Container-Meta-Web-Listings-Label: My Label` that when set, + will cause the following: "Listing of My Label/<path>" to be + rendered instead. + + * Previously, static large objects (SLOs) had a minimum segment + size (default to 1MiB). This limit has been removed, but small + segments will be ratelimited.
The config parameter + `rate_limit_under_size` controls the definition of "small" + segments (1MiB by default), and `rate_limit_segments_per_sec` + controls how many segments per second can be served (default is 1). + With the default values, the effective behavior is identical to the + previous behavior when serving SLOs. + + * Container sync has been improved to perform a HEAD on the remote + side of the sync for each object being synced. If the object + exists on the remote side, container-sync will no longer + transfer the object, thus significantly lowering the network + requirements to use the feature. + + * The object auditor will now clean up any old, stale rsync temp + files that it finds. These rsync temp files are left if the + rsync process fails without completing a full transfer of an + object. Since these files can be large, the temp files may end + up filling a disk. The new auditor functionality will reap these + rsync temp files if they are old. The new object-auditor config + variable `rsync_tempfile_timeout` is the number of seconds old a + tempfile must be before it is reaped. By default, this variable + is set to "auto" or the rsync_timeout plus 900 seconds (falling + back to a value of 1 day). + + * The Erasure Code reconstruction process has been made more + efficient by not syncing data files when only the durable commit + file is missing. + + * Fixed a bug where 304 and 416 response may not have the right + Etag and Accept-Ranges headers when the object is stored in an + Erasure Coded policy. + + * Versioned writes now correctly stores the date of previous versions + using GMT instead of local time. + + * The deprecated Keystone middleware option is_admin has been removed. + + * Fixed log format in object auditor. + + * The zero-byte mode (ZBF) of the object auditor will now properly + observe the `--once` option. + + * Swift keeps track, internally, of "dirty" parts of the partition + keyspace with a "hashes.pkl" file. 
Operations on this file no + longer require a read-modify-write cycle and use a new + "hashes.invalid" file to track dirty partitions. This change + will improve end-user performance for PUT and DELETE operations. + + * The object replicator's succeeded and failed counts are now logged. + + * `swift-recon` can now query hosts by storage policy. + + * The log_statsd_host value can now be an IPv6 address or a hostname + which only resolves to an IPv6 address. + + * Erasure coded fragments now properly call fallocate to reserve disk + space before being written. + + * Various other minor bug fixes and improvements. + swift (2.6.0) * Dependency changes From a696c1e89ef1a1c7630c14bbdda3f6c8039bb05e Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Mon, 14 Dec 2015 16:03:43 +0100 Subject: [PATCH 058/141] Fix full_listing in internal_client The internal_client is used in swift-dispersion-report, and in case one has more than 10000 containers or objects these are not queried. This patch adds support to the internal_client to iterate over all containers/objects if the listing exceeds the default of 10000 entries and the argument full_listing=True is used. 
Closes-Bug: 1314817 Closes-Bug: 1525995 Change-Id: I6892390d72f70f1bc519b482d4f72603e1570163 --- swift/common/internal_client.py | 19 ++++++++++++++++++- test/unit/common/test_internal_client.py | 23 +++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/swift/common/internal_client.py b/swift/common/internal_client.py index d9911326f1..ecffa999e4 100644 --- a/swift/common/internal_client.py +++ b/swift/common/internal_client.py @@ -741,11 +741,25 @@ class SimpleClient(object): def base_request(self, method, container=None, name=None, prefix=None, headers=None, proxy=None, contents=None, full_listing=None, logger=None, additional_info=None, - timeout=None): + timeout=None, marker=None): # Common request method trans_start = time() url = self.url + if full_listing: + body_data = self.base_request(method, container, name, prefix, + headers, proxy, timeout=timeout, + marker=marker) + listing = body_data[1] + while listing: + marker = listing[-1]['name'] + listing = self.base_request(method, container, name, prefix, + headers, proxy, timeout=timeout, + marker=marker)[1] + if listing: + body_data[1].extend(listing) + return body_data + if headers is None: headers = {} @@ -762,6 +776,9 @@ class SimpleClient(object): if prefix: url += '&prefix=%s' % prefix + if marker: + url += '&marker=%s' % quote(marker) + req = urllib2.Request(url, headers=headers, data=contents) if proxy: proxy = urllib.parse.urlparse(proxy) diff --git a/test/unit/common/test_internal_client.py b/test/unit/common/test_internal_client.py index 68dbc3e18d..e4218a9e02 100644 --- a/test/unit/common/test_internal_client.py +++ b/test/unit/common/test_internal_client.py @@ -356,6 +356,29 @@ class TestInternalClient(unittest.TestCase): # sanity check self.assertEqual(body, resp_body) + def test_base_full_listing(self): + body1 = [{'name': 'a'}, {'name': "b"}, {'name': "c"}] + body2 = [{'name': 'd'}] + body3 = [] + + class FakeConn(object): + def __init__(self, body): + 
self.body = body + + def read(self): + return json.dumps(self.body) + + def info(self): + return {} + + mocked_func = 'swift.common.internal_client.urllib2.urlopen' + with mock.patch(mocked_func) as mock_urlopen: + mock_urlopen.side_effect = [ + FakeConn(body1), FakeConn(body2), FakeConn(body3)] + sc = internal_client.SimpleClient('http://0.0.0.0/') + _, resp_body = sc.base_request('GET', full_listing=True) + self.assertEqual(body1 + body2, resp_body) + def test_make_request_method_path_headers(self): class InternalClient(internal_client.InternalClient): def __init__(self): From 2f24fb9683a57b67348d65864d5af8c3a03dee67 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 23 Mar 2016 20:49:50 +0000 Subject: [PATCH 059/141] Check marker params in SimpleClient full listing requests Follow up for change [1] to add some assertions to check that marker param is included in sequential GET requests sent during a full listing. Extract multiple FakeConn class definitions to single class at module level and share between all classes. Also, explicitly unpack the return values from base request calls made in the full listing section of base_request, and explicitly return a list to make more consistent with rest of the method. 
[1] Change-Id: I6892390d72f70f1bc519b482d4f72603e1570163 Change-Id: Iad038709f46364b8324d25ac79be4317add79df5 --- swift/common/internal_client.py | 18 ++++----- test/unit/common/test_internal_client.py | 50 +++++++++++------------- 2 files changed, 32 insertions(+), 36 deletions(-) diff --git a/swift/common/internal_client.py b/swift/common/internal_client.py index ecffa999e4..b9c99a20f3 100644 --- a/swift/common/internal_client.py +++ b/swift/common/internal_client.py @@ -747,18 +747,18 @@ class SimpleClient(object): url = self.url if full_listing: - body_data = self.base_request(method, container, name, prefix, - headers, proxy, timeout=timeout, - marker=marker) - listing = body_data[1] + info, body_data = self.base_request( + method, container, name, prefix, headers, proxy, + timeout=timeout, marker=marker) + listing = body_data while listing: marker = listing[-1]['name'] - listing = self.base_request(method, container, name, prefix, - headers, proxy, timeout=timeout, - marker=marker)[1] + info, listing = self.base_request( + method, container, name, prefix, headers, proxy, + timeout=timeout, marker=marker) if listing: - body_data[1].extend(listing) - return body_data + body_data.extend(listing) + return [info, body_data] if headers is None: headers = {} diff --git a/test/unit/common/test_internal_client.py b/test/unit/common/test_internal_client.py index e4218a9e02..2d4c747179 100644 --- a/test/unit/common/test_internal_client.py +++ b/test/unit/common/test_internal_client.py @@ -34,6 +34,19 @@ from test.unit import with_tempdir, write_fake_ring, patch_policies from test.unit.common.middleware.helpers import FakeSwift +class FakeConn(object): + def __init__(self, body=None): + if body is None: + body = [] + self.body = body + + def read(self): + return json.dumps(self.body) + + def info(self): + return {} + + def not_sleep(seconds): pass @@ -339,17 +352,10 @@ class TestInternalClient(unittest.TestCase): # verify that base_request passes timeout arg on to 
urlopen body = {"some": "content"} - class FakeConn(object): - def read(self): - return json.dumps(body) - - def info(self): - return {} - for timeout in (0.0, 42.0, None): mocked_func = 'swift.common.internal_client.urllib2.urlopen' with mock.patch(mocked_func) as mock_urlopen: - mock_urlopen.side_effect = [FakeConn()] + mock_urlopen.side_effect = [FakeConn(body)] sc = internal_client.SimpleClient('http://0.0.0.0/') _, resp_body = sc.base_request('GET', timeout=timeout) mock_urlopen.assert_called_once_with(mock.ANY, timeout=timeout) @@ -361,23 +367,21 @@ class TestInternalClient(unittest.TestCase): body2 = [{'name': 'd'}] body3 = [] - class FakeConn(object): - def __init__(self, body): - self.body = body - - def read(self): - return json.dumps(self.body) - - def info(self): - return {} - mocked_func = 'swift.common.internal_client.urllib2.urlopen' with mock.patch(mocked_func) as mock_urlopen: mock_urlopen.side_effect = [ FakeConn(body1), FakeConn(body2), FakeConn(body3)] sc = internal_client.SimpleClient('http://0.0.0.0/') _, resp_body = sc.base_request('GET', full_listing=True) - self.assertEqual(body1 + body2, resp_body) + self.assertEqual(body1 + body2, resp_body) + self.assertEqual(3, mock_urlopen.call_count) + actual_requests = map( + lambda call: call[0][0], mock_urlopen.call_args_list) + self.assertEqual('/?format=json', actual_requests[0].get_selector()) + self.assertEqual( + '/?format=json&marker=c', actual_requests[1].get_selector()) + self.assertEqual( + '/?format=json&marker=d', actual_requests[2].get_selector()) def test_make_request_method_path_headers(self): class InternalClient(internal_client.InternalClient): @@ -1415,14 +1419,6 @@ class TestSimpleClient(unittest.TestCase): proxy = '%s://%s' % (scheme, proxy_host) url = 'https://127.0.0.1:1/a' - class FakeConn(object): - - def read(self): - return 'irrelevant' - - def info(self): - return {} - mocked = 'swift.common.internal_client.urllib2.urlopen' # module level methods From 
3407d737c705a7afedeed0159588ab4433a601f3 Mon Sep 17 00:00:00 2001 From: David Liu Date: Thu, 24 Mar 2016 16:08:19 +0800 Subject: [PATCH 060/141] Handle tempurl Content-Disposition header missing from HEAD Content-Disposition headers should make no difference between GET and HEAD according to HTTP rfc. Closes-Bug: #1539805 Change-Id: Ifa41a7cda2f321eb8e36420ede7912ed0a549712 --- swift/common/middleware/tempurl.py | 2 +- test/unit/common/middleware/test_tempurl.py | 25 +++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/swift/common/middleware/tempurl.py b/swift/common/middleware/tempurl.py index 7fece15b34..234820791c 100644 --- a/swift/common/middleware/tempurl.py +++ b/swift/common/middleware/tempurl.py @@ -400,7 +400,7 @@ class TempURL(object): def _start_response(status, headers, exc_info=None): headers = self._clean_outgoing_headers(headers) - if env['REQUEST_METHOD'] == 'GET' and status[0] == '2': + if env['REQUEST_METHOD'] in ('GET', 'HEAD') and status[0] == '2': # figure out the right value for content-disposition # 1) use the value from the query string # 2) use the value from the object metadata diff --git a/test/unit/common/middleware/test_tempurl.py b/test/unit/common/middleware/test_tempurl.py index d407ba58b1..0fc895f9e2 100644 --- a/test/unit/common/middleware/test_tempurl.py +++ b/test/unit/common/middleware/test_tempurl.py @@ -215,6 +215,31 @@ class TestTempURL(unittest.TestCase): resp = req.get_response(self.tempurl) self.assertEqual(resp.status_int, 200) + def test_head_and_get_headers_match(self): + method = 'HEAD' + expires = int(time() + 86400) + path = '/v1/a/c/o' + key = 'abc' + hmac_body = '%s\n%s\n%s' % (method, expires, path) + sig = hmac.new(key, hmac_body, sha1).hexdigest() + req = self._make_request(path, keys=[key], environ={ + 'REQUEST_METHOD': 'HEAD', + 'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' + % (sig, expires)}) + self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')])) + resp = 
req.get_response(self.tempurl) + + get_method = 'GET' + get_hmac_body = '%s\n%s\n%s' % (get_method, expires, path) + get_sig = hmac.new(key, get_hmac_body, sha1).hexdigest() + get_req = self._make_request(path, keys=[key], environ={ + 'REQUEST_METHOD': 'GET', + 'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' + % (get_sig, expires)}) + self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')])) + get_resp = get_req.get_response(self.tempurl) + self.assertEqual(resp.headers, get_resp.headers) + def test_get_valid_with_filename_and_inline(self): method = 'GET' expires = int(time() + 86400) From e97c4f794db7292a91135adb3fc3e1d6cce4dc84 Mon Sep 17 00:00:00 2001 From: Janie Richling Date: Sat, 16 Jan 2016 21:59:20 -0600 Subject: [PATCH 061/141] swift-[account|container]-info when disk is full Extended the use of the DatabaseBroker "stale_reads_ok" flag to the AccountBroker and ContainerBroker. Now checks for an sqlite3 error from the _commit_puts call that processes the pending files. If this error is raised, then the stale_reads_ok flag will be checked to determine how to proceed as opposed to simply raising. The first time that print_info is attempted, the flag will be false, but swift-[account|container]-info will check for the raised exception. If it was raised, then a warning is reported that the data may be stale, and another attempt will be made using the stale_reads_ok=True flag. 
Change-Id: I761526eef62327888c865d87a9caafa3e7eabab6 Closes-Bug: 1531302 --- bin/swift-account-info | 21 +++++-- bin/swift-container-info | 21 +++++-- swift/cli/info.py | 6 +- swift/common/db.py | 2 +- test/unit/account/test_backend.py | 92 +++++++++++++++++++++-------- test/unit/container/test_backend.py | 57 ++++++++++++++++++ 6 files changed, 164 insertions(+), 35 deletions(-) diff --git a/bin/swift-account-info b/bin/swift-account-info index 61c619900c..4d14ec1ebc 100755 --- a/bin/swift-account-info +++ b/bin/swift-account-info @@ -11,12 +11,28 @@ # License for the specific language governing permissions and limitations # under the License. +import sqlite3 import sys from optparse import OptionParser from swift.cli.info import print_info, InfoSystemExit +def run_print_info(args, opts): + try: + print_info('account', *args, **opts) + except InfoSystemExit: + sys.exit(1) + except sqlite3.OperationalError as e: + if not opts.get('stale_reads_ok'): + opts['stale_reads_ok'] = True + print('Warning: Possibly Stale Data') + run_print_info(args, opts) + sys.exit(2) + else: + print('Account info failed: %s' % e) + sys.exit(1) + if __name__ == '__main__': parser = OptionParser('%prog [options] ACCOUNT_DB_FILE') parser.add_option( @@ -28,7 +44,4 @@ if __name__ == '__main__': if len(args) != 1: sys.exit(parser.print_help()) - try: - print_info('account', *args, **vars(options)) - except InfoSystemExit: - sys.exit(1) + run_print_info(args, vars(options)) diff --git a/bin/swift-container-info b/bin/swift-container-info index 8074b22ccd..7ac09ba67e 100755 --- a/bin/swift-container-info +++ b/bin/swift-container-info @@ -11,12 +11,28 @@ # License for the specific language governing permissions and limitations # under the License. 
+import sqlite3 import sys from optparse import OptionParser from swift.cli.info import print_info, InfoSystemExit +def run_print_info(args, opts): + try: + print_info('container', *args, **opts) + except InfoSystemExit: + sys.exit(1) + except sqlite3.OperationalError as e: + if not opts.get('stale_reads_ok'): + opts['stale_reads_ok'] = True + print('Warning: Possibly Stale Data') + run_print_info(args, opts) + sys.exit(2) + else: + print('Container info failed: %s' % e) + sys.exit(1) + if __name__ == '__main__': parser = OptionParser('%prog [options] CONTAINER_DB_FILE') parser.add_option( @@ -28,7 +44,4 @@ if __name__ == '__main__': if len(args) != 1: sys.exit(parser.print_help()) - try: - print_info('container', *args, **vars(options)) - except InfoSystemExit: - sys.exit(1) + run_print_info(args, vars(options)) diff --git a/swift/cli/info.py b/swift/cli/info.py index ba02cfd25a..c5dda9405c 100644 --- a/swift/cli/info.py +++ b/swift/cli/info.py @@ -308,7 +308,7 @@ def print_obj_metadata(metadata): print_metadata('Other Metadata:', other_metadata) -def print_info(db_type, db_file, swift_dir='/etc/swift'): +def print_info(db_type, db_file, swift_dir='/etc/swift', stale_reads_ok=False): if db_type not in ('account', 'container'): print("Unrecognized DB type: internal error") raise InfoSystemExit() @@ -318,10 +318,10 @@ def print_info(db_type, db_file, swift_dir='/etc/swift'): if not db_file.startswith(('/', './')): db_file = './' + db_file # don't break if the bare db file is given if db_type == 'account': - broker = AccountBroker(db_file) + broker = AccountBroker(db_file, stale_reads_ok=stale_reads_ok) datadir = ABDATADIR else: - broker = ContainerBroker(db_file) + broker = ContainerBroker(db_file, stale_reads_ok=stale_reads_ok) datadir = CBDATADIR try: info = broker.get_info() diff --git a/swift/common/db.py b/swift/common/db.py index cead803375..195d952dc8 100644 --- a/swift/common/db.py +++ b/swift/common/db.py @@ -628,7 +628,7 @@ class DatabaseBroker(object): 
with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() - except LockTimeout: + except (LockTimeout, sqlite3.OperationalError): if not self.stale_reads_ok: raise diff --git a/test/unit/account/test_backend.py b/test/unit/account/test_backend.py index ebc0ebfca2..28d649987c 100644 --- a/test/unit/account/test_backend.py +++ b/test/unit/account/test_backend.py @@ -793,33 +793,79 @@ class TestAccountBroker(unittest.TestCase): self.assertEqual(items_by_name['b']['object_count'], 0) self.assertEqual(items_by_name['b']['bytes_used'], 0) - def test_load_old_pending_puts(self): + @with_tempdir + def test_load_old_pending_puts(self, tempdir): # pending puts from pre-storage-policy account brokers won't contain # the storage policy index - tempdir = mkdtemp() broker_path = os.path.join(tempdir, 'test-load-old.db') - try: - broker = AccountBroker(broker_path, account='real') - broker.initialize(Timestamp(1).internal) - with open(broker_path + '.pending', 'a+b') as pending: - pending.write(':') - pending.write(pickle.dumps( - # name, put_timestamp, delete_timestamp, object_count, - # bytes_used, deleted - ('oldcon', Timestamp(200).internal, - Timestamp(0).internal, - 896, 9216695, 0)).encode('base64')) + broker = AccountBroker(broker_path, account='real') + broker.initialize(Timestamp(1).internal) + with open(broker.pending_file, 'a+b') as pending: + pending.write(':') + pending.write(pickle.dumps( + # name, put_timestamp, delete_timestamp, object_count, + # bytes_used, deleted + ('oldcon', Timestamp(200).internal, + Timestamp(0).internal, + 896, 9216695, 0)).encode('base64')) - broker._commit_puts() - with broker.get() as conn: - results = list(conn.execute(''' - SELECT name, storage_policy_index FROM container - ''')) - self.assertEqual(len(results), 1) - self.assertEqual(dict(results[0]), - {'name': 'oldcon', 'storage_policy_index': 0}) - finally: - rmtree(tempdir) + broker._commit_puts() + with broker.get() as conn: + results = 
list(conn.execute(''' + SELECT name, storage_policy_index FROM container + ''')) + self.assertEqual(len(results), 1) + self.assertEqual(dict(results[0]), + {'name': 'oldcon', 'storage_policy_index': 0}) + + @with_tempdir + def test_get_info_stale_read_ok(self, tempdir): + # test getting a stale read from the db + broker_path = os.path.join(tempdir, 'test-load-old.db') + + def mock_commit_puts(): + raise sqlite3.OperationalError('unable to open database file') + + broker = AccountBroker(broker_path, account='real', + stale_reads_ok=True) + broker.initialize(Timestamp(1).internal) + with open(broker.pending_file, 'a+b') as pending: + pending.write(':') + pending.write(pickle.dumps( + # name, put_timestamp, delete_timestamp, object_count, + # bytes_used, deleted + ('oldcon', Timestamp(200).internal, + Timestamp(0).internal, + 896, 9216695, 0)).encode('base64')) + + broker._commit_puts = mock_commit_puts + broker.get_info() + + @with_tempdir + def test_get_info_no_stale_reads(self, tempdir): + broker_path = os.path.join(tempdir, 'test-load-old.db') + + def mock_commit_puts(): + raise sqlite3.OperationalError('unable to open database file') + + broker = AccountBroker(broker_path, account='real', + stale_reads_ok=False) + broker.initialize(Timestamp(1).internal) + with open(broker.pending_file, 'a+b') as pending: + pending.write(':') + pending.write(pickle.dumps( + # name, put_timestamp, delete_timestamp, object_count, + # bytes_used, deleted + ('oldcon', Timestamp(200).internal, + Timestamp(0).internal, + 896, 9216695, 0)).encode('base64')) + + broker._commit_puts = mock_commit_puts + + with self.assertRaises(sqlite3.OperationalError) as exc_context: + broker.get_info() + self.assertIn('unable to open database file', + str(exc_context.exception)) @patch_policies([StoragePolicy(0, 'zero', False), StoragePolicy(1, 'one', True), diff --git a/test/unit/container/test_backend.py b/test/unit/container/test_backend.py index 721f0f9094..22b70294ee 100644 --- 
a/test/unit/container/test_backend.py +++ b/test/unit/container/test_backend.py @@ -1679,6 +1679,63 @@ class TestContainerBroker(unittest.TestCase): } self.assertEqual(broker.get_policy_stats(), expected) + @with_tempdir + def test_get_info_no_stale_reads(self, tempdir): + ts = (Timestamp(t).internal for t in + itertools.count(int(time()))) + db_path = os.path.join(tempdir, 'container.db') + + def mock_commit_puts(): + raise sqlite3.OperationalError('unable to open database file') + + broker = ContainerBroker(db_path, account='a', container='c', + stale_reads_ok=False) + broker.initialize(next(ts), 1) + + # manually make some pending entries + with open(broker.pending_file, 'a+b') as fp: + for i in range(10): + name, timestamp, size, content_type, etag, deleted = ( + 'o%s' % i, next(ts), 0, 'c', 'e', 0) + fp.write(':') + fp.write(pickle.dumps( + (name, timestamp, size, content_type, etag, deleted), + protocol=2).encode('base64')) + fp.flush() + + broker._commit_puts = mock_commit_puts + with self.assertRaises(sqlite3.OperationalError) as exc_context: + broker.get_info() + self.assertIn('unable to open database file', + str(exc_context.exception)) + + @with_tempdir + def test_get_info_stale_read_ok(self, tempdir): + ts = (Timestamp(t).internal for t in + itertools.count(int(time()))) + db_path = os.path.join(tempdir, 'container.db') + + def mock_commit_puts(): + raise sqlite3.OperationalError('unable to open database file') + + broker = ContainerBroker(db_path, account='a', container='c', + stale_reads_ok=True) + broker.initialize(next(ts), 1) + + # manually make some pending entries + with open(broker.pending_file, 'a+b') as fp: + for i in range(10): + name, timestamp, size, content_type, etag, deleted = ( + 'o%s' % i, next(ts), 0, 'c', 'e', 0) + fp.write(':') + fp.write(pickle.dumps( + (name, timestamp, size, content_type, etag, deleted), + protocol=2).encode('base64')) + fp.flush() + + broker._commit_puts = mock_commit_puts + broker.get_info() + class 
TestCommonContainerBroker(test_db.TestExampleBroker): From 925546ae8a211b50cf7fad6634d47fd1dbfeb58e Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 25 Mar 2016 06:36:40 +0000 Subject: [PATCH 062/141] Imported Translations from Zanata For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: I6ba2f35913e6ae83607b5e268645432d455d587c --- swift/locale/de/LC_MESSAGES/swift.po | 19 +++- swift/locale/ja/LC_MESSAGES/swift.po | 115 +++++++++++++++++++++++- swift/locale/zh_TW/LC_MESSAGES/swift.po | 18 +++- 3 files changed, 143 insertions(+), 9 deletions(-) diff --git a/swift/locale/de/LC_MESSAGES/swift.po b/swift/locale/de/LC_MESSAGES/swift.po index 78184becd9..26bf545973 100644 --- a/swift/locale/de/LC_MESSAGES/swift.po +++ b/swift/locale/de/LC_MESSAGES/swift.po @@ -7,15 +7,16 @@ # Ettore Atalan , 2014-2015 # Jonas John , 2015 # Frank Kloeker , 2016. #zanata +# Monika Wolf , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev235\n" +"Project-Id-Version: swift 2.6.1.dev268\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-18 23:11+0000\n" +"POT-Creation-Date: 2016-03-24 22:25+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-20 07:32+0000\n" +"PO-Revision-Date: 2016-03-24 03:15+0000\n" "Last-Translator: Monika Wolf \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" @@ -227,6 +228,14 @@ msgstr "Kann nicht auf die Datei %s zugreifen." msgid "Can not load profile data from %s." msgstr "Die Profildaten von %s können nicht geladen werden." +#, python-format +msgid "Cannot read %s (%s)" +msgstr "%s (%s) kann nicht gelesen werden." + +#, python-format +msgid "Cannot write %s (%s)" +msgstr "Schreiben von %s (%s) nicht möglich." 
+ #, python-format msgid "Client did not read from proxy within %ss" msgstr "Client konnte nicht innerhalb von %ss vom Proxy lesen" @@ -692,6 +701,10 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "Lange laufendes rsync wird gekillt: %s" +#, python-format +msgid "Loading JSON from %s failed (%s)" +msgstr "Laden von JSON aus %s fehlgeschlagen: (%s)" + msgid "Lockup detected.. killing live coros." msgstr "Suche erkannt. Live-Coros werden gelöscht." diff --git a/swift/locale/ja/LC_MESSAGES/swift.po b/swift/locale/ja/LC_MESSAGES/swift.po index 1922a46b16..f5ff3b4bc3 100644 --- a/swift/locale/ja/LC_MESSAGES/swift.po +++ b/swift/locale/ja/LC_MESSAGES/swift.po @@ -10,13 +10,13 @@ # 笹原 昌美 , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev254\n" +"Project-Id-Version: swift 2.6.1.dev268\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-22 19:48+0000\n" +"POT-Creation-Date: 2016-03-24 22:25+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-23 02:20+0000\n" +"PO-Revision-Date: 2016-03-25 06:32+0000\n" "Last-Translator: 笹原 昌美 \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" @@ -54,6 +54,16 @@ msgstr "%(ip)s/%(device)s はアンマウントとして応答しました" msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" +#, python-format +msgid "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" +"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " +"(%(rate).2f/sec, %(remaining)s remaining)" +msgstr "" +"%(device)d/%(dtotal)d (%(dpercentage).2f%%) デバイスの %(reconstructed)d/" +"%(total)d (%(percentage).2f%%) パーティションが %(time).2fs で再構成されまし" +"た (%(rate).2f/秒、残り %(remaining)s)" + #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " @@ -210,6 +220,10 @@ msgstr "ファイル %s にアクセスできません。" msgid "Can not load profile data from %s." 
msgstr "プロファイルデータを %s からロードできません。" +#, python-format +msgid "Cannot read %s (%s)" +msgstr "%s を読み取ることができません (%s)" + #, python-format msgid "Client did not read from proxy within %ss" msgstr "クライアントは %s 内のプロキシーからの読み取りを行いませんでした" @@ -227,6 +241,14 @@ msgstr "" "クライアントパス %(client)s はオブジェクトメタデータ %(meta)s に保管されたパ" "スに一致しません" +msgid "" +"Configuration option internal_client_conf_path not defined. Using default " +"configuration, See internal-client.conf-sample for options" +msgstr "" +"設定オプション internal_client_conf_path が定義されていません。デフォルト設定" +"を使用しています。オプションについては internal-client.conf-sample を参照して" +"ください" + msgid "Connection refused" msgstr "接続が拒否されました" @@ -284,6 +306,10 @@ msgstr "データダウンロードエラー: %s" msgid "Devices pass completed: %.02fs" msgstr "デバイスパスが完了しました: %.02fs" +#, python-format +msgid "Directory %r does not map to a valid policy (%s)" +msgstr "ディレクトリー %r は有効なポリシーにマップしていません (%s) " + #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "エラー %(db_file)s: %(validate_sync_to_err)s" @@ -534,6 +560,12 @@ msgstr "パーティションとの同期エラー" msgid "Error syncing with node: %s" msgstr "ノードとの同期エラー: %s" +#, python-format +msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" +msgstr "" +"%(path)s の再構築を試行中にエラーが発生しました。ポリシー #%(policy)d フラグ" +"メント #%(frag_index)s" + msgid "Error: An error occurred" msgstr "エラー: エラーが発生しました" @@ -610,6 +642,10 @@ msgstr "無効なホスト %r が X-Container-Sync-To にあります" msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "無効な保留中項目 %(file)s: %(entry)s" +#, python-format +msgid "Invalid response %(resp)s from %(full_path)s" +msgstr "%(full_path)s からの応答 %(resp)s が無効です" + #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "%(ip)s からの応答 %(resp)s が無効です" @@ -626,6 +662,10 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "長期実行の再同期を強制終了中: %s" +#, python-format +msgid "Loading JSON from %s failed (%s)" +msgstr "%s からの JSON のロードが失敗しました (%s)" + msgid "Lockup detected.. killing live coros." 
msgstr "ロックが検出されました.. ライブ coros を強制終了中" @@ -645,10 +685,18 @@ msgstr "%r %r のエンドポイントクラスターがありません" msgid "No permission to signal PID %d" msgstr "PID %d にシグナル通知する許可がありません" +#, python-format +msgid "No policy with index %s" +msgstr "インデックス %s のポリシーはありません" + #, python-format msgid "No realm key for %r" msgstr "%r のレルムキーがありません" +#, python-format +msgid "No space left on device for %s (%s)" +msgstr "%s 用のデバイス容量が残っていません (%s)" + #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "ノードエラー制限 %(ip)s:%(port)s (%(device)s)" @@ -661,6 +709,10 @@ msgstr "" "不検出 %(sync_from)r => %(sync_to)r - オブジェクト " "%(obj_name)r" +#, python-format +msgid "Nothing reconstructed for %s seconds." +msgstr "%s 秒間で何も再構成されませんでした。" + #, python-format msgid "Nothing replicated for %s seconds." msgstr "%s 秒間で何も複製されませんでした。" @@ -692,6 +744,18 @@ msgstr "" "済み: %(quars)d、合計エラー: %(errors)d、合計ファイル/秒: %(frate).2f、合計バ" "イト/秒: %(brate).2f、監査時間: %(audit).2f、率: %(audit_rate).2f" +#, python-format +msgid "" +"Object audit (%(type)s). 
Since %(start_time)s: Locally: %(passes)d passed, " +"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " +"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " +"%(audit_rate).2f" +msgstr "" +"オブジェクト監査 (%(type)s)。%(start_time)s 以降: ローカル: 合格した監査 " +"%(passes)d、検疫済み %(quars)d、エラー %(errors)d、ファイル/秒: %(frate).2f、" +"バイト/秒: %(brate).2f、合計時間: %(total).2f、監査時間: %(audit).2f、率: " +"%(audit_rate).2f" + #, python-format msgid "Object audit stats: %s" msgstr "オブジェクト監査統計: %s" @@ -757,6 +821,14 @@ msgstr "X-Container-Sync-To にパスが必要です" msgid "Problem cleaning up %s" msgstr "%s のクリーンアップ中に問題が発生しました" +#, python-format +msgid "Problem cleaning up %s (%s)" +msgstr "%s のクリーンアップ中に問題が発生しました (%s)" + +#, python-format +msgid "Problem writing durable state file %s (%s)" +msgstr "永続状態ファイル %s の書き込み中に問題が発生しました (%s)" + #, python-format msgid "Profiling Error: %s" msgstr "プロファイル作成エラー: %s" @@ -797,6 +869,14 @@ msgstr "%s オブジェクトの削除中" msgid "Removing partition: %s" msgstr "パーティションの削除中: %s" +#, python-format +msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" +msgstr "正しくない pid %(pid)d の pid ファイル %(pid_file)s を削除中" + +#, python-format +msgid "Removing pid file %s with invalid pid" +msgstr "無効な pid の pid ファイル %s を削除中" + #, python-format msgid "Removing stale pid file %s" msgstr "失効した pid ファイル %s を削除中" @@ -898,6 +978,10 @@ msgstr "%(ip)s:%(port)s/%(device)s のタイムアウト例外" msgid "Trying to %(method)s %(path)s" msgstr "%(method)s %(path)s を試行中" +#, python-format +msgid "Trying to GET %(full_path)s" +msgstr "GET %(full_path)s を試行中" + #, python-format msgid "Trying to get final status of PUT to %s" msgstr "%s への PUT の最終状況の取得を試行中" @@ -911,6 +995,10 @@ msgstr "GET 時に読み取りを試行中 (再試行中)" msgid "Trying to send to client" msgstr "クライアントへの送信を試行中" +#, python-format +msgid "Trying to sync suffixes with %s" +msgstr "%s でサフィックスの同期を試行中" + #, python-format msgid "Trying to write to %s" msgstr "%s への書き込みを試行中" @@ -922,10 +1010,22 @@ msgstr "キャッチされていない例外" msgid "Unable 
to find %s config section in %s" msgstr "%s 構成セクションが %s に見つかりません" +#, python-format +msgid "Unable to load internal client from config: %r (%s)" +msgstr "設定から内部クライアントをロードできません: %r (%s)" + #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "%s が libc に見つかりません。no-op として終了します。" +#, python-format +msgid "Unable to locate config for %s" +msgstr "%s の設定が見つかりません" + +#, python-format +msgid "Unable to locate config number %s for %s" +msgstr "%s の設定番号 %s が見つかりません" + msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" @@ -950,6 +1050,11 @@ msgstr "予期しない応答: %s" msgid "Unhandled exception" msgstr "未処理例外" +#, python-format +msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" +msgstr "" +"GET を試行中に不明な例外が発生しました: %(account)r %(container)r %(object)r" + #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s に関する更新レポートが失敗しました" @@ -978,6 +1083,10 @@ msgstr "警告: メモリー制限を変更できません。非ルートとし msgid "Waited %s seconds for %s to die; giving up" msgstr "%s 秒間、%s の停止を待機しました。中止します" +#, python-format +msgid "Waited %s seconds for %s to die; killing" +msgstr "%s 秒間、%s の停止を待機しました。強制終了します" + msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告: memcached クライアントなしで ratelimit を行うことはできません" diff --git a/swift/locale/zh_TW/LC_MESSAGES/swift.po b/swift/locale/zh_TW/LC_MESSAGES/swift.po index 03c9083403..46ee202a44 100644 --- a/swift/locale/zh_TW/LC_MESSAGES/swift.po +++ b/swift/locale/zh_TW/LC_MESSAGES/swift.po @@ -8,13 +8,13 @@ # Jennifer , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev235\n" +"Project-Id-Version: swift 2.6.1.dev268\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-18 23:11+0000\n" +"POT-Creation-Date: 2016-03-24 22:25+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-19 12:55+0000\n" +"PO-Revision-Date: 2016-03-24 01:54+0000\n" "Last-Translator: Jennifer \n" "Language: zh-TW\n" "Plural-Forms: nplurals=1; plural=0;\n" @@ -221,6 +221,14 @@ msgstr "無法存取檔案 %s。" msgid "Can not load profile data from %s." msgstr "無法從 %s 中載入設定檔資料。" +#, python-format +msgid "Cannot read %s (%s)" +msgstr "無法讀取 %s (%s)" + +#, python-format +msgid "Cannot write %s (%s)" +msgstr "無法寫入 %s (%s)" + #, python-format msgid "Client did not read from proxy within %ss" msgstr "用戶端未在 %s 秒內從 Proxy 中讀取" @@ -655,6 +663,10 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "正在結束長時間執行的遠端同步:%s" +#, python-format +msgid "Loading JSON from %s failed (%s)" +msgstr "從 %s 載入 JSON 失敗 (%s)" + msgid "Lockup detected.. killing live coros." msgstr "偵測到鎖定。正在結束即時 coro。" From 2f7d0f4a2ad2da7e6a35e5b054a47a2fafe5ed01 Mon Sep 17 00:00:00 2001 From: Anh Tran Date: Fri, 25 Mar 2016 11:44:26 +0700 Subject: [PATCH 063/141] Removing some redundant words This patch removes some redundant words. Change-Id: Ia79717664b06ed9a41c3c5dcf1a25e9e49e21cf2 --- doc/source/ops_runbook/diagnose.rst | 2 +- etc/container-sync-realms.conf-sample | 2 +- swift/container/backend.py | 2 +- swift/obj/reconstructor.py | 2 +- test/unit/common/ring/test_builder.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/source/ops_runbook/diagnose.rst b/doc/source/ops_runbook/diagnose.rst index 629cf4881e..9ecb0ffa62 100644 --- a/doc/source/ops_runbook/diagnose.rst +++ b/doc/source/ops_runbook/diagnose.rst @@ -874,7 +874,7 @@ cycle of 15-20 hours can occur if nodes are added to the system and a new ring has been deployed. 
You can further check if the object replicator is stuck by logging on -the the object server and checking the object replicator progress with +the object server and checking the object replicator progress with the following command: .. code:: diff --git a/etc/container-sync-realms.conf-sample b/etc/container-sync-realms.conf-sample index 29de0eb44d..19ce21c18c 100644 --- a/etc/container-sync-realms.conf-sample +++ b/etc/container-sync-realms.conf-sample @@ -40,7 +40,7 @@ # The endpoint is what the container sync daemon will use when sending out # requests to that cluster. Keep in mind this endpoint must be reachable by all # container servers, since that is where the container sync daemon runs. Note -# the the endpoint ends with /v1/ and that the container sync daemon will then +# that the endpoint ends with /v1/ and that the container sync daemon will then # add the account/container/obj name after that. # # Distribute this container-sync-realms.conf file to all your proxy servers diff --git a/swift/container/backend.py b/swift/container/backend.py index a382cc7ed7..977ae7a79f 100644 --- a/swift/container/backend.py +++ b/swift/container/backend.py @@ -146,7 +146,7 @@ def update_new_item_from_existing(new_item, existing): their timestamps are newer. The multiple timestamps are encoded into a single string for storing - in the 'created_at' column of the the objects db table. + in the 'created_at' column of the objects db table. 
:param new_item: A dict of object update attributes :param existing: A dict of existing object attributes diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py index e2ad368344..e41c478c0a 100644 --- a/swift/obj/reconstructor.py +++ b/swift/obj/reconstructor.py @@ -68,7 +68,7 @@ def _get_partners(frag_index, part_nodes): class RebuildingECDiskFileStream(object): """ - This class wraps the the reconstructed fragment archive data and + This class wraps the reconstructed fragment archive data and metadata in the DiskFile interface for ssync. """ diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py index c8c5023a30..c858c51977 100644 --- a/test/unit/common/ring/test_builder.py +++ b/test/unit/common/ring/test_builder.py @@ -47,7 +47,7 @@ class TestRingBuilder(unittest.TestCase): def _partition_counts(self, builder, key='id'): """ Returns a dictionary mapping the given device key to (number of - partitions assigned to to that key). + partitions assigned to that key). """ counts = defaultdict(int) for part2dev_id in builder._replica2part2dev: From 7402d7d9cf91ed00a9c86fc1511592ac2eed5d6c Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Thu, 24 Mar 2016 16:42:21 -0700 Subject: [PATCH 064/141] Shutdown backend EC connection contexts on disconnect When eventlet.wsgi closes an ECAppIter on client disconnect we need to make sure our sub-iterators are also closed. We already kill the backend sockets, but the executing contexts are left running until they timeout. A slow client can result in needlessly holding queued backend fragments until the client_timeout (default 60s). Update associated test that exposed the problem to demonstrate the issue more quickly. 
Change-Id: Ibbc89449e7878fc4215e47e3f7dfe4ae58a2d638 --- swift/proxy/controllers/base.py | 4 ++++ swift/proxy/controllers/obj.py | 5 +++++ test/unit/proxy/test_server.py | 33 +++++++++++++++++---------------- 3 files changed, 26 insertions(+), 16 deletions(-) diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index 3bebd7f52b..7dcc1ca3de 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -926,6 +926,7 @@ class ResumingGetter(object): if nchunks % 5 == 0: sleep() + part_iter = None try: while True: start_byte, end_byte, length, headers, part = \ @@ -939,6 +940,9 @@ class ResumingGetter(object): self.pop_range() except StopIteration: req.environ['swift.non_client_disconnect'] = True + finally: + if part_iter: + part_iter.close() except ChunkReadTimeout: self.app.exception_occurred(node[0], _('Object'), diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index cda2546c69..b4c01bbb6e 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -1118,6 +1118,11 @@ class ECAppIter(object): self.stashed_iter = None def close(self): + # close down the stashed iter first so the ContextPool can + # cleanup the frag queue feeding coros that may be currently + # executing the internal_parts_iters. 
+ if self.stashed_iter: + self.stashed_iter.close() for it in self.internal_parts_iters: close_if_possible(it) diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 69d071a284..631551d337 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -5827,23 +5827,24 @@ class TestObjectController(unittest.TestCase): exp = 'HTTP/1.1 201' self.assertEqual(headers[:len(exp)], exp) - # get object - fd.write('GET /v1/a/ec-discon/test HTTP/1.1\r\n' - 'Host: localhost\r\n' - 'Connection: close\r\n' - 'X-Storage-Token: t\r\n' - '\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEqual(headers[:len(exp)], exp) + with mock.patch.object(_test_servers[0], 'client_timeout', new=5): + # get object + fd.write('GET /v1/a/ec-discon/test HTTP/1.1\r\n' + 'Host: localhost\r\n' + 'Connection: close\r\n' + 'X-Storage-Token: t\r\n' + '\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEqual(headers[:len(exp)], exp) - # read most of the object, and disconnect - fd.read(10) - sock.fd._sock.close() - condition = \ - lambda: _test_servers[0].logger.get_lines_for_level('warning') - self._sleep_enough(condition) + # read most of the object, and disconnect + fd.read(10) + sock.fd._sock.close() + condition = \ + lambda: _test_servers[0].logger.get_lines_for_level('warning') + self._sleep_enough(condition) # check for disconnect message! 
expected = ['Client disconnected on read'] * 2 From 5902015fa8495ec0ef3c1ab92ae9a34c5bda4334 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 26 Mar 2016 06:35:18 +0000 Subject: [PATCH 065/141] Imported Translations from Zanata For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: I3b5d401649fa3dea6dc43654516f7075bb06ee0d --- swift/locale/fr/LC_MESSAGES/swift.po | 25 ++++++++++-- swift/locale/ja/LC_MESSAGES/swift.po | 59 ++++++++++++++++++++++++++-- 2 files changed, 77 insertions(+), 7 deletions(-) diff --git a/swift/locale/fr/LC_MESSAGES/swift.po b/swift/locale/fr/LC_MESSAGES/swift.po index 744f584c54..0e7868a728 100644 --- a/swift/locale/fr/LC_MESSAGES/swift.po +++ b/swift/locale/fr/LC_MESSAGES/swift.po @@ -10,13 +10,13 @@ # Gael Rehault , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev235\n" +"Project-Id-Version: swift 2.7.1.dev4\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-18 23:11+0000\n" +"POT-Creation-Date: 2016-03-25 11:23+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-18 03:55+0000\n" +"PO-Revision-Date: 2016-03-25 03:29+0000\n" "Last-Translator: Angelique Pillal \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" @@ -230,6 +230,14 @@ msgstr "Ne peut pas accéder au fichier %s." msgid "Can not load profile data from %s." msgstr "Impossible de charger des données de profil depuis %s." 
+#, python-format +msgid "Cannot read %s (%s)" +msgstr "Impossible de lire %s (%s)" + +#, python-format +msgid "Cannot write %s (%s)" +msgstr "Impossible d'écrire %s (%s)" + #, python-format msgid "Client did not read from proxy within %ss" msgstr "Le client n'a pas lu les données du proxy en %s s" @@ -265,7 +273,7 @@ msgid "Connection timeout" msgstr "Dépassement du délai d'attente de connexion" msgid "Container" -msgstr "Containeur" +msgstr "Conteneur" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" @@ -699,6 +707,10 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "Arrêt de l'opération Rsync à exécution longue : %s" +#, python-format +msgid "Loading JSON from %s failed (%s)" +msgstr "Echec du chargement du fichier JSON depuis %s (%s)" + msgid "Lockup detected.. killing live coros." msgstr "Blocage détecté. Arrêt des coroutines actives." @@ -926,6 +938,11 @@ msgstr "Suppression de %s objets" msgid "Removing partition: %s" msgstr "Suppression partition: %s" +#, python-format +msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" +msgstr "" +"Supression du fichier PID %(pid_file)s, comportant un PID incorrect %(pid)d" + #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Suppression du fichier pid %s comportant un pid non valide" diff --git a/swift/locale/ja/LC_MESSAGES/swift.po b/swift/locale/ja/LC_MESSAGES/swift.po index f5ff3b4bc3..53fbce29ca 100644 --- a/swift/locale/ja/LC_MESSAGES/swift.po +++ b/swift/locale/ja/LC_MESSAGES/swift.po @@ -10,13 +10,13 @@ # 笹原 昌美 , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev268\n" +"Project-Id-Version: swift 2.7.1.dev4\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-24 22:25+0000\n" +"POT-Creation-Date: 2016-03-25 11:23+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-25 06:32+0000\n" +"PO-Revision-Date: 2016-03-25 07:46+0000\n" "Last-Translator: 笹原 昌美 \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" @@ -100,6 +100,10 @@ msgstr "%s が存在しません" msgid "%s is not mounted" msgstr "%s がマウントされていません" +#, python-format +msgid "%s responded as unmounted" +msgstr "%s はアンマウントとして応答しました" + #, python-format msgid "%s running (%s - %s)" msgstr "%s が実行中 (%s - %s)" @@ -224,6 +228,10 @@ msgstr "プロファイルデータを %s からロードできません。" msgid "Cannot read %s (%s)" msgstr "%s を読み取ることができません (%s)" +#, python-format +msgid "Cannot write %s (%s)" +msgstr "%s を書き込むことができません (%s)" + #, python-format msgid "Client did not read from proxy within %ss" msgstr "クライアントは %s 内のプロキシーからの読み取りを行いませんでした" @@ -234,6 +242,9 @@ msgstr "クライアントが読み取り時に切断されました" msgid "Client disconnected without sending enough data" msgstr "十分なデータを送信せずにクライアントが切断されました" +msgid "Client disconnected without sending last chunk" +msgstr "最後のチャンクを送信せずにクライアントが切断されました" + #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" @@ -386,6 +397,10 @@ msgstr "" msgid "ERROR Exception causing client disconnect" msgstr "エラー: 例外によりクライアントが切断されています" +#, python-format +msgid "ERROR Exception transferring data to object servers %s" +msgstr "エラー: オブジェクトサーバー %s へのデータ転送で例外が発生しました" + msgid "ERROR Failed to get my own IPs?" msgstr "エラー: 自分の IP の取得に失敗?" 
@@ -585,6 +600,9 @@ msgstr "最上位アカウントリーパーループで例外が発生しまし msgid "Exception in top-level replication loop" msgstr "最上位複製ループで例外が発生しました" +msgid "Exception in top-levelreconstruction loop" +msgstr "最上位再構成ループで例外が発生しました" + #, python-format msgid "Exception while deleting container %s %s" msgstr "コンテナー %s %s の削除中に例外が発生しました" @@ -623,6 +641,13 @@ msgstr "%(given_domain)s から %(found_domain)s へ CNAME チェーンをフォ msgid "Found configs:" msgstr "構成が見つかりました:" +msgid "" +"Handoffs first mode still has handoffs remaining. Aborting current " +"replication pass." +msgstr "" +"ハンドオフのファーストモードにハンドオフが残っています。現行複製パスを打ち切" +"ります。" + msgid "Host unreachable" msgstr "ホストが到達不能です" @@ -701,6 +726,10 @@ msgstr "%s 用のデバイス容量が残っていません (%s)" msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "ノードエラー制限 %(ip)s:%(port)s (%(device)s)" +#, python-format +msgid "Not enough object servers ack'ed (got %d)" +msgstr "肯定応答を返したオブジェクト・サーバーが不十分です (%d 取得)" + #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " @@ -760,6 +789,14 @@ msgstr "" msgid "Object audit stats: %s" msgstr "オブジェクト監査統計: %s" +#, python-format +msgid "Object reconstruction complete (once). (%.02f minutes)" +msgstr "オブジェクト再構成が完了しました (1 回)。(%.02f 分)" + +#, python-format +msgid "Object reconstruction complete. (%.02f minutes)" +msgstr "オブジェクト再構成が完了しました。(%.02f 分)" + #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "オブジェクト複製が完了しました (1 回)。(%.02f 分)" @@ -896,6 +933,9 @@ msgstr "" "%(acc)s/%(cont)s/%(obj)s に対する %(meth)s に関して 498 を返しています。" "Ratelimit (最大スリープ) %(e)s" +msgid "Ring change detected. Aborting current reconstruction pass." +msgstr "リング変更が検出されました。現行再構成パスを打ち切ります。" + msgid "Ring change detected. Aborting current replication pass." msgstr "リング変更が検出されました。現行複製パスを打ち切ります。" @@ -903,6 +943,9 @@ msgstr "リング変更が検出されました。現行複製パスを打ち切 msgid "Running %s once" msgstr "%s を 1 回実行中" +msgid "Running object reconstructor in script mode." 
+msgstr "スクリプトモードでオブジェクトリコンストラクターを実行中です。" + msgid "Running object replicator in script mode." msgstr "スクリプトモードでオブジェクトレプリケーターを実行中です。" @@ -945,6 +988,12 @@ msgstr "マウントされていないため、 %s をスキップします" msgid "Starting %s" msgstr "%s を開始しています" +msgid "Starting object reconstruction pass." +msgstr "オブジェクト再構成パスを開始中です。" + +msgid "Starting object reconstructor in daemon mode." +msgstr "オブジェクトリコンストラクターをデーモンモードで開始中です。" + msgid "Starting object replication pass." msgstr "オブジェクト複製パスを開始中です。" @@ -982,6 +1031,10 @@ msgstr "%(method)s %(path)s を試行中" msgid "Trying to GET %(full_path)s" msgstr "GET %(full_path)s を試行中" +#, python-format +msgid "Trying to get %s status of PUT to %s" +msgstr "%s への PUT の状況 %s の取得を試行中" + #, python-format msgid "Trying to get final status of PUT to %s" msgstr "%s への PUT の最終状況の取得を試行中" From 7be55acf1bc4aa07d81b30fd93e144700889898d Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Thu, 11 Feb 2016 16:00:38 -0800 Subject: [PATCH 066/141] Simplify policy-name validation slightly _validate_policy_name always either returns True or raises an exception. Simplify it to just being a callable that may raise an exception. Also, move the check for blank/None names into _validate_policy_name, so it will be applied in more cases. 
Change-Id: I7832a0c9c895cd75ba4c6d0e8b5568a3c8a0ea25 --- swift/common/storage_policy.py | 23 ++++++++++------------- test/unit/common/test_storage_policy.py | 12 ++++++++++++ 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/swift/common/storage_policy.py b/swift/common/storage_policy.py index 90c8164197..19b9f26e77 100755 --- a/swift/common/storage_policy.py +++ b/swift/common/storage_policy.py @@ -170,16 +170,13 @@ class BaseStoragePolicy(object): if self.idx < 0: raise PolicyError('Invalid index', idx) self.alias_list = [] - if not name or not self._validate_policy_name(name): - raise PolicyError('Invalid name %r' % name, idx) - self.alias_list.append(name) + self.add_name(name) if aliases: names_list = list_from_csv(aliases) for alias in names_list: if alias == name: continue - self._validate_policy_name(alias) - self.alias_list.append(alias) + self.add_name(alias) self.is_deprecated = config_true_value(is_deprecated) self.is_default = config_true_value(is_default) if self.policy_type not in BaseStoragePolicy.policy_type_to_policy_cls: @@ -288,14 +285,16 @@ class BaseStoragePolicy(object): to check policy names before setting them. :param name: a name string for a single policy name. - :returns: true if the name is valid. :raises: PolicyError if the policy name is invalid. """ + if not name: + raise PolicyError('Invalid name %r' % name, self.idx) # this is defensively restrictive, but could be expanded in the future if not all(c in VALID_CHARS for c in name): - raise PolicyError('Names are used as HTTP headers, and can not ' - 'reliably contain any characters not in %r. ' - 'Invalid name %r' % (VALID_CHARS, name)) + msg = 'Names are used as HTTP headers, and can not ' \ + 'reliably contain any characters not in %r. ' \ + 'Invalid name %r' % (VALID_CHARS, name) + raise PolicyError(msg, self.idx) if name.upper() == LEGACY_POLICY_NAME.upper() and self.idx != 0: msg = 'The name %s is reserved for policy index 0. 
' \ 'Invalid name %r' % (LEGACY_POLICY_NAME, name) @@ -305,8 +304,6 @@ class BaseStoragePolicy(object): msg = 'The name %s is already assigned to this policy.' % name raise PolicyError(msg, self.idx) - return True - def add_name(self, name): """ Adds an alias name to the storage policy. Shouldn't be called @@ -316,8 +313,8 @@ class BaseStoragePolicy(object): :param name: a new alias for the storage policy """ - if self._validate_policy_name(name): - self.alias_list.append(name) + self._validate_policy_name(name) + self.alias_list.append(name) def remove_name(self, name): """ diff --git a/test/unit/common/test_storage_policy.py b/test/unit/common/test_storage_policy.py index e1ced03717..12a743f9ba 100755 --- a/test/unit/common/test_storage_policy.py +++ b/test/unit/common/test_storage_policy.py @@ -374,6 +374,15 @@ class TestStoragePolicies(unittest.TestCase): # but only because automated testing requires it. policies = parse_storage_policies(name_repeat_conf) + extra_commas_conf = self._conf(""" + [storage-policy:0] + name = one + aliases = ,,one, , + default = yes + """) + # Extra blank entries should be silently dropped + policies = parse_storage_policies(extra_commas_conf) + bad_conf = self._conf(""" [storage-policy:0] name = one @@ -499,6 +508,9 @@ class TestStoragePolicies(unittest.TestCase): self.assertRaisesWithMessage(PolicyError, 'Invalid name', policies.add_policy_alias, 2, 'double\n') + self.assertRaisesWithMessage(PolicyError, 'Invalid name', + policies.add_policy_alias, 2, '') + # try to add existing name self.assertRaisesWithMessage(PolicyError, 'Duplicate name', policies.add_policy_alias, 2, 'two') From 59bbe27fb0a40236108f09c9b3349e8faef0a95c Mon Sep 17 00:00:00 2001 From: Nguyen Hung Phuong Date: Wed, 30 Mar 2016 11:07:46 +0700 Subject: [PATCH 067/141] Fix typos in Swift files Change-Id: I34e0c9a888127704ac1910e73ddd14e27ebade13 --- bin/swift-init | 2 +- bin/swift-reconciler-enqueue | 2 +- doc/manpages/swift-object-expirer.1 | 32 
++++++++++++++--------------- doc/source/ops_runbook/diagnose.rst | 2 +- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/bin/swift-init b/bin/swift-init index 0fcbff5708..c8e0aef5c6 100755 --- a/bin/swift-init +++ b/bin/swift-init @@ -77,7 +77,7 @@ def main(): # SIGKILL daemon after kill_wait period parser.add_option('--kill-after-timeout', dest='kill_after_timeout', action='store_true', - help="Kill daemon and all childs after kill-wait " + help="Kill daemon and all children after kill-wait " "period.") options, args = parser.parse_args() diff --git a/bin/swift-reconciler-enqueue b/bin/swift-reconciler-enqueue index da653151c3..d707571d2e 100755 --- a/bin/swift-reconciler-enqueue +++ b/bin/swift-reconciler-enqueue @@ -25,7 +25,7 @@ from swift.container.reconciler import add_to_reconciler_queue """ This tool is primarily for debugging and development but can be used an example of how an operator could enqueue objects manually if a problem is discovered - -might be particularlly useful if you need to hack a fix into the reconciler +might be particularly useful if you need to hack a fix into the reconciler and re-run it. """ diff --git a/doc/manpages/swift-object-expirer.1 b/doc/manpages/swift-object-expirer.1 index 869b2ac2a6..3b5b1b10ba 100644 --- a/doc/manpages/swift-object-expirer.1 +++ b/doc/manpages/swift-object-expirer.1 @@ -14,31 +14,31 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-object-expirer 1 "3/15/2012" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B swift-object-expirer \- Openstack-swift object expirer .SH SYNOPSIS .LP -.B swift-object-expirer +.B swift-object-expirer [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] -.SH DESCRIPTION +.SH DESCRIPTION .PP -The swift-object-expirer offers scheduled deletion of objects. 
The Swift client would -use the X-Delete-At or X-Delete-After headers during an object PUT or POST and the -cluster would automatically quit serving that object at the specified time and would +The swift-object-expirer offers scheduled deletion of objects. The Swift client would +use the X-Delete-At or X-Delete-After headers during an object PUT or POST and the +cluster would automatically quit serving that object at the specified time and would shortly thereafter remove the object from the system. -The X-Delete-At header takes a Unix Epoch timestamp, in integer form; for example: +The X-Delete-At header takes a Unix Epoch timestamp, in integer form; for example: 1317070737 represents Mon Sep 26 20:58:57 2011 UTC. -The X-Delete-After header takes a integer number of seconds. The proxy server -that receives the request will convert this header into an X-Delete-At header +The X-Delete-After header takes a integer number of seconds. The proxy server +that receives the request will convert this header into an X-Delete-At header using its current time plus the value given. The options are as follows: @@ -53,19 +53,19 @@ The options are as follows: .IP "-o" .IP "--once" .RS 4 -.IP "only run one pass of daemon" +.IP "only run one pass of daemon" .RE .PD .RE - - + + .SH DOCUMENTATION .LP -More in depth documentation in regards to +More in depth documentation in regards to .BI swift-object-expirer -can be foud at +can be found at .BI http://swift.openstack.org/overview_expiring_objects.html -and also about Openstack-Swift as a whole can be found at +and also about Openstack-Swift as a whole can be found at .BI http://swift.openstack.org/index.html diff --git a/doc/source/ops_runbook/diagnose.rst b/doc/source/ops_runbook/diagnose.rst index 9ecb0ffa62..9066112093 100644 --- a/doc/source/ops_runbook/diagnose.rst +++ b/doc/source/ops_runbook/diagnose.rst @@ -575,7 +575,7 @@ command-line wrong. Pick a ``source`` and ``target`` node. 
The source is often a proxy node and the target is often an object node. Using the same source proxy you can test communication to different object nodes in different AZs to -identity possible bottlekecks. +identity possible bottlenecks. Running tests ^^^^^^^^^^^^^ From ebf0b220127b14bec7c05f1bc0286728f27f39d1 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 30 Mar 2016 14:19:00 -0700 Subject: [PATCH 068/141] Fix upgrade bug in versioned_writes Previously, versioned_writes assumed that all container servers would always have the latest Swift code, allowing them to return reversed listings. This could cause the wrong version of a file to be restored during rolling upgrades. Now, versioned_writes will check that the listing returned is actually reversed. If it isn't, we will revert to getting the full (in-order) listing of versions and reversing it on the proxy. Change-Id: Ib53574ff71961592426cb386ef00a75eb5824def Closes-Bug: 1562083 --- swift/common/middleware/versioned_writes.py | 80 +++- .../middleware/test_versioned_writes.py | 374 +++++++++++++++++- 2 files changed, 439 insertions(+), 15 deletions(-) diff --git a/swift/common/middleware/versioned_writes.py b/swift/common/middleware/versioned_writes.py index 516eda9cb2..51497c7f8d 100644 --- a/swift/common/middleware/versioned_writes.py +++ b/swift/common/middleware/versioned_writes.py @@ -155,15 +155,74 @@ class VersionedWritesContext(WSGIContext): except ListingIterError: raise HTTPServerError(request=req) - def _listing_pages_iter(self, account_name, lcontainer, lprefix, env): - marker = '' + def _in_proxy_reverse_listing(self, account_name, lcontainer, lprefix, + env, failed_marker, failed_listing): + '''Get the complete prefix listing and reverse it on the proxy. + + This is only necessary if we encounter a response from a + container-server that does not respect the ``reverse`` param + included by default in ``_listing_pages_iter``. This may happen + during rolling upgrades from pre-2.6.0 swift. 
+ + :param failed_marker: the marker that was used when we encountered + the non-reversed listing + :param failed_listing: the non-reversed listing that was encountered. + If ``failed_marker`` is blank, we can use this + to save ourselves a request + :returns: an iterator over all objects starting with ``lprefix`` (up + to but not including the failed marker) in reverse order + ''' + complete_listing = [] + if not failed_marker: + # We've never gotten a reversed listing. So save a request and + # use the failed listing. + complete_listing.extend(failed_listing) + marker = complete_listing[-1]['name'].encode('utf8') + else: + # We've gotten at least one reversed listing. Have to start at + # the beginning. + marker = '' + + # First, take the *entire* prefix listing into memory + try: + for page in self._listing_pages_iter( + account_name, lcontainer, lprefix, + env, marker, end_marker=failed_marker, reverse=False): + complete_listing.extend(page) + except ListingIterNotFound: + pass + + # Now that we've got everything, return the whole listing as one giant + # reversed page + return reversed(complete_listing) + + def _listing_pages_iter(self, account_name, lcontainer, lprefix, + env, marker='', end_marker='', reverse=True): + '''Get "pages" worth of objects that start with a prefix. + + The optional keyword arguments ``marker``, ``end_marker``, and + ``reverse`` are used similar to how they are for containers. We're + either coming: + + - directly from ``_listing_iter``, in which case none of the + optional args are specified, or + + - from ``_in_proxy_reverse_listing``, in which case ``reverse`` + is ``False`` and both ``marker`` and ``end_marker`` are specified + (although they may still be blank). 
+ ''' while True: lreq = make_pre_authed_request( env, method='GET', swift_source='VW', path='/v1/%s/%s' % (account_name, lcontainer)) lreq.environ['QUERY_STRING'] = \ - 'format=json&prefix=%s&reverse=on&marker=%s' % ( + 'format=json&prefix=%s&marker=%s' % ( quote(lprefix), quote(marker)) + if end_marker: + lreq.environ['QUERY_STRING'] += '&end_marker=%s' % ( + quote(end_marker)) + if reverse: + lreq.environ['QUERY_STRING'] += '&reverse=on' lresp = lreq.get_response(self.app) if not is_success(lresp.status_int): if lresp.status_int == HTTP_NOT_FOUND: @@ -179,7 +238,20 @@ class VersionedWritesContext(WSGIContext): sublisting = json.loads(lresp.body) if not sublisting: break - marker = sublisting[-1]['name'].encode('utf-8') + + # When using the ``reverse`` param, check that the listing is + # actually reversed + first_item = sublisting[0]['name'].encode('utf-8') + last_item = sublisting[-1]['name'].encode('utf-8') + page_is_after_marker = marker and first_item > marker + if reverse and (first_item < last_item or page_is_after_marker): + # Apparently there's at least one pre-2.6.0 container server + yield self._in_proxy_reverse_listing( + account_name, lcontainer, lprefix, + env, marker, sublisting) + return + + marker = last_item yield sublisting def handle_obj_versions_put(self, req, object_versions, diff --git a/test/unit/common/middleware/test_versioned_writes.py b/test/unit/common/middleware/test_versioned_writes.py index 64a31a6705..e53ef589b8 100644 --- a/test/unit/common/middleware/test_versioned_writes.py +++ b/test/unit/common/middleware/test_versioned_writes.py @@ -14,6 +14,7 @@ # limitations under the License. 
import functools +import json import os import time import unittest @@ -54,7 +55,7 @@ def local_tz(func): return wrapper -class VersionedWritesTestCase(unittest.TestCase): +class VersionedWritesBaseTestCase(unittest.TestCase): def setUp(self): self.app = FakeSwift() conf = {'allow_versioned_writes': 'true'} @@ -105,6 +106,8 @@ class VersionedWritesTestCase(unittest.TestCase): self.assertEqual(req.method, other.method) self.assertEqual(req.path, other.path) + +class VersionedWritesTestCase(VersionedWritesBaseTestCase): def test_put_container(self): self.app.register('PUT', '/v1/a/c', swob.HTTPOk, {}, 'passed') req = Request.blank('/v1/a/c', @@ -452,7 +455,7 @@ class VersionedWritesTestCase(unittest.TestCase): 'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') self.app.register( 'GET', - '/v1/a/ver_cont?format=json&prefix=001o/&reverse=on&marker=', + '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', swob.HTTPNotFound, {}, None) cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) @@ -465,12 +468,18 @@ class VersionedWritesTestCase(unittest.TestCase): self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) + prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + self.assertEqual(self.app.calls, [ + ('GET', prefix_listing_prefix + 'marker=&reverse=on'), + ('DELETE', '/v1/a/c/o'), + ]) + def test_delete_latest_version_success(self): self.app.register( 'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') self.app.register( 'GET', - '/v1/a/ver_cont?format=json&prefix=001o/&reverse=on&marker=', + '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', swob.HTTPOk, {}, '[{"hash": "y", ' '"last_modified": "2014-11-21T14:23:02.206740", ' @@ -501,17 +510,58 @@ class VersionedWritesTestCase(unittest.TestCase): self.assertRequestEqual(req, self.authorized[0]) # check that X-If-Delete-At was removed from DELETE request - calls = self.app.calls_with_headers - method, path, req_headers = calls.pop() - 
self.assertEqual('DELETE', method) - self.assertTrue(path.startswith('/v1/a/ver_cont/001o/2')) - self.assertFalse('x-if-delete-at' in req_headers or - 'X-If-Delete-At' in req_headers) + req_headers = self.app.headers[-1] + self.assertNotIn('x-if-delete-at', [h.lower() for h in req_headers]) + + prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + self.assertEqual(self.app.calls, [ + ('GET', prefix_listing_prefix + 'marker=&reverse=on'), + ('COPY', '/v1/a/ver_cont/001o/2'), + ('DELETE', '/v1/a/ver_cont/001o/2'), + ]) + + def test_delete_single_version_success(self): + # check that if the first listing page has just a single item then + # it is not erroneously inferred to be a non-reversed listing + self.app.register( + 'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register( + 'GET', + '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', + swob.HTTPOk, {}, + '[{"hash": "y", ' + '"last_modified": "2014-11-21T14:23:02.206740", ' + '"bytes": 3, ' + '"name": "001o/1", ' + '"content_type": "text/plain"}]') + self.app.register( + 'COPY', '/v1/a/ver_cont/001o/1', swob.HTTPCreated, + {}, None) + self.app.register( + 'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPOk, + {}, None) + + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) + req = Request.blank( + '/v1/a/c/o', + environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache, + 'CONTENT_LENGTH': '0'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '200 OK') + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + self.assertEqual(self.app.calls, [ + ('GET', prefix_listing_prefix + 'marker=&reverse=on'), + ('COPY', '/v1/a/ver_cont/001o/1'), + ('DELETE', '/v1/a/ver_cont/001o/1'), + ]) def test_DELETE_on_expired_versioned_object(self): self.app.register( 'GET', - '/v1/a/ver_cont?format=json&prefix=001o/&reverse=on&marker=', + 
'/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', swob.HTTPOk, {}, '[{"hash": "y", ' '"last_modified": "2014-11-21T14:23:02.206740", ' @@ -545,13 +595,21 @@ class VersionedWritesTestCase(unittest.TestCase): self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) + prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + self.assertEqual(self.app.calls, [ + ('GET', prefix_listing_prefix + 'marker=&reverse=on'), + ('COPY', '/v1/a/ver_cont/001o/2'), + ('COPY', '/v1/a/ver_cont/001o/1'), + ('DELETE', '/v1/a/ver_cont/001o/1'), + ]) + def test_denied_DELETE_of_versioned_object(self): authorize_call = [] self.app.register( 'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') self.app.register( 'GET', - '/v1/a/ver_cont?format=json&prefix=001o/&reverse=on&marker=', + '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', swob.HTTPOk, {}, '[{"hash": "y", ' '"last_modified": "2014-11-21T14:23:02.206740", ' @@ -581,3 +639,297 @@ class VersionedWritesTestCase(unittest.TestCase): self.assertEqual(status, '403 Forbidden') self.assertEqual(len(authorize_call), 1) self.assertRequestEqual(req, authorize_call[0]) + + prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + self.assertEqual(self.app.calls, [ + ('GET', prefix_listing_prefix + 'marker=&reverse=on'), + ]) + + +class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): + def test_delete_latest_version_success(self): + self.app.register( + 'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register( + 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'marker=&reverse=on', + swob.HTTPOk, {}, + '[{"hash": "x", ' + '"last_modified": "2014-11-21T14:14:27.409100", ' + '"bytes": 3, ' + '"name": "001o/1", ' + '"content_type": "text/plain"}, ' + '{"hash": "y", ' + '"last_modified": "2014-11-21T14:23:02.206740", ' + '"bytes": 3, ' + '"name": "001o/2", ' + '"content_type": "text/plain"}]') + self.app.register( + 'GET', 
'/v1/a/ver_cont?format=json&prefix=001o/' + '&marker=001o/2', + swob.HTTPNotFound, {}, None) + self.app.register( + 'COPY', '/v1/a/ver_cont/001o/2', swob.HTTPCreated, + {}, None) + self.app.register( + 'DELETE', '/v1/a/ver_cont/001o/2', swob.HTTPOk, + {}, None) + + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) + req = Request.blank( + '/v1/a/c/o', + headers={'X-If-Delete-At': 1}, + environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache, + 'CONTENT_LENGTH': '0'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '200 OK') + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + # check that X-If-Delete-At was removed from DELETE request + req_headers = self.app.headers[-1] + self.assertNotIn('x-if-delete-at', [h.lower() for h in req_headers]) + + prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + self.assertEqual(self.app.calls, [ + ('GET', prefix_listing_prefix + 'marker=&reverse=on'), + ('GET', prefix_listing_prefix + 'marker=001o/2'), + ('COPY', '/v1/a/ver_cont/001o/2'), + ('DELETE', '/v1/a/ver_cont/001o/2'), + ]) + + def test_DELETE_on_expired_versioned_object(self): + self.app.register( + 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'marker=&reverse=on', + swob.HTTPOk, {}, + '[{"hash": "x", ' + '"last_modified": "2014-11-21T14:14:27.409100", ' + '"bytes": 3, ' + '"name": "001o/1", ' + '"content_type": "text/plain"}, ' + '{"hash": "y", ' + '"last_modified": "2014-11-21T14:23:02.206740", ' + '"bytes": 3, ' + '"name": "001o/2", ' + '"content_type": "text/plain"}]') + self.app.register( + 'GET', '/v1/a/ver_cont?format=json&prefix=001o/' + '&marker=001o/2', + swob.HTTPNotFound, {}, None) + + # expired object + self.app.register( + 'COPY', '/v1/a/ver_cont/001o/2', swob.HTTPNotFound, + {}, None) + self.app.register( + 'COPY', '/v1/a/ver_cont/001o/1', swob.HTTPCreated, + {}, None) + self.app.register( + 'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPOk, + {}, 
None) + + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) + req = Request.blank( + '/v1/a/c/o', + environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache, + 'CONTENT_LENGTH': '0'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '200 OK') + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + self.assertEqual(self.app.calls, [ + ('GET', prefix_listing_prefix + 'marker=&reverse=on'), + ('GET', prefix_listing_prefix + 'marker=001o/2'), + ('COPY', '/v1/a/ver_cont/001o/2'), + ('COPY', '/v1/a/ver_cont/001o/1'), + ('DELETE', '/v1/a/ver_cont/001o/1'), + ]) + + def test_denied_DELETE_of_versioned_object(self): + authorize_call = [] + self.app.register( + 'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register( + 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'marker=&reverse=on', + swob.HTTPOk, {}, + '[{"hash": "x", ' + '"last_modified": "2014-11-21T14:14:27.409100", ' + '"bytes": 3, ' + '"name": "001o/1", ' + '"content_type": "text/plain"}, ' + '{"hash": "y", ' + '"last_modified": "2014-11-21T14:23:02.206740", ' + '"bytes": 3, ' + '"name": "001o/2", ' + '"content_type": "text/plain"}]') + self.app.register( + 'GET', '/v1/a/ver_cont?format=json&prefix=001o/' + '&marker=001o/2', + swob.HTTPNotFound, {}, None) + self.app.register( + 'DELETE', '/v1/a/c/o', swob.HTTPForbidden, + {}, None) + + def fake_authorize(req): + authorize_call.append(req) + return swob.HTTPForbidden() + + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) + req = Request.blank( + '/v1/a/c/o', + environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache, + 'swift.authorize': fake_authorize, + 'CONTENT_LENGTH': '0'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '403 Forbidden') + self.assertEqual(len(authorize_call), 1) + self.assertRequestEqual(req, authorize_call[0]) + prefix_listing_prefix = 
'/v1/a/ver_cont?format=json&prefix=001o/&' + self.assertEqual(self.app.calls, [ + ('GET', prefix_listing_prefix + 'marker=&reverse=on'), + ('GET', prefix_listing_prefix + 'marker=001o/2'), + ]) + + def test_partially_upgraded_cluster(self): + old_versions = [ + {'hash': 'etag%d' % x, + 'last_modified': "2014-11-21T14:14:%02d.409100" % x, + 'bytes': 3, + 'name': '001o/%d' % x, + 'content_type': 'text/plain'} + for x in range(5)] + + # first container server can reverse + self.app.register( + 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'marker=&reverse=on', + swob.HTTPOk, {}, json.dumps(list(reversed(old_versions[2:])))) + # but all objects are already gone + self.app.register( + 'COPY', '/v1/a/ver_cont/001o/4', swob.HTTPNotFound, + {}, None) + self.app.register( + 'COPY', '/v1/a/ver_cont/001o/3', swob.HTTPNotFound, + {}, None) + self.app.register( + 'COPY', '/v1/a/ver_cont/001o/2', swob.HTTPNotFound, + {}, None) + + # second container server can't reverse + self.app.register( + 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'marker=001o/2&reverse=on', + swob.HTTPOk, {}, json.dumps(old_versions[3:])) + + # subsequent requests shouldn't reverse + self.app.register( + 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'marker=&end_marker=001o/2', + swob.HTTPOk, {}, json.dumps(old_versions[:1])) + self.app.register( + 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'marker=001o/0&end_marker=001o/2', + swob.HTTPOk, {}, json.dumps(old_versions[1:2])) + self.app.register( + 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'marker=001o/1&end_marker=001o/2', + swob.HTTPOk, {}, '[]') + self.app.register( + 'COPY', '/v1/a/ver_cont/001o/1', swob.HTTPOk, + {}, None) + self.app.register( + 'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPNoContent, + {}, None) + + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) + req = Request.blank( + '/v1/a/c/o', + environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache, + 'CONTENT_LENGTH': '0'}) + status, 
headers, body = self.call_vw(req) + self.assertEqual(status, '204 No Content') + prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + self.assertEqual(self.app.calls, [ + ('GET', prefix_listing_prefix + 'marker=&reverse=on'), + ('COPY', '/v1/a/ver_cont/001o/4'), + ('COPY', '/v1/a/ver_cont/001o/3'), + ('COPY', '/v1/a/ver_cont/001o/2'), + ('GET', prefix_listing_prefix + 'marker=001o/2&reverse=on'), + ('GET', prefix_listing_prefix + 'marker=&end_marker=001o/2'), + ('GET', prefix_listing_prefix + 'marker=001o/0&end_marker=001o/2'), + ('GET', prefix_listing_prefix + 'marker=001o/1&end_marker=001o/2'), + ('COPY', '/v1/a/ver_cont/001o/1'), + ('DELETE', '/v1/a/ver_cont/001o/1'), + ]) + + def test_partially_upgraded_cluster_single_result_on_second_page(self): + old_versions = [ + {'hash': 'etag%d' % x, + 'last_modified': "2014-11-21T14:14:%02d.409100" % x, + 'bytes': 3, + 'name': '001o/%d' % x, + 'content_type': 'text/plain'} + for x in range(5)] + + # first container server can reverse + self.app.register( + 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'marker=&reverse=on', + swob.HTTPOk, {}, json.dumps(list(reversed(old_versions[-2:])))) + # but both objects are already gone + self.app.register( + 'COPY', '/v1/a/ver_cont/001o/4', swob.HTTPNotFound, + {}, None) + self.app.register( + 'COPY', '/v1/a/ver_cont/001o/3', swob.HTTPNotFound, + {}, None) + + # second container server can't reverse + self.app.register( + 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'marker=001o/3&reverse=on', + swob.HTTPOk, {}, json.dumps(old_versions[4:])) + + # subsequent requests shouldn't reverse + self.app.register( + 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'marker=&end_marker=001o/3', + swob.HTTPOk, {}, json.dumps(old_versions[:2])) + self.app.register( + 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'marker=001o/1&end_marker=001o/3', + swob.HTTPOk, {}, json.dumps(old_versions[2:3])) + self.app.register( + 'GET', 
'/v1/a/ver_cont?format=json&prefix=001o/&' + 'marker=001o/2&end_marker=001o/3', + swob.HTTPOk, {}, '[]') + self.app.register( + 'COPY', '/v1/a/ver_cont/001o/2', swob.HTTPOk, + {}, None) + self.app.register( + 'DELETE', '/v1/a/ver_cont/001o/2', swob.HTTPNoContent, + {}, None) + + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) + req = Request.blank( + '/v1/a/c/o', + environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache, + 'CONTENT_LENGTH': '0'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '204 No Content') + prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + self.assertEqual(self.app.calls, [ + ('GET', prefix_listing_prefix + 'marker=&reverse=on'), + ('COPY', '/v1/a/ver_cont/001o/4'), + ('COPY', '/v1/a/ver_cont/001o/3'), + ('GET', prefix_listing_prefix + 'marker=001o/3&reverse=on'), + ('GET', prefix_listing_prefix + 'marker=&end_marker=001o/3'), + ('GET', prefix_listing_prefix + 'marker=001o/1&end_marker=001o/3'), + ('GET', prefix_listing_prefix + 'marker=001o/2&end_marker=001o/3'), + ('COPY', '/v1/a/ver_cont/001o/2'), + ('DELETE', '/v1/a/ver_cont/001o/2'), + ]) From ee6af69af429e1a278581531e509be92774f668d Mon Sep 17 00:00:00 2001 From: KATO Tomoyuki Date: Fri, 1 Apr 2016 23:37:25 +0900 Subject: [PATCH 069/141] [docs] Update Administrator Guide URL Change-Id: I7f6f4ffb7033d2a321362c91e05f1576847939b0 --- doc/source/api/object_api_v1_overview.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/api/object_api_v1_overview.rst b/doc/source/api/object_api_v1_overview.rst index 04bd0cde22..02c3dc57f8 100644 --- a/doc/source/api/object_api_v1_overview.rst +++ b/doc/source/api/object_api_v1_overview.rst @@ -7,8 +7,8 @@ metadata by using the Object Storage API, which is implemented as a set of Representational State Transfer (REST) web services. For an introduction to OpenStack Object Storage, see `Object -Storage ` -in the *OpenStack Cloud Administrator Guide*. 
+Storage ` +in the *OpenStack Administrator Guide*. You use the HTTPS (SSL) protocol to interact with Object Storage, and you use standard HTTP calls to perform API operations. You can also use From 4e412e0cd877be0da2b2716a301d1ae2b4c29a13 Mon Sep 17 00:00:00 2001 From: Cheng Li Date: Sun, 3 Apr 2016 21:07:00 +0800 Subject: [PATCH 070/141] Fix strings with positional arguments As described in the development guidelines, any message with more than one variable should use named interpolation instead of positional http://docs.openstack.org/developer/oslo.i18n/guidelines.html#adding-variables-to-translated-messages Change-Id: Ia25517cfb52037cf49d0a6ebf528344267fb22cd Closes-bug:#1559431 --- swift/account/auditor.py | 3 ++- swift/account/reaper.py | 5 ++-- swift/common/db.py | 6 +++-- swift/common/manager.py | 45 +++++++++++++++++++++------------- swift/common/utils.py | 16 +++++++----- swift/obj/auditor.py | 6 +++-- swift/obj/diskfile.py | 26 +++++++++++--------- swift/obj/expirer.py | 10 +++++--- swift/proxy/controllers/obj.py | 4 +-- 9 files changed, 74 insertions(+), 47 deletions(-) diff --git a/swift/account/auditor.py b/swift/account/auditor.py index 0f72999b90..dddc3d1d91 100644 --- a/swift/account/auditor.py +++ b/swift/account/auditor.py @@ -145,7 +145,8 @@ class AccountAuditor(Daemon): self.logger.increment('failures') self.account_failures += 1 self.logger.error( - _('Audit Failed for %s: %s'), path, str(e)) + _('Audit Failed for %(path)s: %(err)s'), + {'path': path, 'err': str(e)}) except (Exception, Timeout): self.logger.increment('failures') self.account_failures += 1 diff --git a/swift/account/reaper.py b/swift/account/reaper.py index a88f612918..ea7307183b 100644 --- a/swift/account/reaper.py +++ b/swift/account/reaper.py @@ -313,8 +313,9 @@ class AccountReaper(Daemon): delete_timestamp = Timestamp(info['delete_timestamp']) if self.stats_containers_remaining and \ begin - float(delete_timestamp) >= self.reap_not_done_after: - self.logger.warning(_('Account %s has
not been reaped since %s') % - (account, delete_timestamp.isoformat)) + self.logger.warning( + _('Account %(account)s has not been reaped since %(time)s') % + {'account': account, 'time': delete_timestamp.isoformat}) return True def reap_container(self, account, account_partition, account_nodes, diff --git a/swift/common/db.py b/swift/common/db.py index 1ae1696440..3fe20722a9 100644 --- a/swift/common/db.py +++ b/swift/common/db.py @@ -350,8 +350,10 @@ class DatabaseBroker(object): raise quar_path = "%s-%s" % (quar_path, uuid4().hex) renamer(self.db_dir, quar_path, fsync=False) - detail = _('Quarantined %s to %s due to %s database') % \ - (self.db_dir, quar_path, exc_hint) + detail = _('Quarantined %(db_dir)s to %(quar_path)s due to ' + '%(exc_hint)s database') % {'db_dir': self.db_dir, + 'quar_path': quar_path, + 'exc_hint': exc_hint} self.logger.error(detail) raise sqlite3.DatabaseError(detail) diff --git a/swift/common/manager.py b/swift/common/manager.py index 54f84c5e1b..92d8f4a6d6 100644 --- a/swift/common/manager.py +++ b/swift/common/manager.py @@ -288,7 +288,8 @@ class Manager(object): for server, killed_pid in watch_server_pids(server_pids, interval=kill_wait, **kwargs): - print(_("%s (%s) appears to have stopped") % (server, killed_pid)) + print(_("%(server)s (%(pid)s) appears to have stopped") % + {'server': server, 'pid': killed_pid}) killed_pids.add(killed_pid) if not killed_pids.symmetric_difference(signaled_pids): # all processes have been stopped @@ -300,12 +301,15 @@ class Manager(object): if not killed_pids.issuperset(pids): # some pids of this server were not killed if kill_after_timeout: - print(_('Waited %s seconds for %s to die; killing') % ( - kill_wait, server)) + print(_('Waited %(kill_wait)s seconds for %(server)s ' + 'to die; killing') % + {'kill_wait': kill_wait, 'server': server}) # Send SIGKILL to all remaining pids for pid in set(pids.keys()) - killed_pids: - print(_('Signal %s pid: %s signal: %s') % ( - server, pid, signal.SIGKILL)) 
+ print(_('Signal %(server)s pid: %(pid)s signal: ' + '%(signal)s') % {'server': server, + 'pid': pid, + 'signal': signal.SIGKILL}) # Send SIGKILL to process group try: kill_group(pid, signal.SIGKILL) @@ -314,8 +318,9 @@ class Manager(object): if e.errno != errno.ESRCH: raise e else: - print(_('Waited %s seconds for %s to die; giving up') % ( - kill_wait, server)) + print(_('Waited %(kill_wait)s seconds for %(server)s ' + 'to die; giving up') % + {'kill_wait': kill_wait, 'server': server}) return 1 @command @@ -498,8 +503,9 @@ class Server(object): # maybe there's a config file(s) out there, but I couldn't find it! if not kwargs.get('quiet'): if number: - print(_('Unable to locate config number %s for %s') - % (number, self.server)) + print(_('Unable to locate config number %(number)s for' + ' %(server)s') % + {'number': number, 'server': self.server}) else: print(_('Unable to locate config for %s') % self.server) if kwargs.get('verbose') and not kwargs.get('quiet'): @@ -556,8 +562,9 @@ class Server(object): continue try: if sig != signal.SIG_DFL: - print(_('Signal %s pid: %s signal: %s') % (self.server, - pid, sig)) + print(_('Signal %(server)s pid: %(pid)s signal: ' + '%(signal)s') % + {'server': self.server, 'pid': pid, 'signal': sig}) safe_kill(pid, sig, 'swift-%s' % self.server) except InvalidPidFileException as e: if kwargs.get('verbose'): @@ -616,14 +623,16 @@ class Server(object): kwargs['quiet'] = True conf_files = self.conf_files(**kwargs) if conf_files: - print(_("%s #%d not running (%s)") % (self.server, number, - conf_files[0])) + print(_("%(server)s #%(number)d not running (%(conf)s)") % + {'server': self.server, 'number': number, + 'conf': conf_files[0]}) else: print(_("No %s running") % self.server) return 1 for pid, pid_file in pids.items(): conf_file = self.get_conf_file_name(pid_file) - print(_("%s running (%s - %s)") % (self.server, pid, conf_file)) + print(_("%(server)s running (%(pid)s - %(conf)s)") % + {'server': self.server, 'pid': pid, 
'conf': conf_file}) return 0 def spawn(self, conf_file, once=False, wait=True, daemon=True, **kwargs): @@ -716,11 +725,13 @@ class Server(object): # any unstarted instances if conf_file in conf_files: already_started = True - print(_("%s running (%s - %s)") % - (self.server, pid, conf_file)) + print(_("%(server)s running (%(pid)s - %(conf)s)") % + {'server': self.server, 'pid': pid, 'conf': conf_file}) elif not kwargs.get('number', 0): already_started = True - print(_("%s running (%s - %s)") % (self.server, pid, pid_file)) + print(_("%(server)s running (%(pid)s - %(pid_file)s)") % + {'server': self.server, 'pid': pid, + 'pid_file': pid_file}) if already_started: print(_("%s already started...") % self.server) diff --git a/swift/common/utils.py b/swift/common/utils.py index 210dd9f4e0..fcfe86a8dd 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -672,8 +672,9 @@ def fsync_dir(dirpath): if err.errno == errno.ENOTDIR: # Raise error if someone calls fsync_dir on a non-directory raise - logging.warning(_("Unable to perform fsync() on directory %s: %s"), - dirpath, os.strerror(err.errno)) + logging.warning(_('Unable to perform fsync() on directory %(dir)s:' + ' %(err)s'), + {'dir': dirpath, 'err': os.strerror(err.errno)}) finally: if dirfd: os.close(dirfd) @@ -1243,10 +1244,13 @@ class LoggerFileObject(object): self.logger.error( _('%s: Connection reset by peer'), self.log_type) else: - self.logger.error(_('%s: %s'), self.log_type, value) + self.logger.error(_('%(type)s: %(value)s'), + {'type': self.log_type, 'value': value}) def writelines(self, values): - self.logger.error(_('%s: %s'), self.log_type, '#012'.join(values)) + self.logger.error(_('%(type)s: %(value)s'), + {'type': self.log_type, + 'value': '#012'.join(values)}) def close(self): pass @@ -2214,8 +2218,8 @@ def readconf(conf_path, section_name=None, log_name=None, defaults=None, if c.has_section(section_name): conf = dict(c.items(section_name)) else: - print(_("Unable to find %s config section 
in %s") % - (section_name, conf_path)) + print(_("Unable to find %(section)s config section in %(conf)s") % + {'section': section_name, 'conf': conf_path}) sys.exit(1) if "log_name" not in conf: if log_name is not None: diff --git a/swift/obj/auditor.py b/swift/obj/auditor.py index 3b5f2de785..4c4d24391c 100644 --- a/swift/obj/auditor.py +++ b/swift/obj/auditor.py @@ -104,8 +104,10 @@ class AuditorWorker(object): description = _(' - parallel, %s') % device_dir_str else: description = _(' - %s') % device_dir_str - self.logger.info(_('Begin object audit "%s" mode (%s%s)') % - (mode, self.auditor_type, description)) + self.logger.info(_('Begin object audit "%(mode)s" mode (%(audi_type)s' + '%(description)s)') % + {'mode': mode, 'audi_type': self.auditor_type, + 'description': description}) begin = reported = time.time() self.total_bytes_processed = 0 self.total_files_processed = 0 diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index e4a72cab5d..991dc5228a 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -367,7 +367,8 @@ def object_audit_location_generator(devices, mount_check=True, logger=None, except OSError as e: if logger: logger.debug( - _('Skipping %s: %s') % (device_dir, e.strerror)) + _('Skipping %(dir)s: %(err)s') % {'dir': device_dir, + 'err': e.strerror}) continue for dir_ in dirs: if not dir_.startswith(DATADIR_BASE): @@ -418,13 +419,15 @@ def get_auditor_status(datadir_path, logger, auditor_type): status = statusfile.read() except (OSError, IOError) as e: if e.errno != errno.ENOENT and logger: - logger.warning(_('Cannot read %s (%s)') % (auditor_status, e)) + logger.warning(_('Cannot read %(auditor_status)s (%(err)s)') % + {'auditor_status': auditor_status, 'err': e}) return listdir(datadir_path) try: status = json.loads(status) except ValueError as e: - logger.warning(_('Loading JSON from %s failed (%s)') % ( - auditor_status, e)) + logger.warning(_('Loading JSON from %(auditor_status)s failed' + ' (%(err)s)') % + 
{'auditor_status': auditor_status, 'err': e}) return listdir(datadir_path) return status['partitions'] @@ -438,7 +441,8 @@ def update_auditor_status(datadir_path, logger, partitions, auditor_type): statusfile.write(status) except (OSError, IOError) as e: if logger: - logger.warning(_('Cannot write %s (%s)') % (auditor_status, e)) + logger.warning(_('Cannot write %(auditor_status)s (%(err)s)') % + {'auditor_status': auditor_status, 'err': e}) def clear_auditor_status(devices, auditor_type="ALL"): @@ -2529,8 +2533,8 @@ class ECDiskFileWriter(BaseDiskFileWriter): if err.errno not in (errno.ENOSPC, errno.EDQUOT): # re-raise to catch all handler raise - msg = (_('No space left on device for %s (%s)') % - (durable_file_path, err)) + msg = (_('No space left on device for %(file)s (%(err)s)') % + {'file': durable_file_path, 'err': err}) self.manager.logger.error(msg) exc = DiskFileNoSpace(str(err)) else: @@ -2538,11 +2542,11 @@ class ECDiskFileWriter(BaseDiskFileWriter): self.manager.cleanup_ondisk_files(self._datadir)['files'] except OSError as os_err: self.manager.logger.exception( - _('Problem cleaning up %s (%s)') % - (self._datadir, os_err)) + _('Problem cleaning up %(datadir)s (%(err)s)') % + {'datadir': self._datadir, 'err': os_err}) except Exception as err: - msg = (_('Problem writing durable state file %s (%s)') % - (durable_file_path, err)) + msg = (_('Problem writing durable state file %(file)s (%(err)s)') % + {'file': durable_file_path, 'err': err}) self.manager.logger.exception(msg) exc = DiskFileError(msg) if exc: diff --git a/swift/obj/expirer.py b/swift/obj/expirer.py index 7f26f129c3..6ecfd24829 100644 --- a/swift/obj/expirer.py +++ b/swift/obj/expirer.py @@ -194,8 +194,9 @@ class ObjectExpirer(Daemon): acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT)) except (Exception, Timeout) as err: self.logger.exception( - _('Exception while deleting container %s %s') % - (container, str(err))) + _('Exception while deleting container %(container)s ' + 
'%(err)s') % {'container': container, + 'err': str(err)}) self.logger.debug('Run end') self.report(final=True) except (Exception, Timeout): @@ -266,8 +267,9 @@ class ObjectExpirer(Daemon): except (Exception, Timeout) as err: self.logger.increment('errors') self.logger.exception( - _('Exception while deleting object %s %s %s') % - (container, obj, str(err))) + _('Exception while deleting object %(container)s %(obj)s' + ' %(err)s') % {'container': container, + 'obj': obj, 'err': str(err)}) self.logger.timing_since('timing', start_time) self.report() diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index cda2546c69..0e85c06d21 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -2403,8 +2403,8 @@ class ECObjectController(BaseObjectController): status_type = 'commit' self.app.exception_occurred( conn.node, _('Object'), - _('Trying to get %s status of PUT to %s') % ( - status_type, req.path)) + _('Trying to get %(status_type)s status of PUT to %(path)s') % + {'status_type': status_type, 'path': req.path}) return (conn, resp) def _get_put_responses(self, req, putters, num_nodes, final_phase, From a460e5affe7089437867afa1ed49ea677c5f4e34 Mon Sep 17 00:00:00 2001 From: Thiago da Silva Date: Mon, 4 Apr 2016 16:27:14 -0400 Subject: [PATCH 071/141] added javaswift to associated projects Change-Id: I139ac3acb1c7d2498e87df554b81824ada2dbd00 Signed-off-by: Thiago da Silva --- doc/source/associated_projects.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/associated_projects.rst b/doc/source/associated_projects.rst index 8f2ce5f072..b92dc4ed21 100644 --- a/doc/source/associated_projects.rst +++ b/doc/source/associated_projects.rst @@ -26,6 +26,7 @@ Application Bindings * `swift_client `_ - Small but powerful Ruby client to interact with OpenStack Swift * `nightcrawler_swift `_ - This Ruby gem teleports your assets to a OpenStack Swift bucket/container * `swift storage `_ - Simple OpenStack Swift storage 
client. + * `javaswift `_ - Collection of Java tools for Swift Authentication -------------- From 950b601a9c9e87661e35c6ed7a97ae9611560bc5 Mon Sep 17 00:00:00 2001 From: Sivasathurappan Radhakrishnan Date: Tue, 5 Apr 2016 22:45:17 +0000 Subject: [PATCH 072/141] Modified REPLICATE request to use replication_ip direct_client.direct_get_suffix_hashes doesn't use replication ip and port for REPLICATE request. Since we have an option of doing replication in separate network, we can add replication_ip and port while creating rings if not it will get filled in with the regular node's ip. Change-Id: I34067df27042fc3146b795191ab8043ee1aed3ce Closes-Bug:1566395 --- swift/common/direct_client.py | 5 +++-- test/unit/common/test_direct_client.py | 16 +++++++++++++++- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/swift/common/direct_client.py b/swift/common/direct_client.py index 0dea8acefc..a507901edc 100644 --- a/swift/common/direct_client.py +++ b/swift/common/direct_client.py @@ -482,8 +482,9 @@ def direct_get_suffix_hashes(node, part, suffixes, conn_timeout=5, path = '/%s' % '-'.join(suffixes) with Timeout(conn_timeout): - conn = http_connect(node['ip'], node['port'], node['device'], part, - 'REPLICATE', path, headers=gen_headers(headers)) + conn = http_connect(node['replication_ip'], node['replication_port'], + node['device'], part, 'REPLICATE', path, + headers=gen_headers(headers)) with Timeout(response_timeout): resp = conn.getresponse() if not is_success(resp.status): diff --git a/test/unit/common/test_direct_client.py b/test/unit/common/test_direct_client.py index 664a6227b1..2bcc94c13d 100644 --- a/test/unit/common/test_direct_client.py +++ b/test/unit/common/test_direct_client.py @@ -19,6 +19,7 @@ import os from contextlib import contextmanager from hashlib import md5 import time +import pickle import mock import six @@ -97,7 +98,8 @@ def mocked_http_conn(*args, **kwargs): class TestDirectClient(unittest.TestCase): def setUp(self): - self.node = 
{'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'} + self.node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda', + 'replication_ip': '1.2.3.5', 'replication_port': '7000'} self.part = '0' self.account = u'\u062a account' @@ -617,6 +619,18 @@ class TestDirectClient(unittest.TestCase): self.assertEqual(err.http_status, 503) self.assertTrue('DELETE' in str(err)) + def test_direct_get_suffix_hashes(self): + data = {'a83': 'c130a2c17ed45102aada0f4eee69494ff'} + body = pickle.dumps(data) + with mocked_http_conn(200, {}, body) as conn: + resp = direct_client.direct_get_suffix_hashes(self.node, + self.part, ['a83']) + self.assertEqual(conn.method, 'REPLICATE') + self.assertEqual(conn.path, '/sda/0/a83') + self.assertEqual(conn.host, '1.2.3.5') + self.assertEqual(conn.port, '7000') + self.assertEqual(data, resp) + def test_direct_put_object_with_content_length(self): contents = six.StringIO('123456') From e15bceaa7e541c77f26a1f11ee2cbddbc871cbf1 Mon Sep 17 00:00:00 2001 From: Kota Tsuyuzaki Date: Mon, 21 Dec 2015 03:13:50 -0800 Subject: [PATCH 073/141] Refactor CORS unit tests This is a follow-up patch for https://review.openstack.org/#/c/258392/ That one added good unit test cases for various kinds of allowe_origin like '*' or ''(empty). However, the result of handling in Swift proxy will depend on strict_cors_mode option configuration. This patch refactors the unit tests to split out for strict_cors_mode = on/off and add some missing unit tests for each case. 
Change-Id: I55f7cd279436b5c9f71d81fecf06021380e35579 --- test/unit/proxy/test_server.py | 202 ++++++++++++++++----------------- 1 file changed, 101 insertions(+), 101 deletions(-) diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 8dbd3e799a..89a608961b 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -5839,21 +5839,18 @@ class TestObjectController(unittest.TestCase): 7) self.assertEqual('999', resp.headers['access-control-max-age']) - def test_CORS_valid(self): + def _get_CORS_response(self, container_cors, strict_mode, object_get=None): with save_globals(): controller = ReplicatedObjectController( self.app, 'a', 'c', 'o') def stubContainerInfo(*args): return { - 'cors': { - 'allow_origin': 'http://not.foo.bar', - 'expose_headers': 'X-Object-Meta-Color ' - 'X-Object-Meta-Color-Ex' - } + 'cors': container_cors } + controller.container_info = stubContainerInfo - controller.app.strict_cors_mode = False + controller.app.strict_cors_mode = strict_mode def objectGET(controller, req): return Response(headers={ @@ -5861,116 +5858,119 @@ class TestObjectController(unittest.TestCase): 'X-Super-Secret': 'hush', }) - req = Request.blank( - '/v1/a/c/o.jpg', - {'REQUEST_METHOD': 'GET'}, - headers={'Origin': 'http://foo.bar'}) - - resp = cors_validation(objectGET)(controller, req) - - self.assertEqual(200, resp.status_int) - self.assertEqual('http://foo.bar', - resp.headers['access-control-allow-origin']) - self.assertEqual('red', resp.headers['x-object-meta-color']) - # X-Super-Secret is in the response, but not "exposed" - self.assertEqual('hush', resp.headers['x-super-secret']) - self.assertIn('access-control-expose-headers', resp.headers) - exposed = set( - h.strip() for h in - resp.headers['access-control-expose-headers'].split(',')) - expected_exposed = set(['cache-control', 'content-language', - 'content-type', 'expires', 'last-modified', - 'pragma', 'etag', 'x-timestamp', - 'x-trans-id', 
'x-object-meta-color', - 'x-object-meta-color-ex']) - self.assertEqual(expected_exposed, exposed) - - controller.app.strict_cors_mode = True - req = Request.blank( - '/v1/a/c/o.jpg', - {'REQUEST_METHOD': 'GET'}, - headers={'Origin': 'http://foo.bar'}) - - resp = cors_validation(objectGET)(controller, req) - - self.assertEqual(200, resp.status_int) - self.assertNotIn('access-control-expose-headers', resp.headers) - self.assertNotIn('access-control-allow-origin', resp.headers) - - controller.app.strict_cors_mode = False - - def stubContainerInfoWithAsteriskAllowOrigin(*args): - return { - 'cors': { - 'allow_origin': '*' - } - } - controller.container_info = \ - stubContainerInfoWithAsteriskAllowOrigin + mock_object_get = object_get or objectGET req = Request.blank( '/v1/a/c/o.jpg', {'REQUEST_METHOD': 'GET'}, headers={'Origin': 'http://foo.bar'}) - resp = cors_validation(objectGET)(controller, req) + resp = cors_validation(mock_object_get)(controller, req) - self.assertEqual(200, resp.status_int) - self.assertEqual('*', - resp.headers['access-control-allow-origin']) + return resp - def stubContainerInfoWithEmptyAllowOrigin(*args): - return { - 'cors': { - 'allow_origin': '' - } - } - controller.container_info = stubContainerInfoWithEmptyAllowOrigin + def test_CORS_valid_non_strict(self): + # test expose_headers to non-allowed origins + container_cors = {'allow_origin': 'http://not.foo.bar', + 'expose_headers': 'X-Object-Meta-Color ' + 'X-Object-Meta-Color-Ex'} + resp = self._get_CORS_response( + container_cors=container_cors, strict_mode=False) - req = Request.blank( - '/v1/a/c/o.jpg', - {'REQUEST_METHOD': 'GET'}, - headers={'Origin': 'http://foo.bar'}) + self.assertEqual(200, resp.status_int) + self.assertEqual('http://foo.bar', + resp.headers['access-control-allow-origin']) + self.assertEqual('red', resp.headers['x-object-meta-color']) + # X-Super-Secret is in the response, but not "exposed" + self.assertEqual('hush', resp.headers['x-super-secret']) + 
self.assertIn('access-control-expose-headers', resp.headers) + exposed = set( + h.strip() for h in + resp.headers['access-control-expose-headers'].split(',')) + expected_exposed = set(['cache-control', 'content-language', + 'content-type', 'expires', 'last-modified', + 'pragma', 'etag', 'x-timestamp', + 'x-trans-id', 'x-object-meta-color', + 'x-object-meta-color-ex']) + self.assertEqual(expected_exposed, exposed) - resp = cors_validation(objectGET)(controller, req) + # test allow_origin * + container_cors = {'allow_origin': '*'} - self.assertEqual(200, resp.status_int) - self.assertEqual('http://foo.bar', - resp.headers['access-control-allow-origin']) + resp = self._get_CORS_response( + container_cors=container_cors, strict_mode=False) + self.assertEqual(200, resp.status_int) + self.assertEqual('*', + resp.headers['access-control-allow-origin']) + + # test allow_origin empty + container_cors = {'allow_origin': ''} + resp = self._get_CORS_response( + container_cors=container_cors, strict_mode=False) + self.assertEqual(200, resp.status_int) + self.assertEqual('http://foo.bar', + resp.headers['access-control-allow-origin']) + + def test_CORS_valid_strict(self): + # test expose_headers to non-allowed origins + container_cors = {'allow_origin': 'http://not.foo.bar', + 'expose_headers': 'X-Object-Meta-Color ' + 'X-Object-Meta-Color-Ex'} + resp = self._get_CORS_response( + container_cors=container_cors, strict_mode=True) + + self.assertEqual(200, resp.status_int) + self.assertNotIn('access-control-expose-headers', resp.headers) + self.assertNotIn('access-control-allow-origin', resp.headers) + + # test allow_origin * + container_cors = {'allow_origin': '*'} + + resp = self._get_CORS_response( + container_cors=container_cors, strict_mode=True) + self.assertEqual(200, resp.status_int) + self.assertEqual('*', + resp.headers['access-control-allow-origin']) + self.assertEqual('red', resp.headers['x-object-meta-color']) + # X-Super-Secret is in the response, but not "exposed" + 
self.assertEqual('hush', resp.headers['x-super-secret']) + self.assertIn('access-control-expose-headers', resp.headers) + exposed = set( + h.strip() for h in + resp.headers['access-control-expose-headers'].split(',')) + expected_exposed = set(['cache-control', 'content-language', + 'content-type', 'expires', 'last-modified', + 'pragma', 'etag', 'x-timestamp', + 'x-trans-id', 'x-object-meta-color']) + self.assertEqual(expected_exposed, exposed) + + # test allow_origin empty + container_cors = {'allow_origin': ''} + resp = self._get_CORS_response( + container_cors=container_cors, strict_mode=True) + self.assertNotIn('access-control-expose-headers', resp.headers) + self.assertNotIn('access-control-allow-origin', resp.headers) def test_CORS_valid_with_obj_headers(self): - with save_globals(): - controller = ReplicatedObjectController( - self.app, 'a', 'c', 'o') + container_cors = {'allow_origin': 'http://foo.bar'} - def stubContainerInfo(*args): - return { - 'cors': { - 'allow_origin': 'http://foo.bar' - } - } - controller.container_info = stubContainerInfo + def objectGET(controller, req): + return Response(headers={ + 'X-Object-Meta-Color': 'red', + 'X-Super-Secret': 'hush', + 'Access-Control-Allow-Origin': 'http://obj.origin', + 'Access-Control-Expose-Headers': 'x-trans-id' + }) - def objectGET(controller, req): - return Response(headers={ - 'X-Object-Meta-Color': 'red', - 'X-Super-Secret': 'hush', - 'Access-Control-Allow-Origin': 'http://obj.origin', - 'Access-Control-Expose-Headers': 'x-trans-id' - }) + resp = self._get_CORS_response( + container_cors=container_cors, strict_mode=True, + object_get=objectGET) - req = Request.blank( - '/v1/a/c/o.jpg', - {'REQUEST_METHOD': 'GET'}, - headers={'Origin': 'http://foo.bar'}) - - resp = cors_validation(objectGET)(controller, req) - - self.assertEqual(200, resp.status_int) - self.assertEqual('http://obj.origin', - resp.headers['access-control-allow-origin']) - self.assertEqual('x-trans-id', - 
resp.headers['access-control-expose-headers']) + self.assertEqual(200, resp.status_int) + self.assertEqual('http://obj.origin', + resp.headers['access-control-allow-origin']) + self.assertEqual('x-trans-id', + resp.headers['access-control-expose-headers']) def _gather_x_container_headers(self, controller_call, req, *connect_args, **kwargs): From 5d56f40f04fa9211a2e392d667fe395f38d2cca5 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 6 Apr 2016 11:48:48 +0100 Subject: [PATCH 074/141] Make DirectClientException report correct ip and port When direct_client.direct_get_suffix_hashes raises a DirectClientException the exception message and variables should report the replication_ip and replication_port, as opposed to the ip and port values reported for all other case when the exception is raised. Add option to override ip and port reported in DirectClientException. Also adds unit tests to verify both cases. Related-Bug: 1566395 Change-Id: If3d952847c7199f4e9f6164858085367266386d2 --- swift/common/direct_client.py | 14 +++++++--- test/unit/common/test_direct_client.py | 36 +++++++++++++++++++++++++- 2 files changed, 45 insertions(+), 5 deletions(-) diff --git a/swift/common/direct_client.py b/swift/common/direct_client.py index a507901edc..94be486d74 100644 --- a/swift/common/direct_client.py +++ b/swift/common/direct_client.py @@ -39,13 +39,16 @@ from swift.common.utils import quote class DirectClientException(ClientException): - def __init__(self, stype, method, node, part, path, resp): + def __init__(self, stype, method, node, part, path, resp, host=None): + # host can be used to override the node ip and port reported in + # the exception + host = host if host is not None else node full_path = quote('/%s/%s%s' % (node['device'], part, path)) msg = '%s server %s:%s direct %s %r gave status %s' % ( - stype, node['ip'], node['port'], method, full_path, resp.status) + stype, host['ip'], host['port'], method, full_path, resp.status) headers = 
HeaderKeyDict(resp.getheaders()) super(DirectClientException, self).__init__( - msg, http_host=node['ip'], http_port=node['port'], + msg, http_host=host['ip'], http_port=host['port'], http_device=node['device'], http_status=resp.status, http_reason=resp.reason, http_headers=headers) @@ -489,7 +492,10 @@ def direct_get_suffix_hashes(node, part, suffixes, conn_timeout=5, resp = conn.getresponse() if not is_success(resp.status): raise DirectClientException('Object', 'REPLICATE', - node, part, path, resp) + node, part, path, resp, + host={'ip': node['replication_ip'], + 'port': node['replication_port']} + ) return pickle.loads(resp.read()) diff --git a/test/unit/common/test_direct_client.py b/test/unit/common/test_direct_client.py index 2bcc94c13d..503a941186 100644 --- a/test/unit/common/test_direct_client.py +++ b/test/unit/common/test_direct_client.py @@ -26,9 +26,10 @@ import six from six.moves import urllib from swift.common import direct_client +from swift.common.direct_client import DirectClientException from swift.common.exceptions import ClientException from swift.common.header_key_dict import HeaderKeyDict -from swift.common.utils import Timestamp +from swift.common.utils import Timestamp, quote from swift.common.swob import RESPONSE_REASONS from swift.common.storage_policy import POLICIES from six.moves.http_client import HTTPException @@ -631,6 +632,28 @@ class TestDirectClient(unittest.TestCase): self.assertEqual(conn.port, '7000') self.assertEqual(data, resp) + def _test_direct_get_suffix_hashes_fail(self, status_code): + with mocked_http_conn(status_code): + with self.assertRaises(DirectClientException) as cm: + direct_client.direct_get_suffix_hashes( + self.node, self.part, ['a83', 'b52']) + self.assertIn('REPLICATE', cm.exception.message) + self.assertIn(quote('/%s/%s/a83-b52' + % (self.node['device'], self.part)), + cm.exception.message) + self.assertIn(self.node['replication_ip'], cm.exception.message) + self.assertIn(self.node['replication_port'], 
cm.exception.message) + self.assertEqual(self.node['replication_ip'], cm.exception.http_host) + self.assertEqual(self.node['replication_port'], cm.exception.http_port) + self.assertEqual(self.node['device'], cm.exception.http_device) + self.assertEqual(status_code, cm.exception.http_status) + + def test_direct_get_suffix_hashes_503(self): + self._test_direct_get_suffix_hashes_fail(503) + + def test_direct_get_suffix_hashes_507(self): + self._test_direct_get_suffix_hashes_fail(507) + def test_direct_put_object_with_content_length(self): contents = six.StringIO('123456') @@ -720,6 +743,17 @@ class TestDirectClient(unittest.TestCase): retries=2, error_log=logger.error) self.assertEqual('DELETE', conn.method) self.assertEqual(err_ctx.exception.http_status, 500) + self.assertIn('DELETE', err_ctx.exception.message) + self.assertIn(quote('/%s/%s/%s/%s/%s' + % (self.node['device'], self.part, self.account, + self.container, self.obj)), + err_ctx.exception.message) + self.assertIn(self.node['ip'], err_ctx.exception.message) + self.assertIn(self.node['port'], err_ctx.exception.message) + self.assertEqual(self.node['ip'], err_ctx.exception.http_host) + self.assertEqual(self.node['port'], err_ctx.exception.http_port) + self.assertEqual(self.node['device'], err_ctx.exception.http_device) + self.assertEqual(500, err_ctx.exception.http_status) self.assertEqual([mock.call(1), mock.call(2)], mock_sleep.call_args_list) error_lines = logger.get_lines_for_level('error') From 0bf518e3b0eeaf66653db6972525701cacfe6333 Mon Sep 17 00:00:00 2001 From: Thiago da Silva Date: Wed, 6 Apr 2016 16:58:36 -0400 Subject: [PATCH 075/141] remove unused current_status method Change-Id: I574919eaa14cadc800f3a1f6014221ee382ee7e0 Signed-off-by: Thiago da Silva --- swift/proxy/controllers/obj.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index 93ffa9a845..70400fc143 100644 --- a/swift/proxy/controllers/obj.py +++ 
b/swift/proxy/controllers/obj.py @@ -1635,16 +1635,6 @@ class ECPutter(object): self.queue = None self.state = NO_DATA_SENT - def current_status(self): - """ - Returns the current status of the response. - - A response starts off with no current status, then may or may not have - a status of 100 for some time, and then ultimately has a final status - like 200, 404, et cetera. - """ - return self.resp.status - def await_response(self, timeout, informational=False): """ Get 100-continue response indicating the end of 1st phase of a 2-phase From 95efd3f9035ec4141e1b182516f040a59a3e5aa6 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Wed, 23 Mar 2016 13:51:47 -0700 Subject: [PATCH 076/141] Fix infinite recursion during logging when syslog is down Change-Id: Ia9ecffc88ce43616977e141498e5ee404f2c29c4 --- swift/common/utils.py | 57 +++++++++++++++++++++++++++------- test/unit/common/test_utils.py | 42 +++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 11 deletions(-) diff --git a/swift/common/utils.py b/swift/common/utils.py index fcfe86a8dd..68c6025f29 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -1233,24 +1233,59 @@ class NullLogger(object): class LoggerFileObject(object): + # Note: this is greenthread-local storage + _cls_thread_local = threading.local() + def __init__(self, logger, log_type='STDOUT'): self.logger = logger self.log_type = log_type def write(self, value): - value = value.strip() - if value: - if 'Connection reset by peer' in value: - self.logger.error( - _('%s: Connection reset by peer'), self.log_type) - else: - self.logger.error(_('%(type)s: %(value)s'), - {'type': self.log_type, 'value': value}) + # We can get into a nasty situation when logs are going to syslog + # and syslog dies. 
+ # + # It's something like this: + # + # (A) someone logs something + # + # (B) there's an exception in sending to /dev/log since syslog is + # not working + # + # (C) logging takes that exception and writes it to stderr (see + # logging.Handler.handleError) + # + # (D) stderr was replaced with a LoggerFileObject at process start, + # so the LoggerFileObject takes the provided string and tells + # its logger to log it (to syslog, naturally). + # + # Then, steps B through D repeat until we run out of stack. + if getattr(self._cls_thread_local, 'already_called_write', False): + return + + self._cls_thread_local.already_called_write = True + try: + value = value.strip() + if value: + if 'Connection reset by peer' in value: + self.logger.error( + _('%s: Connection reset by peer'), self.log_type) + else: + self.logger.error(_('%(type)s: %(value)s'), + {'type': self.log_type, 'value': value}) + finally: + self._cls_thread_local.already_called_write = False def writelines(self, values): - self.logger.error(_('%(type)s: %(value)s'), - {'type': self.log_type, - 'value': '#012'.join(values)}) + if getattr(self._cls_thread_local, 'already_called_writelines', False): + return + + self._cls_thread_local.already_called_writelines = True + try: + self.logger.error(_('%(type)s: %(value)s'), + {'type': self.log_type, + 'value': '#012'.join(values)}) + finally: + self._cls_thread_local.already_called_writelines = False def close(self): pass diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 409990ad4a..c2a9e93cc3 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -1264,6 +1264,48 @@ class TestUtils(unittest.TestCase): self.assertRaises(IOError, lfo.readline, 1024) lfo.tell() + def test_LoggerFileObject_recursion(self): + crashy_calls = [0] + + class CrashyLogger(logging.Handler): + def emit(self, record): + crashy_calls[0] += 1 + try: + # Pretend to be trying to send to syslog, but syslogd is + # dead. 
We need the raise here to set sys.exc_info. + raise socket.error(errno.ENOTCONN, "This is an ex-syslog") + except socket.error: + self.handleError(record) + + logger = logging.getLogger() + logger.addHandler(CrashyLogger()) + + # Set up some real file descriptors for stdio. If you run + # nosetests with "-s", you already have real files there, but + # otherwise they're StringIO objects. + # + # In any case, since capture_stdio() closes sys.stdin and friends, + # we'd want to set up some sacrificial files so as to not goof up + # the testrunner. + new_stdin = open(os.devnull, 'r+b') + new_stdout = open(os.devnull, 'w+b') + new_stderr = open(os.devnull, 'w+b') + + with contextlib.closing(new_stdin), contextlib.closing(new_stdout), \ + contextlib.closing(new_stderr): + # logging.raiseExceptions is set to False in test/__init__.py, but + # is True in Swift daemons, and the error doesn't manifest without + # it. + with mock.patch('sys.stdin', new_stdin), \ + mock.patch('sys.stdout', new_stdout), \ + mock.patch('sys.stderr', new_stderr), \ + mock.patch.object(logging, 'raiseExceptions', True): + # Note: since stdio is hooked up to /dev/null in here, using + # pdb is basically impossible. Sorry about that. 
+ utils.capture_stdio(logger) + logger.info("I like ham") + self.assertTrue(crashy_calls[0], 1) + def test_parse_options(self): # Get a file that is definitely on disk with NamedTemporaryFile() as f: From edc413b85ec2b703d7506be9c4801eb347611c58 Mon Sep 17 00:00:00 2001 From: Nguyen Hung Phuong Date: Thu, 7 Apr 2016 13:31:26 +0700 Subject: [PATCH 077/141] Fix typos in Swift files Change-Id: I39dbf55c094c42347b57ef67520abff9e6fc24bc --- doc/source/cors.rst | 2 +- etc/swift.conf-sample | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/cors.rst b/doc/source/cors.rst index 1de1d5f6a0..3dc07d3e3a 100644 --- a/doc/source/cors.rst +++ b/doc/source/cors.rst @@ -24,7 +24,7 @@ The supported headers are, +------------------------------------------------+------------------------------+ | X-Container-Meta-Access-Control-Expose-Headers | Headers exposed to the user | | | agent (e.g. browser) in the | -| | the actual request response. | +| | actual request response. | | | Space separated. | +------------------------------------------------+------------------------------+ diff --git a/etc/swift.conf-sample b/etc/swift.conf-sample index 5bd57e6864..78684730e2 100644 --- a/etc/swift.conf-sample +++ b/etc/swift.conf-sample @@ -1,7 +1,7 @@ [swift-hash] # swift_hash_path_suffix and swift_hash_path_prefix are used as part of the -# the hashing algorithm when determining data placement in the cluster. +# hashing algorithm when determining data placement in the cluster. # These values should remain secret and MUST NOT change # once a cluster has been deployed. 
# Use only printable chars (python -c "import string; print(string.printable)") From 7c0f58ec2ed020186ca3f269153b184fc02bf37a Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 7 Apr 2016 07:00:08 +0000 Subject: [PATCH 078/141] Imported Translations from Zanata For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: Ib80e3a759fa1e4a99576710607ad07fc5f259527 --- swift/locale/es/LC_MESSAGES/swift.po | 178 +++++++++++- swift/locale/ja/LC_MESSAGES/swift.po | 19 +- swift/locale/ko_KR/LC_MESSAGES/swift.po | 173 ++++++++++- swift/locale/ru/LC_MESSAGES/swift.po | 19 +- swift/locale/swift.pot | 369 ++++++++++++------------ 5 files changed, 558 insertions(+), 200 deletions(-) diff --git a/swift/locale/es/LC_MESSAGES/swift.po b/swift/locale/es/LC_MESSAGES/swift.po index a7c0ab2a8a..5800ed20c0 100644 --- a/swift/locale/es/LC_MESSAGES/swift.po +++ b/swift/locale/es/LC_MESSAGES/swift.po @@ -6,16 +6,17 @@ # Carlos A. Muñoz , 2015. #zanata # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata +# Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev176\n" +"Project-Id-Version: swift 2.6.1.dev244\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-08 04:09+0000\n" +"POT-Creation-Date: 2016-03-22 06:16+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-09-09 05:36+0000\n" -"Last-Translator: Carlos A. 
Muñoz \n" +"PO-Revision-Date: 2016-03-29 11:20+0000\n" +"Last-Translator: Eugènia Torrella \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" @@ -52,6 +53,16 @@ msgstr "%(ip)s/%(device)s han respondido como desmontados" msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" +#, python-format +msgid "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" +"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " +"(%(rate).2f/sec, %(remaining)s remaining)" +msgstr "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) particiones de %(device)d/" +"%(dtotal)d (%(dpercentage).2f%%) dispositivos reconstruidas en %(time).2fs " +"(%(rate).2f/sec, %(remaining)s remaining)" + #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " @@ -88,6 +99,10 @@ msgstr "%s no existe" msgid "%s is not mounted" msgstr "%s no está montado" +#, python-format +msgid "%s responded as unmounted" +msgstr "%s ha respondido como desmontado" + #, python-format msgid "%s running (%s - %s)" msgstr "%s en ejecución (%s - %s)" @@ -211,6 +226,14 @@ msgstr "No se puede acceder al archivo %s." msgid "Can not load profile data from %s." msgstr "No se pueden cargar los datos de perfil desde %s." 
+#, python-format +msgid "Cannot read %s (%s)" +msgstr "No se puede leer %s (%s)" + +#, python-format +msgid "Cannot write %s (%s)" +msgstr "No se puede escribir en %s (%s)" + #, python-format msgid "Client did not read from proxy within %ss" msgstr "El cliente pudo realizar la lectura desde el proxy en %ss" @@ -221,6 +244,9 @@ msgstr "El cliente se ha desconectado de la lectura" msgid "Client disconnected without sending enough data" msgstr "El cliente se ha desconectado sin enviar suficientes datos" +msgid "Client disconnected without sending last chunk" +msgstr "El cliente se ha desconectado sin enviar el último fragmento" + #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" @@ -228,6 +254,14 @@ msgstr "" "La vía de acceso de cliente %(client)s no coincide con la vía de acceso " "almacenada en los metadatos de objeto %(meta)s" +msgid "" +"Configuration option internal_client_conf_path not defined. Using default " +"configuration, See internal-client.conf-sample for options" +msgstr "" +"La opción de configuración internal_client_conf_path no está definida. 
Se " +"utilizará la configuración predeterminada, Consulte internal-client.conf-" +"sample para ver las opciones" + msgid "Connection refused" msgstr "Conexión rechazada" @@ -288,6 +322,10 @@ msgstr "Error de descarga de datos: %s" msgid "Devices pass completed: %.02fs" msgstr "Paso de dispositivos finalizado: %.02fs" +#, python-format +msgid "Directory %r does not map to a valid policy (%s)" +msgstr "El directory %r no está correlacionado con una política válida (%s)" + #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERROR %(db_file)s: %(validate_sync_to_err)s" @@ -367,6 +405,10 @@ msgstr "" msgid "ERROR Exception causing client disconnect" msgstr "ERROR Excepción que provoca la desconexión del cliente" +#, python-format +msgid "ERROR Exception transferring data to object servers %s" +msgstr "ERROR Excepción al transferir datos a los servidores de objetos %s" + msgid "ERROR Failed to get my own IPs?" msgstr "ERROR ¿No puedo obtener mis propias IP?" @@ -545,6 +587,11 @@ msgstr "Error al sincronizar la partición" msgid "Error syncing with node: %s" msgstr "Error en la sincronización con el nodo: %s" +#, python-format +msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" +msgstr "" +"Error al intentar reconstruir %(path)s policy#%(policy)d frag#%(frag_index)s" + msgid "Error: An error occurred" msgstr "Error: se ha producido un error" @@ -564,6 +611,9 @@ msgstr "Excepción en el bucle cosechador de cuenta de nivel superior" msgid "Exception in top-level replication loop" msgstr "Excepción en el bucle de réplica de nivel superior" +msgid "Exception in top-levelreconstruction loop" +msgstr "Excepción en el bucle de reconstrucción de nivel superior" + #, python-format msgid "Exception while deleting container %s %s" msgstr "Excepción al suprimir el contenedor %s %s" @@ -602,6 +652,13 @@ msgstr "Siguiente cadena CNAME de %(given_domain)s a %(found_domain)s" msgid "Found configs:" msgstr "Configuraciones encontradas:" 
+msgid "" +"Handoffs first mode still has handoffs remaining. Aborting current " +"replication pass." +msgstr "" +"El modo de transferencias primero aún tiene transferencias restantes. " +"Abortando el pase de réplica actual." + msgid "Host unreachable" msgstr "Host no alcanzable" @@ -621,6 +678,10 @@ msgstr "Host no válido %r en X-Container-Sync-To" msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Entrada pendiente no válida %(file)s: %(entry)s" +#, python-format +msgid "Invalid response %(resp)s from %(full_path)s" +msgstr "Respuesta no válida %(resp)s de %(full_path)s" + #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Respuesta no válida %(resp)s desde %(ip)s" @@ -637,6 +698,10 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "Interrumpiendo resincronización (rsync) de larga duración: %s" +#, python-format +msgid "Loading JSON from %s failed (%s)" +msgstr "Error al cargar JSON desde %s (%s)" + msgid "Lockup detected.. killing live coros." msgstr "Bloqueo detectado. Interrumpiendo coros activos." 
@@ -656,14 +721,26 @@ msgstr "No hay punto final de clúster para %r %r" msgid "No permission to signal PID %d" msgstr "No hay permiso para señalar el PID %d" +#, python-format +msgid "No policy with index %s" +msgstr "No hay ninguna política que tenga el índice %s" + #, python-format msgid "No realm key for %r" msgstr "No hay clave de dominio para %r" +#, python-format +msgid "No space left on device for %s (%s)" +msgstr "No queda espacio libre en el dispositivo para %s (%s)" + #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Error de nodo limitado %(ip)s:%(port)s (%(device)s)" +#, python-format +msgid "Not enough object servers ack'ed (got %d)" +msgstr "No hay suficientes servidores de objetos reconocidos (constan %d)" + #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " @@ -672,6 +749,10 @@ msgstr "" "No se ha encontrado %(sync_from)r => %(sync_to)r - " "objeto %(obj_name)rd" +#, python-format +msgid "Nothing reconstructed for %s seconds." +msgstr "No se ha reconstruido nada durante %s segundos." + #, python-format msgid "Nothing replicated for %s seconds." msgstr "No se ha replicado nada durante %s segundos." @@ -704,10 +785,30 @@ msgstr "" "segundo: %(brate).2f, Tiempo de auditoría: %(audit).2f, Velocidad: " "%(audit_rate).2f" +#, python-format +msgid "" +"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " +"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " +"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " +"%(audit_rate).2f" +msgstr "" +"Auditoría de objetos (%(type)s). 
Desde %(start_time)s: Localmente: " +"%(passes)d han pasado, %(quars)d en cuarentena, %(errors)d errores, archivos " +"por segundo: %(frate).2f , bytes por segundo: %(brate).2f, Tiempo total: " +"%(total).2f, Tiempo de auditoría: %(audit).2f, Velocidad: %(audit_rate).2f" + #, python-format msgid "Object audit stats: %s" msgstr "Estadísticas de auditoría de objetos: %s" +#, python-format +msgid "Object reconstruction complete (once). (%.02f minutes)" +msgstr "Reconstrucción de objeto finalizada (una vez). (%.02f minutos)" + +#, python-format +msgid "Object reconstruction complete. (%.02f minutes)" +msgstr "Reconstrucción de objeto finalizada. (%.02f minutos)" + #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Réplica de objeto finalizada (una vez). (%.02f minutos)" @@ -769,6 +870,14 @@ msgstr "Vía de acceso necesaria en X-Container-Sync-To" msgid "Problem cleaning up %s" msgstr "Problema al limpiar %s" +#, python-format +msgid "Problem cleaning up %s (%s)" +msgstr "Problema al limpiar %s (%s)" + +#, python-format +msgid "Problem writing durable state file %s (%s)" +msgstr "Problema al escribir en el archivo de estado durable %s (%s)" + #, python-format msgid "Profiling Error: %s" msgstr "Error de perfil: %s" @@ -812,6 +921,15 @@ msgstr "Eliminando %s objetos" msgid "Removing partition: %s" msgstr "Eliminando partición: %s" +#, python-format +msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" +msgstr "" +"Eliminando el archivo PID %(pid_file)s que tiene el PID no válido %(pid)d" + +#, python-format +msgid "Removing pid file %s with invalid pid" +msgstr "Eliminando el archivo PID %s, que tiene un PID no válido" + #, python-format msgid "Removing stale pid file %s" msgstr "Eliminando fichero de identificación positiva obsoleto %s" @@ -831,6 +949,10 @@ msgstr "" "Se devuelven 498 de %(meth)s a %(acc)s/%(cont)s/%(obj)s. Ajuste de límite " "(suspensión máxima) %(e)s" +msgid "Ring change detected. 
Aborting current reconstruction pass." +msgstr "" +"Cambio de anillo detectado. Abortando el pase de reconstrucción actual." + msgid "Ring change detected. Aborting current replication pass." msgstr "Cambio de anillo detectado. Abortando el pase de réplica actual." @@ -838,6 +960,9 @@ msgstr "Cambio de anillo detectado. Abortando el pase de réplica actual." msgid "Running %s once" msgstr "Ejecutando %s una vez" +msgid "Running object reconstructor in script mode." +msgstr "Ejecutando reconstructor de objeto en modo script." + msgid "Running object replicator in script mode." msgstr "Ejecutando replicador de objeto en modalidad de script." @@ -881,6 +1006,12 @@ msgstr "Omitiendo %s, ya que no está montado" msgid "Starting %s" msgstr "Iniciando %s" +msgid "Starting object reconstruction pass." +msgstr "Iniciando el paso de reconstrucción de objeto." + +msgid "Starting object reconstructor in daemon mode." +msgstr "Iniciando reconstructor de objeto en modo daemon." + msgid "Starting object replication pass." msgstr "Iniciando el paso de réplica de objeto." 
@@ -907,10 +1038,22 @@ msgstr "" msgid "Timeout %(action)s to memcached: %(server)s" msgstr "%(action)s de tiempo de espera para memcached: %(server)s" +#, python-format +msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" +msgstr "Excepción de tiempo de espera superado con %(ip)s:%(port)s/%(device)s" + #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Intentando %(method)s %(path)s" +#, python-format +msgid "Trying to GET %(full_path)s" +msgstr "Intentando hacer un GET de %(full_path)s" + +#, python-format +msgid "Trying to get %s status of PUT to %s" +msgstr "Intentando obtener el estado %s de PUT en %s" + #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Intentando obtener el estado final de PUT en %s" @@ -924,6 +1067,10 @@ msgstr "Intentando leer durante GET (reintento)" msgid "Trying to send to client" msgstr "Intentando enviar al cliente" +#, python-format +msgid "Trying to sync suffixes with %s" +msgstr "Intentando sincronizar los sufijos con %s" + #, python-format msgid "Trying to write to %s" msgstr "Intentando escribir en %s" @@ -935,10 +1082,23 @@ msgstr "UNCAUGHT EXCEPTION" msgid "Unable to find %s config section in %s" msgstr "No se ha podido encontrar la sección de configuración %s en %s" +#, python-format +msgid "Unable to load internal client from config: %r (%s)" +msgstr "" +"No se puede cargar el cliente interno a partir de la configuración: %r (%s)" + #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "No se ha podido localizar %s en libc. Se dejará como no operativo." +#, python-format +msgid "Unable to locate config for %s" +msgstr "No se ha podido encontrar el número de configuración de %s" + +#, python-format +msgid "Unable to locate config number %s for %s" +msgstr "No se ha podido encontrar el número de configuración %s de %s" + msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." 
msgstr "" @@ -964,6 +1124,12 @@ msgstr "Respuesta inesperada : %s " msgid "Unhandled exception" msgstr "Excepción no controlada" +#, python-format +msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" +msgstr "" +"Se ha producido una excepción desconocida al intentar hacer un GET de: " +"%(account)r %(container)r %(object)r" + #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Informe de actualización fallido para %(container)s %(dbfile)s" @@ -998,6 +1164,10 @@ msgstr "" msgid "Waited %s seconds for %s to die; giving up" msgstr "Se han esperado %s segundos a que muriera %s; abandonando" +#, python-format +msgid "Waited %s seconds for %s to die; killing" +msgstr "Se han esperado %s segundos a que muriera %s; terminando" + msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" "Aviso: no se puede ajustar el límite sin un cliente almacenado en memoria " diff --git a/swift/locale/ja/LC_MESSAGES/swift.po b/swift/locale/ja/LC_MESSAGES/swift.po index 53fbce29ca..323191ced3 100644 --- a/swift/locale/ja/LC_MESSAGES/swift.po +++ b/swift/locale/ja/LC_MESSAGES/swift.po @@ -7,16 +7,17 @@ # Akihiro Motoki , 2015. #zanata # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata +# Andreas Jaeger , 2016. #zanata # 笹原 昌美 , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev4\n" +"Project-Id-Version: swift 2.6.1.dev244\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-25 11:23+0000\n" +"POT-Creation-Date: 2016-03-22 06:16+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-25 07:46+0000\n" +"PO-Revision-Date: 2016-03-29 05:40+0000\n" "Last-Translator: 笹原 昌美 \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" @@ -158,9 +159,9 @@ msgstr "アカウント %s は %s 以降リープされていません" msgid "Account audit \"once\" mode completed: %.02fs" msgstr "アカウント監査 \"once\" モードが完了しました: %.02fs" -#, fuzzy, python-format +#, python-format msgid "Account audit pass completed: %.02fs" -msgstr "アカウント監査パスが完了しました: %.02fs" +msgstr "アカウント監査の処理が完了しました: %.02fs" #, python-format msgid "" @@ -273,9 +274,9 @@ msgstr "コンテナー" msgid "Container audit \"once\" mode completed: %.02fs" msgstr "コンテナー監査「once」モードが完了しました: %.02fs" -#, fuzzy, python-format +#, python-format msgid "Container audit pass completed: %.02fs" -msgstr "コンテナー監査パスが完了しました: %.02fs" +msgstr "コンテナー監査の処理が完了しました: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" @@ -313,9 +314,9 @@ msgstr "%r をロードできませんでした: %s" msgid "Data download error: %s" msgstr "データダウンロードエラー: %s" -#, fuzzy, python-format +#, python-format msgid "Devices pass completed: %.02fs" -msgstr "デバイスパスが完了しました: %.02fs" +msgstr "デバイスの処理が完了しました: %.02fs" #, python-format msgid "Directory %r does not map to a valid policy (%s)" diff --git a/swift/locale/ko_KR/LC_MESSAGES/swift.po b/swift/locale/ko_KR/LC_MESSAGES/swift.po index 7bcc4c960b..f84a5088cf 100644 --- a/swift/locale/ko_KR/LC_MESSAGES/swift.po +++ b/swift/locale/ko_KR/LC_MESSAGES/swift.po @@ -10,14 +10,14 @@ # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev176\n" +"Project-Id-Version: swift 2.6.1.dev244\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-08 04:09+0000\n" +"POT-Creation-Date: 2016-03-22 06:16+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-01-30 06:54+0000\n" -"Last-Translator: Andreas Jaeger \n" +"PO-Revision-Date: 2016-03-29 02:16+0000\n" +"Last-Translator: SeYeon Lee \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" @@ -54,6 +54,16 @@ msgstr "%(ip)s/%(device)s에서 마운트 해제된 것으로 응답함" msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" +#, python-format +msgid "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" +"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " +"(%(rate).2f/sec, %(remaining)s remaining)" +msgstr "" +"%(device)d/%(dtotal)d (%(dpercentage).2f%%) 장치 중 %(reconstructed)d/" +"%(total)d (%(percentage).2f%%)개의 파티션이 %(time).2fs (%(rate).2f/sec, " +"%(remaining)s 남음)에 재구성됨" + #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " @@ -90,6 +100,10 @@ msgstr "%s이(가) 존재하지 않음" msgid "%s is not mounted" msgstr "%s이(가) 마운트되지 않음" +#, python-format +msgid "%s responded as unmounted" +msgstr "%s이(가) 마운트 해제된 것으로 응답" + #, python-format msgid "%s running (%s - %s)" msgstr "%s 실행 중(%s - %s)" @@ -211,6 +225,14 @@ msgstr "파일 %s에 액세스할 수 없습니다." msgid "Can not load profile data from %s." msgstr "%s에서 프로파일 데이터를 로드할 수 없습니다." 
+#, python-format +msgid "Cannot read %s (%s)" +msgstr "%s을(를) 읽을 수 없음(%s)" + +#, python-format +msgid "Cannot write %s (%s)" +msgstr "%s을(를) 쓸 수 없음(%s)" + #, python-format msgid "Client did not read from proxy within %ss" msgstr "클라이언트에서 %ss 내에 프록시를 읽을 수 없었음" @@ -221,6 +243,9 @@ msgstr "읽기 시 클라이언트 연결이 끊어짐" msgid "Client disconnected without sending enough data" msgstr "데이터를 모두 전송하기 전에 클라이언트 연결이 끊어짐" +msgid "Client disconnected without sending last chunk" +msgstr "마지막 청크를 전송하기 전에 클라이언트 연결이 끊어짐" + #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" @@ -228,6 +253,13 @@ msgstr "" "클라이언트 경로 %(client)s이(가) 오브젝트 메타데이터 %(meta)s에 저장된 경로" "와 일치하지 않음" +msgid "" +"Configuration option internal_client_conf_path not defined. Using default " +"configuration, See internal-client.conf-sample for options" +msgstr "" +"구성 옵션 internal_client_conf_path가 정의되지 않았습니다. 기본 구성 사용 시 " +"internal-client.conf-sample에서 옵션을 참조하십시오." + msgid "Connection refused" msgstr "연결이 거부됨" @@ -285,6 +317,10 @@ msgstr "데이터 다운로드 오류: %s" msgid "Devices pass completed: %.02fs" msgstr "장치 패스 완료 : %.02fs" +#, python-format +msgid "Directory %r does not map to a valid policy (%s)" +msgstr "%r 디렉토리가 올바른 정책(%s)에 맵핑되지 않음" + #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "오류 %(db_file)s: %(validate_sync_to_err)s" @@ -360,6 +396,10 @@ msgstr "오류. 디스크 파일 %(data_file)s 닫기 실패: %(exc)s : %(stack) msgid "ERROR Exception causing client disconnect" msgstr "오류. 예외로 인해 클라이언트 연결이 끊어짐" +#, python-format +msgid "ERROR Exception transferring data to object servers %s" +msgstr "ERROR 오브젝트 서버 %s에 데이터를 전송하는 중에 예외 발생" + msgid "ERROR Failed to get my own IPs?" msgstr "오류. 
자체 IP를 가져오는 중 오류 발생 여부" @@ -531,6 +571,12 @@ msgstr "파티션 동기 오류 " msgid "Error syncing with node: %s" msgstr "노드 동기 오류: %s" +#, python-format +msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" +msgstr "" +"%(path)s policy#%(policy)d frag#%(frag_index)s을(를) 다시 빌드하려는 중 오류 " +"발생" + msgid "Error: An error occurred" msgstr "오류: 오류 발생" @@ -550,6 +596,9 @@ msgstr "최상위 계정 루프의 예외 " msgid "Exception in top-level replication loop" msgstr "최상위 레벨 복제 루프에서 예외 발생" +msgid "Exception in top-levelreconstruction loop" +msgstr "최상위 레벨 재구성 루프에서 예외 발생" + #, python-format msgid "Exception while deleting container %s %s" msgstr "컨테이너 %s %s 삭제 중 예외 발생" @@ -586,6 +635,13 @@ msgstr "%(given_domain)s에서 %(found_domain)s(으)로의 다음 CNAME 체인" msgid "Found configs:" msgstr "구성 발견:" +msgid "" +"Handoffs first mode still has handoffs remaining. Aborting current " +"replication pass." +msgstr "" +"핸드오프 첫 모드에 여전히 핸드오프가 남아 있습니다. 현재 복제 전달을 중단합니" +"다." + msgid "Host unreachable" msgstr "호스트 도달 불가능" @@ -605,6 +661,10 @@ msgstr "X-Container-Sync-To에 올바르지 않은 호스트 %r이(가) 있음" msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "올바르지 않은 보류 항목 %(file)s: %(entry)s" +#, python-format +msgid "Invalid response %(resp)s from %(full_path)s" +msgstr "%(full_path)s에서 올바르지 않은 응답 %(resp)s" + #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "%(ip)s의 올바르지 않은 응답 %(resp)s" @@ -621,6 +681,10 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "장기 실행 중인 rsync 강제 종료: %s" +#, python-format +msgid "Loading JSON from %s failed (%s)" +msgstr "%s에서 JSON 로드 실패(%s)" + msgid "Lockup detected.. killing live coros." msgstr "잠금 발견.. 활성 coros를 강제 종료합니다." 
@@ -640,14 +704,26 @@ msgstr "%r %r에 대한 클러스터 엔드포인트가 없음" msgid "No permission to signal PID %d" msgstr "PID %d을(를) 표시할 권한이 없음" +#, python-format +msgid "No policy with index %s" +msgstr "인덱스가 %s인 정책이 없음" + #, python-format msgid "No realm key for %r" msgstr "%r에 대한 영역 키가 없음" +#, python-format +msgid "No space left on device for %s (%s)" +msgstr "%s의 장치 왼쪽에 공백이 없음(%s)" + #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "노드 오류로 %(ip)s:%(port)s(%(device)s)이(가) 제한됨" +#, python-format +msgid "Not enough object servers ack'ed (got %d)" +msgstr "승인된 오브젝트 서버가 부족함(%d을(를) 받음)" + #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " @@ -656,6 +732,10 @@ msgstr "" "찾을 수 없음 %(sync_from)r => %(sync_to)r - 오브젝" "트%(obj_name)r" +#, python-format +msgid "Nothing reconstructed for %s seconds." +msgstr "%s초 동안 재구성된 것이 없습니다." + #, python-format msgid "Nothing replicated for %s seconds." msgstr "%s초 동안 복제된 것이 없습니다." @@ -686,10 +766,30 @@ msgstr "" "목: %(quars)d, 총 오류 수: %(errors)d, 총 파일/초: %(frate).2f, 총 바이트/" "초: %(brate).2f, 감사 시간: %(audit).2f, 속도: %(audit_rate).2f" +#, python-format +msgid "" +"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " +"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " +"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " +"%(audit_rate).2f" +msgstr "" +"오브젝트 감사(%(type)s). %(start_time)s 이후: 로컬: %(passes)d개 통과, " +"%(quars)d개 격리, %(errors)d개 오류, 파일/초: %(frate).2f, 바이트/초: " +"%(brate).2f, 총 시간: %(total).2f, 감사 시간: %(audit).2f, 속도: " +"%(audit_rate).2f" + #, python-format msgid "Object audit stats: %s" msgstr "오브젝트 감사 통계: %s" +#, python-format +msgid "Object reconstruction complete (once). (%.02f minutes)" +msgstr "오브젝트 재구성 완료(일 회). (%.02f분)" + +#, python-format +msgid "Object reconstruction complete. (%.02f minutes)" +msgstr "오브젝트 재구성 완료. (%.02f분)" + #, python-format msgid "Object replication complete (once). 
(%.02f minutes)" msgstr "오브젝트 복제 완료(일 회). (%.02f분)" @@ -748,6 +848,14 @@ msgstr "X-Container-Sync-To에 경로가 필요함" msgid "Problem cleaning up %s" msgstr "%s 정리 문제 발생" +#, python-format +msgid "Problem cleaning up %s (%s)" +msgstr "%s 정리 문제 발생(%s)" + +#, python-format +msgid "Problem writing durable state file %s (%s)" +msgstr "지속적인 상태 파일 %s 쓰기 오류(%s)" + #, python-format msgid "Profiling Error: %s" msgstr "프로파일링 오류: %s" @@ -786,6 +894,14 @@ msgstr "%s 오브젝트 제거 중" msgid "Removing partition: %s" msgstr "파티션 제거: %s" +#, python-format +msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" +msgstr "잘못된 pid %(pid)d의 pid 파일 %(pid_file)s 제거" + +#, python-format +msgid "Removing pid file %s with invalid pid" +msgstr "pid가 올바르지 않은 pid 파일 %s 제거" + #, python-format msgid "Removing stale pid file %s" msgstr "시간이 경과된 pid 파일 %s을(를) 제거하는 중 " @@ -805,6 +921,9 @@ msgstr "" "%(acc)s/%(cont)s/%(obj)s(으)로 %(meth)s에 대한 498을 리턴합니다. 전송률 제한" "(최대 휴면) %(e)s" +msgid "Ring change detected. Aborting current reconstruction pass." +msgstr "링 변경이 발견되었습니다. 현재 재구성 전달을 중단합니다." + msgid "Ring change detected. Aborting current replication pass." msgstr "링 변경이 발견되었습니다. 현재 복제 전달을 중단합니다." @@ -812,6 +931,9 @@ msgstr "링 변경이 발견되었습니다. 현재 복제 전달을 중단합 msgid "Running %s once" msgstr "%s을(를) 한 번 실행" +msgid "Running object reconstructor in script mode." +msgstr "오브젝트 재구성자를 스크립트 모드로 실행 중입니다." + msgid "Running object replicator in script mode." msgstr "오브젝트 복제자를 스크립트 모드로 실행 중입니다." @@ -851,6 +973,12 @@ msgstr "마운트되지 않는 %s를 건너 뛰기" msgid "Starting %s" msgstr "%s 시작 중" +msgid "Starting object reconstruction pass." +msgstr "오브젝트 재구성 전달을 시작합니다." + +msgid "Starting object reconstructor in daemon mode." +msgstr "오브젝트 재구성자를 디먼 모드로 시작합니다." + msgid "Starting object replication pass." msgstr "오브젝트 복제 전달을 시작합니다." 
@@ -876,10 +1004,22 @@ msgstr "" msgid "Timeout %(action)s to memcached: %(server)s" msgstr "memcached에 대한 %(action)s 제한시간 초과: %(server)s" +#, python-format +msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" +msgstr "%(ip)s:%(port)s/%(device)s에서 제한시간 초과 예외 발생" + #, python-format msgid "Trying to %(method)s %(path)s" msgstr "%(method)s %(path)s 시도 중" +#, python-format +msgid "Trying to GET %(full_path)s" +msgstr "GET %(full_path)s 시도 중" + +#, python-format +msgid "Trying to get %s status of PUT to %s" +msgstr "PUT의 %s 상태를 %s(으)로 가져오는 중" + #, python-format msgid "Trying to get final status of PUT to %s" msgstr "PUT의 최종 상태를 %s(으)로 가져오는 중" @@ -893,6 +1033,10 @@ msgstr "가져오기(재시도) 중 읽기를 시도함" msgid "Trying to send to client" msgstr "클라이언트로 전송 시도 중" +#, python-format +msgid "Trying to sync suffixes with %s" +msgstr "%s과(와) 접미사를 동기화하려고 시도" + #, python-format msgid "Trying to write to %s" msgstr "%s에 쓰기 시도 중" @@ -904,10 +1048,22 @@ msgstr "미발견 예외" msgid "Unable to find %s config section in %s" msgstr "%s 구성 섹션을 %s에서 찾을 수 없음" +#, python-format +msgid "Unable to load internal client from config: %r (%s)" +msgstr "구성에서 내부 클라이언트를 로드할 수 없음: %r (%s)" + #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "libc에서 %s을(를) 찾을 수 없습니다. no-op로 남겨 둡니다." +#, python-format +msgid "Unable to locate config for %s" +msgstr "%s의 구성을 찾을 수 없음" + +#, python-format +msgid "Unable to locate config number %s for %s" +msgstr "구성 번호 %s을(를) 찾을 수 없음(대상: %s)" + msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." 
msgstr "" @@ -932,6 +1088,11 @@ msgstr "예상치 않은 응답: %s" msgid "Unhandled exception" msgstr "처리되지 않은 예외" +#, python-format +msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" +msgstr "" +"GET을 시도하는 중 알 수 없는 예외 발생: %(account)r %(container)r %(object)r" + #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s의 업데이트 보고서 실패" @@ -965,6 +1126,10 @@ msgstr "" msgid "Waited %s seconds for %s to die; giving up" msgstr "%s초 동안 %s의 종료를 대기함, 포기하는 중" +#, python-format +msgid "Waited %s seconds for %s to die; killing" +msgstr "%s초 동안 %s을(를) 대기, 강제 종료 중" + msgid "Warning: Cannot ratelimit without a memcached client" msgstr "경고: memcached 클라이언트 없이 전송률을 제한할 수 없음" diff --git a/swift/locale/ru/LC_MESSAGES/swift.po b/swift/locale/ru/LC_MESSAGES/swift.po index 764e83ba6f..7511d0eaf9 100644 --- a/swift/locale/ru/LC_MESSAGES/swift.po +++ b/swift/locale/ru/LC_MESSAGES/swift.po @@ -5,18 +5,19 @@ # Translators: # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata +# Andreas Jaeger , 2016. #zanata # Filatov Sergey , 2016. #zanata # Grigory Mokhin , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: swift 2.6.1.dev244\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-22 03:44+0000\n" +"POT-Creation-Date: 2016-03-22 06:16+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-21 07:06+0000\n" -"Last-Translator: Grigory Mokhin \n" +"PO-Revision-Date: 2016-03-27 11:17+0000\n" +"Last-Translator: Ilya Alekseyev \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" @@ -226,6 +227,14 @@ msgstr "Отсутствует доступ к файлу %s." msgid "Can not load profile data from %s." msgstr "Не удается загрузить данные профайла из %s." 
+#, python-format +msgid "Cannot read %s (%s)" +msgstr "Невозможно прочитать %s (%s)" + +#, python-format +msgid "Cannot write %s (%s)" +msgstr "Невозможно записать %s (%s)" + #, python-format msgid "Client did not read from proxy within %ss" msgstr "Клиент не прочитал данные из proxy в %ss" @@ -690,6 +699,10 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "Принудительное завершение долго выполняющегося rsync: %s" +#, python-format +msgid "Loading JSON from %s failed (%s)" +msgstr "Загрузка JSON из %s провалилась (%s)" + msgid "Lockup detected.. killing live coros." msgstr "Обнаружена блокировка.. принудительное завершение работающих модулей." diff --git a/swift/locale/swift.pot b/swift/locale/swift.pot index ec3baaa9fb..5bf3b2de26 100644 --- a/swift/locale/swift.pot +++ b/swift/locale/swift.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev244\n" +"Project-Id-Version: swift 2.7.1.dev21\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-22 06:16+0000\n" +"POT-Creation-Date: 2016-04-07 07:00+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -55,15 +55,15 @@ msgstr "" #: swift/account/auditor.py:148 #, python-format -msgid "Audit Failed for %s: %s" +msgid "Audit Failed for %(path)s: %(err)s" msgstr "" -#: swift/account/auditor.py:152 +#: swift/account/auditor.py:153 #, python-format msgid "ERROR Could not get account info %s" msgstr "" -#: swift/account/reaper.py:139 swift/common/utils.py:2342 +#: swift/account/reaper.py:139 swift/common/utils.py:2357 #: swift/obj/diskfile.py:361 swift/obj/updater.py:88 swift/obj/updater.py:131 #, python-format msgid "Skipping %s as it is not mounted" @@ -78,83 +78,83 @@ msgstr "" msgid "Devices pass completed: %.02fs" msgstr "" -#: swift/account/reaper.py:254 +#: swift/account/reaper.py:256 #, python-format msgid "Beginning pass on account %s" msgstr "" -#: swift/account/reaper.py:279 +#: 
swift/account/reaper.py:281 #, python-format msgid "Exception with containers for account %s" msgstr "" -#: swift/account/reaper.py:286 +#: swift/account/reaper.py:288 #, python-format msgid "Exception with account %s" msgstr "" -#: swift/account/reaper.py:287 +#: swift/account/reaper.py:289 #, python-format msgid "Incomplete pass on account %s" msgstr "" -#: swift/account/reaper.py:289 +#: swift/account/reaper.py:291 #, python-format msgid ", %s containers deleted" msgstr "" -#: swift/account/reaper.py:291 +#: swift/account/reaper.py:293 #, python-format msgid ", %s objects deleted" msgstr "" -#: swift/account/reaper.py:293 +#: swift/account/reaper.py:295 #, python-format msgid ", %s containers remaining" msgstr "" -#: swift/account/reaper.py:296 +#: swift/account/reaper.py:298 #, python-format msgid ", %s objects remaining" msgstr "" -#: swift/account/reaper.py:298 +#: swift/account/reaper.py:300 #, python-format msgid ", %s containers possibly remaining" msgstr "" -#: swift/account/reaper.py:301 +#: swift/account/reaper.py:303 #, python-format msgid ", %s objects possibly remaining" msgstr "" -#: swift/account/reaper.py:304 +#: swift/account/reaper.py:306 msgid ", return codes: " msgstr "" -#: swift/account/reaper.py:308 +#: swift/account/reaper.py:310 #, python-format msgid ", elapsed: %.02fs" msgstr "" -#: swift/account/reaper.py:314 +#: swift/account/reaper.py:317 #, python-format -msgid "Account %s has not been reaped since %s" +msgid "Account %(account)s has not been reaped since %(time)s" msgstr "" -#: swift/account/reaper.py:373 swift/account/reaper.py:427 -#: swift/account/reaper.py:503 swift/container/updater.py:307 +#: swift/account/reaper.py:376 swift/account/reaper.py:430 +#: swift/account/reaper.py:506 swift/container/updater.py:307 #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "" -#: swift/account/reaper.py:380 swift/account/reaper.py:436 -#: swift/account/reaper.py:514 +#: swift/account/reaper.py:383 
swift/account/reaper.py:439 +#: swift/account/reaper.py:517 #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "" -#: swift/account/reaper.py:397 +#: swift/account/reaper.py:400 #, python-format msgid "Exception with objects for container %(container)s for account %(account)s" msgstr "" @@ -183,14 +183,14 @@ msgstr "" #: swift/common/db.py:353 #, python-format -msgid "Quarantined %s to %s due to %s database" +msgid "Quarantined %(db_dir)s to %(quar_path)s due to %(exc_hint)s database" msgstr "" -#: swift/common/db.py:408 +#: swift/common/db.py:410 msgid "Broker error trying to rollback locked connection" msgstr "" -#: swift/common/db.py:611 +#: swift/common/db.py:613 #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "" @@ -210,7 +210,7 @@ msgstr "" msgid "Removed %(remove)d dbs" msgstr "" -#: swift/common/db_replicator.py:215 swift/obj/replicator.py:514 +#: swift/common/db_replicator.py:215 swift/obj/replicator.py:516 #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "" @@ -294,92 +294,96 @@ msgid "" "user quit" msgstr "" -#: swift/common/manager.py:278 swift/common/manager.py:622 +#: swift/common/manager.py:278 swift/common/manager.py:630 #, python-format msgid "No %s running" msgstr "" #: swift/common/manager.py:291 #, python-format -msgid "%s (%s) appears to have stopped" +msgid "%(server)s (%(pid)s) appears to have stopped" msgstr "" -#: swift/common/manager.py:303 +#: swift/common/manager.py:304 #, python-format -msgid "Waited %s seconds for %s to die; killing" +msgid "Waited %(kill_wait)s seconds for %(server)s to die; killing" msgstr "" -#: swift/common/manager.py:307 swift/common/manager.py:559 +#: swift/common/manager.py:309 swift/common/manager.py:565 #, python-format -msgid "Signal %s pid: %s signal: %s" +msgid "Signal %(server)s pid: %(pid)s signal: %(signal)s" msgstr "" -#: swift/common/manager.py:317 +#: swift/common/manager.py:321 #, python-format -msgid "Waited %s 
seconds for %s to die; giving up" +msgid "Waited %(kill_wait)s seconds for %(server)s to die; giving up" msgstr "" -#: swift/common/manager.py:501 +#: swift/common/manager.py:506 #, python-format -msgid "Unable to locate config number %s for %s" +msgid "Unable to locate config number %(number)s for %(server)s" msgstr "" -#: swift/common/manager.py:504 +#: swift/common/manager.py:510 #, python-format msgid "Unable to locate config for %s" msgstr "" -#: swift/common/manager.py:507 +#: swift/common/manager.py:513 msgid "Found configs:" msgstr "" -#: swift/common/manager.py:554 +#: swift/common/manager.py:560 #, python-format msgid "Removing pid file %s with invalid pid" msgstr "" -#: swift/common/manager.py:564 +#: swift/common/manager.py:571 #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "" -#: swift/common/manager.py:571 +#: swift/common/manager.py:578 #, python-format msgid "Removing stale pid file %s" msgstr "" -#: swift/common/manager.py:574 +#: swift/common/manager.py:581 #, python-format msgid "No permission to signal PID %d" msgstr "" -#: swift/common/manager.py:619 +#: swift/common/manager.py:626 #, python-format -msgid "%s #%d not running (%s)" +msgid "%(server)s #%(number)d not running (%(conf)s)" msgstr "" -#: swift/common/manager.py:626 swift/common/manager.py:719 -#: swift/common/manager.py:723 +#: swift/common/manager.py:634 swift/common/manager.py:728 #, python-format -msgid "%s running (%s - %s)" +msgid "%(server)s running (%(pid)s - %(conf)s)" msgstr "" -#: swift/common/manager.py:726 +#: swift/common/manager.py:732 #, python-format -msgid "%s already started..." -msgstr "" - -#: swift/common/manager.py:735 -#, python-format -msgid "Running %s once" +msgid "%(server)s running (%(pid)s - %(pid_file)s)" msgstr "" #: swift/common/manager.py:737 #, python-format +msgid "%s already started..." 
+msgstr "" + +#: swift/common/manager.py:746 +#, python-format +msgid "Running %s once" +msgstr "" + +#: swift/common/manager.py:748 +#, python-format msgid "Starting %s" msgstr "" -#: swift/common/manager.py:744 +#: swift/common/manager.py:755 #, python-format msgid "%s does not exist" msgstr "" @@ -419,90 +423,90 @@ msgstr "" #: swift/common/utils.py:675 #, python-format -msgid "Unable to perform fsync() on directory %s: %s" +msgid "Unable to perform fsync() on directory %(dir)s: %(err)s" msgstr "" -#: swift/common/utils.py:1244 +#: swift/common/utils.py:1245 #, python-format msgid "%s: Connection reset by peer" msgstr "" -#: swift/common/utils.py:1246 swift/common/utils.py:1249 +#: swift/common/utils.py:1247 swift/common/utils.py:1251 #, python-format -msgid "%s: %s" -msgstr "" - -#: swift/common/utils.py:1497 -msgid "Connection refused" -msgstr "" - -#: swift/common/utils.py:1499 -msgid "Host unreachable" +msgid "%(type)s: %(value)s" msgstr "" #: swift/common/utils.py:1501 +msgid "Connection refused" +msgstr "" + +#: swift/common/utils.py:1503 +msgid "Host unreachable" +msgstr "" + +#: swift/common/utils.py:1505 msgid "Connection timeout" msgstr "" -#: swift/common/utils.py:1779 +#: swift/common/utils.py:1783 msgid "UNCAUGHT EXCEPTION" msgstr "" -#: swift/common/utils.py:1834 +#: swift/common/utils.py:1838 msgid "Error: missing config path argument" msgstr "" -#: swift/common/utils.py:1839 +#: swift/common/utils.py:1843 #, python-format msgid "Error: unable to locate %s" msgstr "" -#: swift/common/utils.py:2200 +#: swift/common/utils.py:2215 #, python-format msgid "Unable to read config from %s" msgstr "" -#: swift/common/utils.py:2206 +#: swift/common/utils.py:2221 #, python-format -msgid "Unable to find %s config section in %s" +msgid "Unable to find %(section)s config section in %(conf)s" msgstr "" -#: swift/common/utils.py:2591 +#: swift/common/utils.py:2606 #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "" -#: 
swift/common/utils.py:2596 +#: swift/common/utils.py:2611 #, python-format msgid "No realm key for %r" msgstr "" -#: swift/common/utils.py:2600 +#: swift/common/utils.py:2615 #, python-format msgid "No cluster endpoint for %r %r" msgstr "" -#: swift/common/utils.py:2609 +#: swift/common/utils.py:2624 #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" -#: swift/common/utils.py:2613 +#: swift/common/utils.py:2628 msgid "Path required in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2616 +#: swift/common/utils.py:2631 msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2621 +#: swift/common/utils.py:2636 #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2815 +#: swift/common/utils.py:2830 msgid "Exception dumping recon cache" msgstr "" @@ -679,61 +683,61 @@ msgid "" "later)" msgstr "" -#: swift/container/sync.py:225 +#: swift/container/sync.py:218 msgid "" "Configuration option internal_client_conf_path not defined. 
Using default" " configuration, See internal-client.conf-sample for options" msgstr "" -#: swift/container/sync.py:238 +#: swift/container/sync.py:231 #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "" -#: swift/container/sync.py:269 +#: swift/container/sync.py:253 msgid "Begin container sync \"once\" mode" msgstr "" -#: swift/container/sync.py:278 +#: swift/container/sync.py:262 #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "" -#: swift/container/sync.py:286 +#: swift/container/sync.py:270 #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], " "%(skip)s skipped, %(fail)s failed" msgstr "" -#: swift/container/sync.py:352 +#: swift/container/sync.py:336 #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "" -#: swift/container/sync.py:408 +#: swift/container/sync.py:390 #, python-format msgid "ERROR Syncing %s" msgstr "" -#: swift/container/sync.py:492 +#: swift/container/sync.py:539 #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" -#: swift/container/sync.py:525 +#: swift/container/sync.py:564 #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "" -#: swift/container/sync.py:531 +#: swift/container/sync.py:570 #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" -#: swift/container/sync.py:538 swift/container/sync.py:545 +#: swift/container/sync.py:577 swift/container/sync.py:584 #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "" @@ -744,7 +748,7 @@ msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "" #: swift/container/updater.py:92 swift/obj/reconstructor.py:822 -#: swift/obj/replicator.py:598 swift/obj/replicator.py:715 +#: swift/obj/replicator.py:600 swift/obj/replicator.py:717 #, python-format msgid "%s is not mounted" msgstr "" @@ -798,22 +802,22 @@ msgid "" "later): " 
msgstr "" -#: swift/obj/auditor.py:78 +#: swift/obj/auditor.py:104 #, python-format msgid " - parallel, %s" msgstr "" -#: swift/obj/auditor.py:80 +#: swift/obj/auditor.py:106 #, python-format msgid " - %s" msgstr "" -#: swift/obj/auditor.py:81 +#: swift/obj/auditor.py:107 #, python-format -msgid "Begin object audit \"%s\" mode (%s%s)" +msgid "Begin object audit \"%(mode)s\" mode (%(audi_type)s%(description)s)" msgstr "" -#: swift/obj/auditor.py:110 +#: swift/obj/auditor.py:138 #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d " @@ -822,7 +826,7 @@ msgid "" "%(audit).2f, Rate: %(audit_rate).2f" msgstr "" -#: swift/obj/auditor.py:144 +#: swift/obj/auditor.py:172 #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. " @@ -831,95 +835,100 @@ msgid "" "Rate: %(audit_rate).2f" msgstr "" -#: swift/obj/auditor.py:159 +#: swift/obj/auditor.py:187 #, python-format msgid "Object audit stats: %s" msgstr "" -#: swift/obj/auditor.py:190 +#: swift/obj/auditor.py:218 #, python-format msgid "ERROR Trying to audit %s" msgstr "" -#: swift/obj/auditor.py:227 +#: swift/obj/auditor.py:258 #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" -#: swift/obj/auditor.py:279 +#: swift/obj/auditor.py:319 #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "" -#: swift/obj/auditor.py:350 swift/obj/auditor.py:371 +#: swift/obj/auditor.py:397 swift/obj/auditor.py:418 #, python-format msgid "ERROR auditing: %s" msgstr "" -#: swift/obj/diskfile.py:371 swift/obj/updater.py:162 +#: swift/obj/diskfile.py:370 +#, python-format +msgid "Skipping %(dir)s: %(err)s" +msgstr "" + +#: swift/obj/diskfile.py:380 swift/obj/updater.py:162 #, python-format msgid "Directory %r does not map to a valid policy (%s)" msgstr "" -#: swift/obj/diskfile.py:413 +#: swift/obj/diskfile.py:422 #, python-format -msgid "Cannot read %s (%s)" +msgid "Cannot read %(auditor_status)s 
(%(err)s)" msgstr "" -#: swift/obj/diskfile.py:418 +#: swift/obj/diskfile.py:428 #, python-format -msgid "Loading JSON from %s failed (%s)" +msgid "Loading JSON from %(auditor_status)s failed (%(err)s)" msgstr "" -#: swift/obj/diskfile.py:433 +#: swift/obj/diskfile.py:444 #, python-format -msgid "Cannot write %s (%s)" +msgid "Cannot write %(auditor_status)s (%(err)s)" msgstr "" -#: swift/obj/diskfile.py:904 +#: swift/obj/diskfile.py:918 #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" -#: swift/obj/diskfile.py:1024 +#: swift/obj/diskfile.py:1038 msgid "Error hashing suffix" msgstr "" -#: swift/obj/diskfile.py:1188 +#: swift/obj/diskfile.py:1202 #, python-format msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" -#: swift/obj/diskfile.py:1441 +#: swift/obj/diskfile.py:1455 #, python-format msgid "Problem cleaning up %s" msgstr "" -#: swift/obj/diskfile.py:1786 +#: swift/obj/diskfile.py:1800 #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" -#: swift/obj/diskfile.py:2114 +#: swift/obj/diskfile.py:2128 #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata " "%(meta)s" msgstr "" -#: swift/obj/diskfile.py:2522 +#: swift/obj/diskfile.py:2536 #, python-format -msgid "No space left on device for %s (%s)" +msgid "No space left on device for %(file)s (%(err)s)" msgstr "" -#: swift/obj/diskfile.py:2531 +#: swift/obj/diskfile.py:2545 #, python-format -msgid "Problem cleaning up %s (%s)" +msgid "Problem cleaning up %(datadir)s (%(err)s)" msgstr "" -#: swift/obj/diskfile.py:2534 +#: swift/obj/diskfile.py:2548 #, python-format -msgid "Problem writing durable state file %s (%s)" +msgid "Problem writing durable state file %(file)s (%(err)s)" msgstr "" #: swift/obj/expirer.py:80 @@ -939,16 +948,16 @@ msgstr "" #: swift/obj/expirer.py:197 #, python-format -msgid "Exception while deleting 
container %s %s" +msgid "Exception while deleting container %(container)s %(err)s" msgstr "" -#: swift/obj/expirer.py:202 swift/obj/expirer.py:219 +#: swift/obj/expirer.py:203 swift/obj/expirer.py:220 msgid "Unhandled exception" msgstr "" -#: swift/obj/expirer.py:269 +#: swift/obj/expirer.py:270 #, python-format -msgid "Exception while deleting object %s %s %s" +msgid "Exception while deleting object %(container)s %(obj)s %(err)s" msgstr "" #: swift/obj/reconstructor.py:213 swift/obj/reconstructor.py:499 @@ -974,14 +983,14 @@ msgid "" "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" -#: swift/obj/reconstructor.py:376 swift/obj/replicator.py:519 +#: swift/obj/reconstructor.py:376 swift/obj/replicator.py:521 #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% " "synced" msgstr "" -#: swift/obj/reconstructor.py:383 swift/obj/replicator.py:526 +#: swift/obj/reconstructor.py:383 swift/obj/replicator.py:528 #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" @@ -991,7 +1000,7 @@ msgstr "" msgid "Nothing reconstructed for %s seconds." msgstr "" -#: swift/obj/reconstructor.py:420 swift/obj/replicator.py:563 +#: swift/obj/reconstructor.py:420 swift/obj/replicator.py:565 msgid "Lockup detected.. killing live coros." msgstr "" @@ -1005,7 +1014,7 @@ msgstr "" msgid "%s responded as unmounted" msgstr "" -#: swift/obj/reconstructor.py:893 swift/obj/replicator.py:369 +#: swift/obj/reconstructor.py:893 swift/obj/replicator.py:371 #, python-format msgid "Removing partition: %s" msgstr "" @@ -1040,93 +1049,93 @@ msgstr "" msgid "Object reconstruction complete. 
(%.02f minutes)" msgstr "" -#: swift/obj/replicator.py:183 +#: swift/obj/replicator.py:185 #, python-format msgid "Killing long-running rsync: %s" msgstr "" -#: swift/obj/replicator.py:197 +#: swift/obj/replicator.py:199 #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "" -#: swift/obj/replicator.py:204 swift/obj/replicator.py:208 +#: swift/obj/replicator.py:206 swift/obj/replicator.py:210 #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "" -#: swift/obj/replicator.py:335 +#: swift/obj/replicator.py:337 #, python-format msgid "Removing %s objects" msgstr "" -#: swift/obj/replicator.py:356 +#: swift/obj/replicator.py:358 msgid "Error syncing handoff partition" msgstr "" -#: swift/obj/replicator.py:434 +#: swift/obj/replicator.py:436 #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "" -#: swift/obj/replicator.py:441 +#: swift/obj/replicator.py:443 #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "" -#: swift/obj/replicator.py:485 +#: swift/obj/replicator.py:487 #, python-format msgid "Error syncing with node: %s" msgstr "" -#: swift/obj/replicator.py:490 +#: swift/obj/replicator.py:492 msgid "Error syncing partition" msgstr "" -#: swift/obj/replicator.py:505 +#: swift/obj/replicator.py:507 #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" -#: swift/obj/replicator.py:534 +#: swift/obj/replicator.py:536 #, python-format msgid "Nothing replicated for %s seconds." msgstr "" -#: swift/obj/replicator.py:721 +#: swift/obj/replicator.py:723 msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" -#: swift/obj/replicator.py:727 +#: swift/obj/replicator.py:729 msgid "Ring change detected. Aborting current replication pass." 
msgstr "" -#: swift/obj/replicator.py:755 +#: swift/obj/replicator.py:757 msgid "Exception in top-level replication loop" msgstr "" -#: swift/obj/replicator.py:765 +#: swift/obj/replicator.py:767 msgid "Running object replicator in script mode." msgstr "" -#: swift/obj/replicator.py:783 +#: swift/obj/replicator.py:785 #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "" -#: swift/obj/replicator.py:794 +#: swift/obj/replicator.py:796 msgid "Starting object replicator in daemon mode." msgstr "" -#: swift/obj/replicator.py:798 +#: swift/obj/replicator.py:800 msgid "Starting object replication pass." msgstr "" -#: swift/obj/replicator.py:803 +#: swift/obj/replicator.py:805 #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "" @@ -1223,11 +1232,11 @@ msgid "Account" msgstr "" #: swift/proxy/controllers/base.py:813 swift/proxy/controllers/base.py:852 -#: swift/proxy/controllers/base.py:944 swift/proxy/controllers/obj.py:340 +#: swift/proxy/controllers/base.py:948 swift/proxy/controllers/obj.py:340 #: swift/proxy/controllers/obj.py:885 swift/proxy/controllers/obj.py:934 -#: swift/proxy/controllers/obj.py:948 swift/proxy/controllers/obj.py:1769 -#: swift/proxy/controllers/obj.py:2007 swift/proxy/controllers/obj.py:2145 -#: swift/proxy/controllers/obj.py:2379 +#: swift/proxy/controllers/obj.py:948 swift/proxy/controllers/obj.py:1774 +#: swift/proxy/controllers/obj.py:2013 swift/proxy/controllers/obj.py:2176 +#: swift/proxy/controllers/obj.py:2410 msgid "Object" msgstr "" @@ -1235,45 +1244,45 @@ msgstr "" msgid "Trying to read during GET (retrying)" msgstr "" -#: swift/proxy/controllers/base.py:945 +#: swift/proxy/controllers/base.py:949 msgid "Trying to read during GET" msgstr "" -#: swift/proxy/controllers/base.py:949 +#: swift/proxy/controllers/base.py:953 #, python-format msgid "Client did not read from proxy within %ss" msgstr "" -#: swift/proxy/controllers/base.py:954 +#: swift/proxy/controllers/base.py:958 
msgid "Client disconnected on read" msgstr "" -#: swift/proxy/controllers/base.py:956 +#: swift/proxy/controllers/base.py:960 msgid "Trying to send to client" msgstr "" -#: swift/proxy/controllers/base.py:998 swift/proxy/controllers/base.py:1437 +#: swift/proxy/controllers/base.py:1002 swift/proxy/controllers/base.py:1441 #, python-format msgid "Trying to %(method)s %(path)s" msgstr "" -#: swift/proxy/controllers/base.py:1037 swift/proxy/controllers/base.py:1425 +#: swift/proxy/controllers/base.py:1041 swift/proxy/controllers/base.py:1429 #: swift/proxy/controllers/obj.py:364 swift/proxy/controllers/obj.py:925 -#: swift/proxy/controllers/obj.py:2137 swift/proxy/controllers/obj.py:2424 +#: swift/proxy/controllers/obj.py:2168 swift/proxy/controllers/obj.py:2455 msgid "ERROR Insufficient Storage" msgstr "" -#: swift/proxy/controllers/base.py:1040 +#: swift/proxy/controllers/base.py:1044 #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "" -#: swift/proxy/controllers/base.py:1428 +#: swift/proxy/controllers/base.py:1432 #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "" -#: swift/proxy/controllers/base.py:1558 +#: swift/proxy/controllers/base.py:1562 #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "" @@ -1287,7 +1296,7 @@ msgstr "" msgid "Trying to get final status of PUT to %s" msgstr "" -#: swift/proxy/controllers/obj.py:368 swift/proxy/controllers/obj.py:2429 +#: swift/proxy/controllers/obj.py:368 swift/proxy/controllers/obj.py:2460 #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "" @@ -1302,40 +1311,40 @@ msgstr "" msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" -#: swift/proxy/controllers/obj.py:929 swift/proxy/controllers/obj.py:2140 +#: swift/proxy/controllers/obj.py:929 swift/proxy/controllers/obj.py:2171 #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object 
Server" msgstr "" -#: swift/proxy/controllers/obj.py:935 swift/proxy/controllers/obj.py:2146 +#: swift/proxy/controllers/obj.py:935 swift/proxy/controllers/obj.py:2177 #, python-format msgid "Expect: 100-continue on %s" msgstr "" -#: swift/proxy/controllers/obj.py:949 swift/proxy/controllers/obj.py:1770 +#: swift/proxy/controllers/obj.py:949 swift/proxy/controllers/obj.py:1775 #, python-format msgid "Trying to write to %s" msgstr "" -#: swift/proxy/controllers/obj.py:1000 swift/proxy/controllers/obj.py:2311 +#: swift/proxy/controllers/obj.py:1000 swift/proxy/controllers/obj.py:2342 #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "" -#: swift/proxy/controllers/obj.py:1008 swift/proxy/controllers/obj.py:2317 +#: swift/proxy/controllers/obj.py:1008 swift/proxy/controllers/obj.py:2348 msgid "Client disconnected without sending last chunk" msgstr "" -#: swift/proxy/controllers/obj.py:1013 swift/proxy/controllers/obj.py:2324 +#: swift/proxy/controllers/obj.py:1013 swift/proxy/controllers/obj.py:2355 msgid "ERROR Exception causing client disconnect" msgstr "" -#: swift/proxy/controllers/obj.py:1017 swift/proxy/controllers/obj.py:2328 +#: swift/proxy/controllers/obj.py:1017 swift/proxy/controllers/obj.py:2359 #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "" -#: swift/proxy/controllers/obj.py:1023 swift/proxy/controllers/obj.py:2242 +#: swift/proxy/controllers/obj.py:1023 swift/proxy/controllers/obj.py:2273 msgid "Client disconnected without sending enough data" msgstr "" @@ -1344,18 +1353,18 @@ msgstr "" msgid "Object servers returned %s mismatched etags" msgstr "" -#: swift/proxy/controllers/obj.py:1073 swift/proxy/controllers/obj.py:2288 -#: swift/proxy/controllers/obj.py:2513 +#: swift/proxy/controllers/obj.py:1073 swift/proxy/controllers/obj.py:2319 +#: swift/proxy/controllers/obj.py:2544 msgid "Object PUT" msgstr "" -#: swift/proxy/controllers/obj.py:2281 +#: swift/proxy/controllers/obj.py:2312 #, python-format 
msgid "Not enough object servers ack'ed (got %d)" msgstr "" -#: swift/proxy/controllers/obj.py:2380 +#: swift/proxy/controllers/obj.py:2411 #, python-format -msgid "Trying to get %s status of PUT to %s" +msgid "Trying to get %(status_type)s status of PUT to %(path)s" msgstr "" From d09ef0da62b64067b04a980c643f77526a9078ac Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 6 Apr 2016 15:40:42 +0100 Subject: [PATCH 079/141] Assert that ChunkWriteTimouts are not raised Follow up for change Ibbc89449e7878fc4215e47e3f7dfe4ae58a2d638 to add a test assertion that the ChunkWriteTimeout contexts are exited without raising the timeout exception in iter_bytes_from_response_part(). Change-Id: I6d323cb26779e457fb5940093a81b349b333a0af --- test/unit/proxy/test_server.py | 59 +++++++++++++++++++++++----------- 1 file changed, 41 insertions(+), 18 deletions(-) diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 631551d337..dbbd7cbf5c 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -22,6 +22,7 @@ import math import os import pickle import sys +import traceback import unittest from contextlib import closing, contextmanager from gzip import GzipFile @@ -65,7 +66,7 @@ from swift.obj import server as object_server from swift.common.middleware import proxy_logging, versioned_writes from swift.common.middleware.acl import parse_acl, format_acl from swift.common.exceptions import ChunkReadTimeout, DiskFileNotExist, \ - APIVersionError + APIVersionError, ChunkWriteTimeout from swift.common import utils, constraints from swift.common.ring import RingData from swift.common.utils import mkdirs, normalize_timestamp, NullLogger @@ -5827,30 +5828,52 @@ class TestObjectController(unittest.TestCase): exp = 'HTTP/1.1 201' self.assertEqual(headers[:len(exp)], exp) - with mock.patch.object(_test_servers[0], 'client_timeout', new=5): - # get object - fd.write('GET /v1/a/ec-discon/test HTTP/1.1\r\n' - 'Host: localhost\r\n' - 
'Connection: close\r\n' - 'X-Storage-Token: t\r\n' - '\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEqual(headers[:len(exp)], exp) + class WrappedTimeout(ChunkWriteTimeout): + def __enter__(self): + timeouts[self] = traceback.extract_stack() + return super(WrappedTimeout, self).__enter__() - # read most of the object, and disconnect - fd.read(10) - sock.fd._sock.close() - condition = \ - lambda: _test_servers[0].logger.get_lines_for_level('warning') - self._sleep_enough(condition) + def __exit__(self, typ, value, tb): + timeouts[self] = None + return super(WrappedTimeout, self).__exit__(typ, value, tb) + + timeouts = {} + with mock.patch('swift.proxy.controllers.base.ChunkWriteTimeout', + WrappedTimeout): + with mock.patch.object(_test_servers[0], 'client_timeout', new=5): + # get object + fd.write('GET /v1/a/ec-discon/test HTTP/1.1\r\n' + 'Host: localhost\r\n' + 'Connection: close\r\n' + 'X-Storage-Token: t\r\n' + '\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEqual(headers[:len(exp)], exp) + + # read most of the object, and disconnect + fd.read(10) + sock.fd._sock.close() + self._sleep_enough( + lambda: + _test_servers[0].logger.get_lines_for_level('warning')) # check for disconnect message! 
expected = ['Client disconnected on read'] * 2 self.assertEqual( _test_servers[0].logger.get_lines_for_level('warning'), expected) + # check that no coro was left waiting to write + self.assertTrue(timeouts) # sanity - WrappedTimeout did get called + missing_exits = filter(lambda tb: tb is not None, timeouts.values()) + self.assertFalse( + missing_exits, 'Failed to exit all ChunkWriteTimeouts.\n' + + ''.join(['No exit from ChunkWriteTimeout entered at:\n' + + ''.join(traceback.format_list(tb)[:-1]) + for tb in missing_exits])) + # and check that the ChunkWriteTimeouts did not raise Exceptions + self.assertFalse(_test_servers[0].logger.get_lines_for_level('error')) @unpatch_policies def test_ec_client_put_disconnect(self): From a057c409ec8a23290bc72c4fa45d55a1178f4828 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 8 Apr 2016 07:02:33 +0000 Subject: [PATCH 080/141] Imported Translations from Zanata For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: I9f4330ec20463e4d303e8ba3b67f86813a914ac5 --- swift/locale/de/LC_MESSAGES/swift.po | 97 +--------------------- swift/locale/es/LC_MESSAGES/swift.po | 96 +-------------------- swift/locale/fr/LC_MESSAGES/swift.po | 97 +--------------------- swift/locale/it/LC_MESSAGES/swift.po | 106 ++---------------------- swift/locale/ja/LC_MESSAGES/swift.po | 96 +-------------------- swift/locale/ko_KR/LC_MESSAGES/swift.po | 96 +-------------------- swift/locale/pt_BR/LC_MESSAGES/swift.po | 96 +-------------------- swift/locale/ru/LC_MESSAGES/swift.po | 97 +--------------------- swift/locale/tr_TR/LC_MESSAGES/swift.po | 80 +----------------- swift/locale/zh_CN/LC_MESSAGES/swift.po | 96 +-------------------- swift/locale/zh_TW/LC_MESSAGES/swift.po | 96 +-------------------- 11 files changed, 27 insertions(+), 1026 deletions(-) diff --git a/swift/locale/de/LC_MESSAGES/swift.po b/swift/locale/de/LC_MESSAGES/swift.po index 26bf545973..9a4724a549 
100644 --- a/swift/locale/de/LC_MESSAGES/swift.po +++ b/swift/locale/de/LC_MESSAGES/swift.po @@ -7,12 +7,11 @@ # Ettore Atalan , 2014-2015 # Jonas John , 2015 # Frank Kloeker , 2016. #zanata -# Monika Wolf , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev268\n" +"Project-Id-Version: swift 2.7.1.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-24 22:25+0000\n" +"POT-Creation-Date: 2016-04-07 22:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -80,14 +79,6 @@ msgstr "%(success)s Erfolge, %(failure)s Fehlschläge" msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s gab 503 für %(statuses)s zurück" -#, python-format -msgid "%s #%d not running (%s)" -msgstr "%s #%d läuft nicht (%s)" - -#, python-format -msgid "%s (%s) appears to have stopped" -msgstr "%s (%s) scheinbar gestoppt" - #, python-format msgid "%s already started..." msgstr "%s bereits gestartet..." @@ -104,14 +95,6 @@ msgstr "%s ist nicht eingehängt" msgid "%s responded as unmounted" msgstr "%s zurückgemeldet als ausgehängt" -#, python-format -msgid "%s running (%s - %s)" -msgstr "%s läuft (%s - %s)" - -#, python-format -msgid "%s: %s" -msgstr "%s: %s" - #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Verbindung zurückgesetzt durch Peer" @@ -150,10 +133,6 @@ msgstr ", Rückgabecodes: " msgid "Account" msgstr "Konto" -#, python-format -msgid "Account %s has not been reaped since %s" -msgstr "Konto %s wurde nicht aufgeräumt seit %s" - #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Kontoprüfungsmodus \"once\" abgeschlossen: %.02fs" @@ -169,10 +148,6 @@ msgstr "" "Versuch, %(count)d Datenbanken in %(time).5f Sekunden zu replizieren " "(%(rate).5f/s)" -#, python-format -msgid "Audit Failed for %s: %s" -msgstr "Prüfung fehlgeschlagen für %s: %s" - #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Falscher 
rsync-Rückgabecode: %(ret)d <- %(args)s" @@ -198,10 +173,6 @@ msgstr "Einzelthread-Scanvorgang für Containeraktualisierung wird gestartet" msgid "Begin container update sweep" msgstr "Scanvorgang für Containeraktualisierung wird gestartet" -#, python-format -msgid "Begin object audit \"%s\" mode (%s%s)" -msgstr "Objektprüfung mit \"%s\"-Modus wird gestartet (%s%s)" - msgid "Begin object update single threaded sweep" msgstr "Einzelthread-Scanvorgang für Objektaktualisierung wird gestartet" @@ -228,14 +199,6 @@ msgstr "Kann nicht auf die Datei %s zugreifen." msgid "Can not load profile data from %s." msgstr "Die Profildaten von %s können nicht geladen werden." -#, python-format -msgid "Cannot read %s (%s)" -msgstr "%s (%s) kann nicht gelesen werden." - -#, python-format -msgid "Cannot write %s (%s)" -msgstr "Schreiben von %s (%s) nicht möglich." - #, python-format msgid "Client did not read from proxy within %ss" msgstr "Client konnte nicht innerhalb von %ss vom Proxy lesen" @@ -618,14 +581,6 @@ msgstr "Ausnahme in Replizierungsloop der höchsten Ebene" msgid "Exception in top-levelreconstruction loop" msgstr "Ausnahme in Rekonstruktionsloop der höchsten Ebene" -#, python-format -msgid "Exception while deleting container %s %s" -msgstr "Ausnahme beim Löschen von Container %s %s" - -#, python-format -msgid "Exception while deleting object %s %s %s" -msgstr "Ausnahme beim Löschen von Objekt %s %s %s" - #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Ausnahme bei %(ip)s:%(port)s/%(device)s" @@ -701,10 +656,6 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "Lange laufendes rsync wird gekillt: %s" -#, python-format -msgid "Loading JSON from %s failed (%s)" -msgstr "Laden von JSON aus %s fehlgeschlagen: (%s)" - msgid "Lockup detected.. killing live coros." msgstr "Suche erkannt. Live-Coros werden gelöscht." 
@@ -732,10 +683,6 @@ msgstr "Keine Richtlinie mit Index %s" msgid "No realm key for %r" msgstr "Kein Bereichsschlüssel für %r" -#, python-format -msgid "No space left on device for %s (%s)" -msgstr "Kein freier Speicherplatz im Gerät für %s (%s) vorhanden." - #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Knotenfehler begrenzt %(ip)s:%(port)s (%(device)s)" @@ -872,14 +819,6 @@ msgstr "Pfad in X-Container-Sync-To ist erforderlich" msgid "Problem cleaning up %s" msgstr "Problem bei der Bereinigung von %s" -#, python-format -msgid "Problem cleaning up %s (%s)" -msgstr "Problem bei der Bereinigung von %s (%s)" - -#, python-format -msgid "Problem writing durable state file %s (%s)" -msgstr "Problem beim Schreiben der langlebigen Statusdatei %s (%s)" - #, python-format msgid "Profiling Error: %s" msgstr "Fehler bei der Profilerstellung: %s" @@ -897,10 +836,6 @@ msgstr "" "%(object_path)s bis %(quar_path)s wurden unter Quarantäne gestellt, da es " "sich nicht um ein Verzeichnis handelt" -#, python-format -msgid "Quarantined %s to %s due to %s database" -msgstr "%s unter Quarantäne gestellt in %s aufgrund von %s-Datenbank" - #, python-format msgid "Quarantining DB %s" msgstr "Datenbank %s wird unter Quarantäne gestellt" @@ -968,10 +903,6 @@ msgstr "Objektrekonstruktor läuft im Skriptmodus." msgid "Running object replicator in script mode." msgstr "Objektreplikator läuft im Skriptmodus." -#, python-format -msgid "Signal %s pid: %s signal: %s" -msgstr "Signal %s PID: %s Signal: %s" - #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " @@ -1051,10 +982,6 @@ msgstr "Versuch, %(method)s %(path)s" msgid "Trying to GET %(full_path)s" msgstr "Versuch, %(full_path)s mit GET abzurufen" -#, python-format -msgid "Trying to get %s status of PUT to %s" -msgstr "Es wird versucht, %s-Status von PUT für %s abzurufen." 
- #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Versuch, den finalen Status von PUT für %s abzurufen" @@ -1079,10 +1006,6 @@ msgstr "Versuch, an %s zu schreiben" msgid "UNCAUGHT EXCEPTION" msgstr "NICHT ABGEFANGENE AUSNAHME" -#, python-format -msgid "Unable to find %s config section in %s" -msgstr "%s-Konfigurationsabschnitt in %s kann nicht gefunden werden" - #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "" @@ -1097,20 +1020,12 @@ msgstr "" msgid "Unable to locate config for %s" msgstr "Konfiguration für %s wurde nicht gefunden." -#, python-format -msgid "Unable to locate config number %s for %s" -msgstr "Konfigurationsnummer %s für %s wurde nicht gefunden." - msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "fallocate, posix_fallocate konnte nicht in libc gefunden werden. Wird als " "Nullbefehl verlassen." -#, python-format -msgid "Unable to perform fsync() on directory %s: %s" -msgstr "fsync() kann für Verzeichnis %s nicht ausgeführt werden: %s" - #, python-format msgid "Unable to read config from %s" msgstr "Konfiguration aus %s kann nicht gelesen werden" @@ -1161,14 +1076,6 @@ msgstr "" "WARNUNG: Grenzwert für Speicher kann nicht geändert werden. Wird nicht als " "Root ausgeführt?" -#, python-format -msgid "Waited %s seconds for %s to die; giving up" -msgstr "Hat %s Sekunden für %s zum Erlöschen gewartet; Gibt auf" - -#, python-format -msgid "Waited %s seconds for %s to die; killing" -msgstr "Hat %s Sekunden für %s zum Erlöschen gewartet. Wird abgebrochen." - msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" "Warnung: Geschwindigkeitsbegrenzung kann nicht ohne memcached-Client " diff --git a/swift/locale/es/LC_MESSAGES/swift.po b/swift/locale/es/LC_MESSAGES/swift.po index 5800ed20c0..1c3cbef87e 100644 --- a/swift/locale/es/LC_MESSAGES/swift.po +++ b/swift/locale/es/LC_MESSAGES/swift.po @@ -9,9 +9,9 @@ # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev244\n" +"Project-Id-Version: swift 2.7.1.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-22 06:16+0000\n" +"POT-Creation-Date: 2016-04-07 22:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -79,14 +79,6 @@ msgstr "%(success)s éxitos, %(failure)s fallos" msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s devuelve 503 para %(statuses)s" -#, python-format -msgid "%s #%d not running (%s)" -msgstr "%s #%d no está en ejecución (%s)" - -#, python-format -msgid "%s (%s) appears to have stopped" -msgstr "%s (%s) parece haberse detenido" - #, python-format msgid "%s already started..." msgstr "%s ya está iniciado..." @@ -103,14 +95,6 @@ msgstr "%s no está montado" msgid "%s responded as unmounted" msgstr "%s ha respondido como desmontado" -#, python-format -msgid "%s running (%s - %s)" -msgstr "%s en ejecución (%s - %s)" - -#, python-format -msgid "%s: %s" -msgstr "%s: %s" - #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Restablecimiento de conexión por igual" @@ -150,10 +134,6 @@ msgstr ", códigos de retorno:" msgid "Account" msgstr "Cuenta" -#, python-format -msgid "Account %s has not been reaped since %s" -msgstr "La cuenta %s no se ha cosechado desde %s" - #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Auditoría de cuenta en modalidad de \"una vez\" finalizada: %.02fs" @@ -169,10 +149,6 @@ msgstr "" "Se han intentado replicar %(count)d bases de datos en %(time).5f segundos " "(%(rate).5f/s)" -#, python-format -msgid "Audit Failed for %s: %s" -msgstr "La auditoría ha fallado para %s: %s" - #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Código de retorno de resincronización erróneo: %(ret)d <- %(args)s" @@ -198,10 +174,6 @@ msgstr "Comenzar el barrido de hebra única de actualización del contenedor" msgid "Begin container 
update sweep" msgstr "Comenzar el barrido de actualización del contenedor" -#, python-format -msgid "Begin object audit \"%s\" mode (%s%s)" -msgstr "Comenzar auditoría de objetos en modalidad \"%s\" (%s%s)" - msgid "Begin object update single threaded sweep" msgstr "Comenzar el barrido de hebra única de actualización del objeto" @@ -226,14 +198,6 @@ msgstr "No se puede acceder al archivo %s." msgid "Can not load profile data from %s." msgstr "No se pueden cargar los datos de perfil desde %s." -#, python-format -msgid "Cannot read %s (%s)" -msgstr "No se puede leer %s (%s)" - -#, python-format -msgid "Cannot write %s (%s)" -msgstr "No se puede escribir en %s (%s)" - #, python-format msgid "Client did not read from proxy within %ss" msgstr "El cliente pudo realizar la lectura desde el proxy en %ss" @@ -614,14 +578,6 @@ msgstr "Excepción en el bucle de réplica de nivel superior" msgid "Exception in top-levelreconstruction loop" msgstr "Excepción en el bucle de reconstrucción de nivel superior" -#, python-format -msgid "Exception while deleting container %s %s" -msgstr "Excepción al suprimir el contenedor %s %s" - -#, python-format -msgid "Exception while deleting object %s %s %s" -msgstr "Excepción al suprimir el objeto %s %s %s" - #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Excepción con %(ip)s:%(port)s/%(device)s" @@ -698,10 +654,6 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "Interrumpiendo resincronización (rsync) de larga duración: %s" -#, python-format -msgid "Loading JSON from %s failed (%s)" -msgstr "Error al cargar JSON desde %s (%s)" - msgid "Lockup detected.. killing live coros." msgstr "Bloqueo detectado. Interrumpiendo coros activos." 
@@ -729,10 +681,6 @@ msgstr "No hay ninguna política que tenga el índice %s" msgid "No realm key for %r" msgstr "No hay clave de dominio para %r" -#, python-format -msgid "No space left on device for %s (%s)" -msgstr "No queda espacio libre en el dispositivo para %s (%s)" - #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Error de nodo limitado %(ip)s:%(port)s (%(device)s)" @@ -870,14 +818,6 @@ msgstr "Vía de acceso necesaria en X-Container-Sync-To" msgid "Problem cleaning up %s" msgstr "Problema al limpiar %s" -#, python-format -msgid "Problem cleaning up %s (%s)" -msgstr "Problema al limpiar %s (%s)" - -#, python-format -msgid "Problem writing durable state file %s (%s)" -msgstr "Problema al escribir en el archivo de estado durable %s (%s)" - #, python-format msgid "Profiling Error: %s" msgstr "Error de perfil: %s" @@ -895,10 +835,6 @@ msgstr "" "Se ha puesto en cuarentena %(object_path)s en %(quar_path)s debido a que no " "es un directorio" -#, python-format -msgid "Quarantined %s to %s due to %s database" -msgstr "%s de %s en cuarentena debido a la base de datos %s" - #, python-format msgid "Quarantining DB %s" msgstr "Poniendo en cuarentena la base de datos %s" @@ -966,10 +902,6 @@ msgstr "Ejecutando reconstructor de objeto en modo script." msgid "Running object replicator in script mode." msgstr "Ejecutando replicador de objeto en modalidad de script." 
-#, python-format -msgid "Signal %s pid: %s signal: %s" -msgstr "Señal %s pid: %s señal: %s" - #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " @@ -1050,10 +982,6 @@ msgstr "Intentando %(method)s %(path)s" msgid "Trying to GET %(full_path)s" msgstr "Intentando hacer un GET de %(full_path)s" -#, python-format -msgid "Trying to get %s status of PUT to %s" -msgstr "Intentando obtener el estado %s de PUT en %s" - #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Intentando obtener el estado final de PUT en %s" @@ -1078,10 +1006,6 @@ msgstr "Intentando escribir en %s" msgid "UNCAUGHT EXCEPTION" msgstr "UNCAUGHT EXCEPTION" -#, python-format -msgid "Unable to find %s config section in %s" -msgstr "No se ha podido encontrar la sección de configuración %s en %s" - #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "" @@ -1095,20 +1019,12 @@ msgstr "No se ha podido localizar %s en libc. Se dejará como no operativo." msgid "Unable to locate config for %s" msgstr "No se ha podido encontrar el número de configuración de %s" -#, python-format -msgid "Unable to locate config number %s for %s" -msgstr "No se ha podido encontrar el número de configuración %s de %s" - msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "No se ha podido localizar fallocate, posix_fallocate en libc. Se dejará como " "no operativo." -#, python-format -msgid "Unable to perform fsync() on directory %s: %s" -msgstr "No se puede realizar fsync() en el directorio %s: %s" - #, python-format msgid "Unable to read config from %s" msgstr "No se ha podido leer la configuración de %s" @@ -1160,14 +1076,6 @@ msgstr "" "AVISO: no se ha podido modificar el límite de memoria. ¿Está en ejecución " "como no root?" 
-#, python-format -msgid "Waited %s seconds for %s to die; giving up" -msgstr "Se han esperado %s segundos a que muriera %s; abandonando" - -#, python-format -msgid "Waited %s seconds for %s to die; killing" -msgstr "Se han esperado %s segundos a que muriera %s; terminando" - msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" "Aviso: no se puede ajustar el límite sin un cliente almacenado en memoria " diff --git a/swift/locale/fr/LC_MESSAGES/swift.po b/swift/locale/fr/LC_MESSAGES/swift.po index 0e7868a728..bba51cb434 100644 --- a/swift/locale/fr/LC_MESSAGES/swift.po +++ b/swift/locale/fr/LC_MESSAGES/swift.po @@ -10,9 +10,9 @@ # Gael Rehault , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev4\n" +"Project-Id-Version: swift 2.7.1.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-25 11:23+0000\n" +"POT-Creation-Date: 2016-04-07 22:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -80,14 +80,6 @@ msgstr "%(success)s succès, %(failure)s échec(s)" msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s : renvoi de l'erreur 503 pour %(statuses)s" -#, python-format -msgid "%s #%d not running (%s)" -msgstr "%s #%d n'est pas demarré (%s)" - -#, python-format -msgid "%s (%s) appears to have stopped" -msgstr "%s (%s) semble s'être arrêté" - #, python-format msgid "%s already started..." msgstr "%s déjà démarré..." 
@@ -104,14 +96,6 @@ msgstr "%s n'est pas monté" msgid "%s responded as unmounted" msgstr "%s ont été identifié(es) comme étant démonté(es)" -#, python-format -msgid "%s running (%s - %s)" -msgstr "%s en cours d'exécution (%s - %s)" - -#, python-format -msgid "%s: %s" -msgstr "%s : %s" - #, python-format msgid "%s: Connection reset by peer" msgstr "%s : Connexion réinitialisée par l'homologue" @@ -150,10 +134,6 @@ msgstr ", return codes: " msgid "Account" msgstr "Compte" -#, python-format -msgid "Account %s has not been reaped since %s" -msgstr "Le compte %s n'a pas été collecté depuis %s" - #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Audit de compte en mode \"Once\" terminé : %.02fs" @@ -169,10 +149,6 @@ msgstr "" "Tentative de réplication de %(count)d bases de données en %(time).5f " "secondes (%(rate).5f/s)" -#, python-format -msgid "Audit Failed for %s: %s" -msgstr "Echec de l'audit pour %s : %s" - #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Code retour Rsync non valide : %(ret)d <- %(args)s" @@ -199,10 +175,6 @@ msgstr "" msgid "Begin container update sweep" msgstr "Démarrer le balayage des mises à jour du conteneur" -#, python-format -msgid "Begin object audit \"%s\" mode (%s%s)" -msgstr "Démarrer l'audit d'objet en mode \"%s\" (%s%s)" - msgid "Begin object update single threaded sweep" msgstr "" "Démarrer le balayage des mises à jour d'objet (unité d'exécution unique)" @@ -230,14 +202,6 @@ msgstr "Ne peut pas accéder au fichier %s." msgid "Can not load profile data from %s." msgstr "Impossible de charger des données de profil depuis %s." 
-#, python-format -msgid "Cannot read %s (%s)" -msgstr "Impossible de lire %s (%s)" - -#, python-format -msgid "Cannot write %s (%s)" -msgstr "Impossible d'écrire %s (%s)" - #, python-format msgid "Client did not read from proxy within %ss" msgstr "Le client n'a pas lu les données du proxy en %s s" @@ -622,14 +586,6 @@ msgstr "Exception dans la boucle de réplication de niveau supérieur" msgid "Exception in top-levelreconstruction loop" msgstr "Exception dans la boucle de reconstruction de niveau supérieur" -#, python-format -msgid "Exception while deleting container %s %s" -msgstr "Exception lors de la suppression du conteneur %s %s" - -#, python-format -msgid "Exception while deleting object %s %s %s" -msgstr "Exception lors de la suppression de l'objet %s %s %s" - #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Exception liée à %(ip)s:%(port)s/%(device)s" @@ -707,10 +663,6 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "Arrêt de l'opération Rsync à exécution longue : %s" -#, python-format -msgid "Loading JSON from %s failed (%s)" -msgstr "Echec du chargement du fichier JSON depuis %s (%s)" - msgid "Lockup detected.. killing live coros." msgstr "Blocage détecté. Arrêt des coroutines actives." 
@@ -738,10 +690,6 @@ msgstr "Aucune statégie avec un index de type %s" msgid "No realm key for %r" msgstr "Aucune clé de domaine pour %r" -#, python-format -msgid "No space left on device for %s (%s)" -msgstr "Plus d'espace disponible sur le périphérique pour %s (%s)" - #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "" @@ -886,15 +834,6 @@ msgstr "Chemin requis dans X-Container-Sync-To" msgid "Problem cleaning up %s" msgstr "Problème lors du nettoyage de %s" -#, python-format -msgid "Problem cleaning up %s (%s)" -msgstr "Problème lors du nettoyage de %s (%s)" - -#, python-format -msgid "Problem writing durable state file %s (%s)" -msgstr "" -"Un problème est survenu lors de l'écriture du fichier d'état durable %s (%s)" - #, python-format msgid "Profiling Error: %s" msgstr "Erreur de profilage : %s" @@ -912,10 +851,6 @@ msgstr "" "%(object_path)s n'est pas un répertoire et a donc été mis en quarantaine " "dans %(quar_path)s" -#, python-format -msgid "Quarantined %s to %s due to %s database" -msgstr "En quarantaine de %s à %s en raison de la base de données %s" - #, python-format msgid "Quarantining DB %s" msgstr "Mise en quarantaine de la base de données %s" @@ -985,10 +920,6 @@ msgstr "Exécution du reconstructeur d'objet en mode script." msgid "Running object replicator in script mode." msgstr "Exécution du réplicateur d'objet en mode script." 
-#, python-format -msgid "Signal %s pid: %s signal: %s" -msgstr "Signal %s pid: %s signal: %s" - #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " @@ -1068,10 +999,6 @@ msgstr "Tentative d'exécution de %(method)s %(path)s" msgid "Trying to GET %(full_path)s" msgstr "Tentative de lecture de %(full_path)s" -#, python-format -msgid "Trying to get %s status of PUT to %s" -msgstr "Tentative d'obtention du statut de l'opération PUT %s sur %s" - #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Tentative d'obtention du statut final de l'opération PUT sur %s" @@ -1096,10 +1023,6 @@ msgstr "Tentative d'écriture sur %s" msgid "UNCAUGHT EXCEPTION" msgstr "EXCEPTION NON INTERCEPTEE" -#, python-format -msgid "Unable to find %s config section in %s" -msgstr "Impossible de trouver la section de configuration %s dans %s" - #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "" @@ -1114,20 +1037,12 @@ msgstr "" msgid "Unable to locate config for %s" msgstr "Impossible de trouver la configuration pour %s" -#, python-format -msgid "Unable to locate config number %s for %s" -msgstr "Impossible de trouver la configuration portant le numéro %s pour %s" - msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Impossible de localiser fallocate, posix_fallocate dans libc. Laissé comme " "action nulle (no-op)." -#, python-format -msgid "Unable to perform fsync() on directory %s: %s" -msgstr "Impossible d'exécuter fsync() dans le répertoire %s : %s" - #, python-format msgid "Unable to read config from %s" msgstr "Impossible de lire le fichier de configuration depuis %s" @@ -1179,14 +1094,6 @@ msgstr "" "AVERTISSEMENT : Impossible de modifier la limite de mémoire. Exécution en " "tant que non root ?" 
-#, python-format -msgid "Waited %s seconds for %s to die; giving up" -msgstr "Attente de %s secondes pour la fin de %s ; abandon" - -#, python-format -msgid "Waited %s seconds for %s to die; killing" -msgstr "Attente de %s secondes pour la fin de %s . En cours d'arrêt" - msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Avertissement : impossible d'appliquer Ratelimit sans client memcached" diff --git a/swift/locale/it/LC_MESSAGES/swift.po b/swift/locale/it/LC_MESSAGES/swift.po index e7f30cac7f..092641a79e 100644 --- a/swift/locale/it/LC_MESSAGES/swift.po +++ b/swift/locale/it/LC_MESSAGES/swift.po @@ -6,17 +6,17 @@ # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Alessandra , 2016. #zanata -# Remo Mattei , 2016. #zanata +# Remo Mattei , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev254\n" +"Project-Id-Version: swift 2.7.1.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-22 19:48+0000\n" +"POT-Creation-Date: 2016-04-07 22:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-22 05:31+0000\n" -"Last-Translator: Remo Mattei \n" +"PO-Revision-Date: 2016-04-07 03:43+0000\n" +"Last-Translator: Remo Mattei \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" @@ -79,14 +79,6 @@ msgstr "%(success)s operazioni con esito positivo, %(failure)s errori" msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s restituisce 503 per %(statuses)s" -#, python-format -msgid "%s #%d not running (%s)" -msgstr "%s #%d non in esecuzione (%s)" - -#, python-format -msgid "%s (%s) appears to have stopped" -msgstr "%s (%s) sembra essere stato arrestato" - #, python-format msgid "%s already started..." msgstr "%s già avviato..." 
@@ -103,14 +95,6 @@ msgstr "%s non è montato" msgid "%s responded as unmounted" msgstr "%s ha risposto come smontato" -#, python-format -msgid "%s running (%s - %s)" -msgstr "%s in esecuzione (%s - %s)" - -#, python-format -msgid "%s: %s" -msgstr "%s: %s" - #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Connessione reimpostata dal peer" @@ -150,8 +134,8 @@ msgid "Account" msgstr "Conto" #, python-format -msgid "Account %s has not been reaped since %s" -msgstr "Account %s non utilizzato da %s" +msgid "Account %(account)s has not been reaped since %(time)s" +msgstr "Il conto %(account)s non è stato verificato dal %(time)s" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" @@ -168,10 +152,6 @@ msgstr "" "È stato eseguito un tentativo di replicare %(count)d dbs in %(time).5f " "secondi (%(rate).5f/s)" -#, python-format -msgid "Audit Failed for %s: %s" -msgstr "Verifica non riuscita per %s: %s" - #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Codice di ritorno rsync errato: %(ret)d <- %(args)s" @@ -197,10 +177,6 @@ msgstr "Avvio pulizia a singolo thread aggiornamento contenitore" msgid "Begin container update sweep" msgstr "Avvio pulizia aggiornamento contenitore" -#, python-format -msgid "Begin object audit \"%s\" mode (%s%s)" -msgstr "Avvio modalità \"%s\" verifica oggetto (%s%s)" - msgid "Begin object update single threaded sweep" msgstr "Avvio pulizia a singolo thread aggiornamento oggetto" @@ -227,14 +203,6 @@ msgstr "Impossibile accedere al file %s." msgid "Can not load profile data from %s." msgstr "Impossibile caricare i dati del profilo da %s." 
-#, python-format -msgid "Cannot read %s (%s)" -msgstr "Non e' possibile leggere %s (%s)" - -#, python-format -msgid "Cannot write %s (%s)" -msgstr "Non e' possibile scriver %s (%s)" - #, python-format msgid "Client did not read from proxy within %ss" msgstr "Il client non ha eseguito la lettura dal proxy in %ss" @@ -617,14 +585,6 @@ msgstr "Eccezione nel loop di replica di livello superiore" msgid "Exception in top-levelreconstruction loop" msgstr "Eccezione nel loop di ricostruzione di livello superiore" -#, python-format -msgid "Exception while deleting container %s %s" -msgstr "Eccezione durante l'eliminazione del contenitore %s %s" - -#, python-format -msgid "Exception while deleting object %s %s %s" -msgstr "Eccezione durante l'eliminazione dell'oggetto %s %s %s" - #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Eccezione relativa a %(ip)s:%(port)s/%(device)s" @@ -702,10 +662,6 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "Chiusura rsync ad elaborazione prolungata: %s" -#, python-format -msgid "Loading JSON from %s failed (%s)" -msgstr "Caricamento JSON dal %s fallito (%s)" - msgid "Lockup detected.. killing live coros." msgstr "Blocco rilevato... chiusura dei coros attivi." 
@@ -733,10 +689,6 @@ msgstr "Nessuna politica con indice %s" msgid "No realm key for %r" msgstr "Nessuna chiave dell'area di autenticazione per %r" -#, python-format -msgid "No space left on device for %s (%s)" -msgstr "Nessuno spazio rimasto sul dispositivo per %s (%s)" - #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Errore del nodo limitato %(ip)s:%(port)s (%(device)s)" @@ -870,14 +822,6 @@ msgstr "Percorso richiesto in X-Container-Sync-To" msgid "Problem cleaning up %s" msgstr "Problema durante la ripulitura di %s" -#, python-format -msgid "Problem cleaning up %s (%s)" -msgstr "Problema durante la ripulitura di %s (%s)" - -#, python-format -msgid "Problem writing durable state file %s (%s)" -msgstr "Problema durante la scrittura del file obsoleto duraturo %s (%s)" - #, python-format msgid "Profiling Error: %s" msgstr "Errore di creazione dei profili: %s" @@ -895,10 +839,6 @@ msgstr "" "%(object_path)s inserito in quarantena in %(quar_path)s perché non è una " "directory" -#, python-format -msgid "Quarantined %s to %s due to %s database" -msgstr "%s inserito in quarantena in %s a causa del database %s" - #, python-format msgid "Quarantining DB %s" msgstr "Inserimento in quarantena del DB %s" @@ -969,10 +909,6 @@ msgstr "" msgid "Running object replicator in script mode." msgstr "Esecuzione del programma di replica dell'oggetto in modalità script." 
-#, python-format -msgid "Signal %s pid: %s signal: %s" -msgstr "Segnale %s pid: %s segnale: %s" - #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " @@ -1052,10 +988,6 @@ msgstr "Tentativo di %(method)s %(path)s" msgid "Trying to GET %(full_path)s" msgstr "Tentativo di eseguire GET %(full_path)s" -#, python-format -msgid "Trying to get %s status of PUT to %s" -msgstr "Tentativo di acquisire lo stato %s di PUT su %s" - #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Tentativo di acquisire lo stato finale di PUT su %s" @@ -1080,10 +1012,6 @@ msgstr "Tentativo di scrittura in %s" msgid "UNCAUGHT EXCEPTION" msgstr "ECCEZIONE NON RILEVATA" -#, python-format -msgid "Unable to find %s config section in %s" -msgstr "Impossibile trovare la sezione di configurazione %s in %s" - #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "Impossibile caricare il client interno dalla configurazione: %r (%s)" @@ -1096,20 +1024,12 @@ msgstr "Impossibile individuare %s in libc. Lasciato come no-op." msgid "Unable to locate config for %s" msgstr "Impossibile individuare la configurazione per %s" -#, python-format -msgid "Unable to locate config number %s for %s" -msgstr "Impossibile individuare il numero di configurazione %s per %s" - msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Impossibile individuare fallocate, posix_fallocate in libc. Lasciato come " "no-op." -#, python-format -msgid "Unable to perform fsync() on directory %s: %s" -msgstr "Impossibile eseguire fsync() sulla directory %s: %s" - #, python-format msgid "Unable to read config from %s" msgstr "Impossibile leggere la configurazione da %s" @@ -1161,18 +1081,6 @@ msgstr "" "AVVERTENZA: Impossibile modificare il limite di memoria. Eseguire come non-" "root?" 
-#, python-format -msgid "Waited %s seconds for %s to die; giving up" -msgstr "" -"Sono trascorsi %s secondi in attesa che %s venga interrotto; operazione " -"terminata" - -#, python-format -msgid "Waited %s seconds for %s to die; killing" -msgstr "" -"Sono trascorsi %s secondi in attesa che %s venga interrotto; operazione " -"terminata" - msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Avvertenza: impossibile eseguire ratelimit senza un client memcached" diff --git a/swift/locale/ja/LC_MESSAGES/swift.po b/swift/locale/ja/LC_MESSAGES/swift.po index 323191ced3..d328e5570c 100644 --- a/swift/locale/ja/LC_MESSAGES/swift.po +++ b/swift/locale/ja/LC_MESSAGES/swift.po @@ -11,9 +11,9 @@ # 笹原 昌美 , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev244\n" +"Project-Id-Version: swift 2.7.1.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-22 06:16+0000\n" +"POT-Creation-Date: 2016-04-07 22:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -81,14 +81,6 @@ msgstr "成功 %(success)s、失敗 %(failure)s" msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s が %(statuses)s について 503 を返しています" -#, python-format -msgid "%s #%d not running (%s)" -msgstr "%s #%d が実行されていません (%s)" - -#, python-format -msgid "%s (%s) appears to have stopped" -msgstr "%s (%s) が停止された可能性があります" - #, python-format msgid "%s already started..." msgstr "%s は既に開始されています..." 
@@ -105,14 +97,6 @@ msgstr "%s がマウントされていません" msgid "%s responded as unmounted" msgstr "%s はアンマウントとして応答しました" -#, python-format -msgid "%s running (%s - %s)" -msgstr "%s が実行中 (%s - %s)" - -#, python-format -msgid "%s: %s" -msgstr "%s: %s" - #, python-format msgid "%s: Connection reset by peer" msgstr "%s: 接続がピアによってリセットされました" @@ -151,10 +135,6 @@ msgstr "、戻りコード: " msgid "Account" msgstr "アカウント" -#, python-format -msgid "Account %s has not been reaped since %s" -msgstr "アカウント %s は %s 以降リープされていません" - #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "アカウント監査 \"once\" モードが完了しました: %.02fs" @@ -168,10 +148,6 @@ msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "%(time).5f 秒で %(count)d 個の DB の複製を試行しました (%(rate).5f/s)" -#, python-format -msgid "Audit Failed for %s: %s" -msgstr "%s の監査が失敗しました: %s" - #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "正しくない再同期戻りコード: %(ret)d <- %(args)s" @@ -197,10 +173,6 @@ msgstr "コンテナー更新単一スレッド化スイープの開始" msgid "Begin container update sweep" msgstr "コンテナー更新スイープの開始" -#, python-format -msgid "Begin object audit \"%s\" mode (%s%s)" -msgstr "オブジェクト監査「%s」モードの開始 (%s%s)" - msgid "Begin object update single threaded sweep" msgstr "オブジェクト更新単一スレッド化スイープの開始" @@ -225,14 +197,6 @@ msgstr "ファイル %s にアクセスできません。" msgid "Can not load profile data from %s." 
msgstr "プロファイルデータを %s からロードできません。" -#, python-format -msgid "Cannot read %s (%s)" -msgstr "%s を読み取ることができません (%s)" - -#, python-format -msgid "Cannot write %s (%s)" -msgstr "%s を書き込むことができません (%s)" - #, python-format msgid "Client did not read from proxy within %ss" msgstr "クライアントは %s 内のプロキシーからの読み取りを行いませんでした" @@ -604,14 +568,6 @@ msgstr "最上位複製ループで例外が発生しました" msgid "Exception in top-levelreconstruction loop" msgstr "最上位再構成ループで例外が発生しました" -#, python-format -msgid "Exception while deleting container %s %s" -msgstr "コンテナー %s %s の削除中に例外が発生しました" - -#, python-format -msgid "Exception while deleting object %s %s %s" -msgstr "オブジェクト %s %s %s の削除中に例外が発生しました" - #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s で例外が発生しました" @@ -688,10 +644,6 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "長期実行の再同期を強制終了中: %s" -#, python-format -msgid "Loading JSON from %s failed (%s)" -msgstr "%s からの JSON のロードが失敗しました (%s)" - msgid "Lockup detected.. killing live coros." msgstr "ロックが検出されました.. 
ライブ coros を強制終了中" @@ -719,10 +671,6 @@ msgstr "インデックス %s のポリシーはありません" msgid "No realm key for %r" msgstr "%r のレルムキーがありません" -#, python-format -msgid "No space left on device for %s (%s)" -msgstr "%s 用のデバイス容量が残っていません (%s)" - #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "ノードエラー制限 %(ip)s:%(port)s (%(device)s)" @@ -859,14 +807,6 @@ msgstr "X-Container-Sync-To にパスが必要です" msgid "Problem cleaning up %s" msgstr "%s のクリーンアップ中に問題が発生しました" -#, python-format -msgid "Problem cleaning up %s (%s)" -msgstr "%s のクリーンアップ中に問題が発生しました (%s)" - -#, python-format -msgid "Problem writing durable state file %s (%s)" -msgstr "永続状態ファイル %s の書き込み中に問題が発生しました (%s)" - #, python-format msgid "Profiling Error: %s" msgstr "プロファイル作成エラー: %s" @@ -882,10 +822,6 @@ msgid "" msgstr "" "ディレクトリーではないため、%(object_path)s は %(quar_path)s へ検疫されました" -#, python-format -msgid "Quarantined %s to %s due to %s database" -msgstr "%s から %s が検疫されました (%s データベースが原因)" - #, python-format msgid "Quarantining DB %s" msgstr "DB %s の検疫中" @@ -950,10 +886,6 @@ msgstr "スクリプトモードでオブジェクトリコンストラクター msgid "Running object replicator in script mode." 
msgstr "スクリプトモードでオブジェクトレプリケーターを実行中です。" -#, python-format -msgid "Signal %s pid: %s signal: %s" -msgstr "%s のシグナル通知、pid: %s シグナル: %s" - #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " @@ -1032,10 +964,6 @@ msgstr "%(method)s %(path)s を試行中" msgid "Trying to GET %(full_path)s" msgstr "GET %(full_path)s を試行中" -#, python-format -msgid "Trying to get %s status of PUT to %s" -msgstr "%s への PUT の状況 %s の取得を試行中" - #, python-format msgid "Trying to get final status of PUT to %s" msgstr "%s への PUT の最終状況の取得を試行中" @@ -1060,10 +988,6 @@ msgstr "%s への書き込みを試行中" msgid "UNCAUGHT EXCEPTION" msgstr "キャッチされていない例外" -#, python-format -msgid "Unable to find %s config section in %s" -msgstr "%s 構成セクションが %s に見つかりません" - #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "設定から内部クライアントをロードできません: %r (%s)" @@ -1076,19 +1000,11 @@ msgstr "%s が libc に見つかりません。no-op として終了します。 msgid "Unable to locate config for %s" msgstr "%s の設定が見つかりません" -#, python-format -msgid "Unable to locate config number %s for %s" -msgstr "%s の設定番号 %s が見つかりません" - msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "fallocate、posix_fallocate が libc に見つかりません。no-op として終了します。" -#, python-format -msgid "Unable to perform fsync() on directory %s: %s" -msgstr "ディレクトリー %s で fsync() を実行できません: %s" - #, python-format msgid "Unable to read config from %s" msgstr "構成を %s から読み取ることができません" @@ -1133,14 +1049,6 @@ msgstr "警告: 最大処理限界を変更できません。非ルートとし msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "警告: メモリー制限を変更できません。非ルートとして実行しますか?" 
-#, python-format -msgid "Waited %s seconds for %s to die; giving up" -msgstr "%s 秒間、%s の停止を待機しました。中止します" - -#, python-format -msgid "Waited %s seconds for %s to die; killing" -msgstr "%s 秒間、%s の停止を待機しました。強制終了します" - msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告: memcached クライアントなしで ratelimit を行うことはできません" diff --git a/swift/locale/ko_KR/LC_MESSAGES/swift.po b/swift/locale/ko_KR/LC_MESSAGES/swift.po index f84a5088cf..b5fb2426df 100644 --- a/swift/locale/ko_KR/LC_MESSAGES/swift.po +++ b/swift/locale/ko_KR/LC_MESSAGES/swift.po @@ -10,9 +10,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev244\n" +"Project-Id-Version: swift 2.7.1.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-22 06:16+0000\n" +"POT-Creation-Date: 2016-04-07 22:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -80,14 +80,6 @@ msgstr "%(success)s개 성공, %(failure)s개 실패" msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s에서 %(statuses)s에 대해 503을 리턴함" -#, python-format -msgid "%s #%d not running (%s)" -msgstr "%s #%d이(가) 실행되지 않음(%s)" - -#, python-format -msgid "%s (%s) appears to have stopped" -msgstr "%s(%s)이(가) 중지됨" - #, python-format msgid "%s already started..." msgstr "%s이(가) 이미 시작되었음..." 
@@ -104,14 +96,6 @@ msgstr "%s이(가) 마운트되지 않음" msgid "%s responded as unmounted" msgstr "%s이(가) 마운트 해제된 것으로 응답" -#, python-format -msgid "%s running (%s - %s)" -msgstr "%s 실행 중(%s - %s)" - -#, python-format -msgid "%s: %s" -msgstr "%s: %s" - #, python-format msgid "%s: Connection reset by peer" msgstr "%s: 피어에서 연결 재설정" @@ -150,10 +134,6 @@ msgstr ", 반환 코드들:" msgid "Account" msgstr "계정" -#, python-format -msgid "Account %s has not been reaped since %s" -msgstr "계정 %s을(를) %s 이후에 얻지 못함" - #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "계정 감사 \"한 번\"모드가 완료: %.02fs" @@ -168,10 +148,6 @@ msgid "" msgstr "" "%(time).5f초(%(rate).5f/s)에 %(count)d개의 데이터베이스를 복제하려고 함" -#, python-format -msgid "Audit Failed for %s: %s" -msgstr "검사 중 오류 %s: %s" - #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "잘못된 rsync 리턴 코드: %(ret)d <- %(args)s" @@ -197,10 +173,6 @@ msgstr "컨테이너 업데이트 단일 스레드 스윕 시작" msgid "Begin container update sweep" msgstr "컨테이너 업데이트 스윕 시작" -#, python-format -msgid "Begin object audit \"%s\" mode (%s%s)" -msgstr "오브젝트 감사 \"%s\" 모드(%s%s) 시작" - msgid "Begin object update single threaded sweep" msgstr "오브젝트 업데이트 단일 스레드 스윕 시작" @@ -225,14 +197,6 @@ msgstr "파일 %s에 액세스할 수 없습니다." msgid "Can not load profile data from %s." msgstr "%s에서 프로파일 데이터를 로드할 수 없습니다." 
-#, python-format -msgid "Cannot read %s (%s)" -msgstr "%s을(를) 읽을 수 없음(%s)" - -#, python-format -msgid "Cannot write %s (%s)" -msgstr "%s을(를) 쓸 수 없음(%s)" - #, python-format msgid "Client did not read from proxy within %ss" msgstr "클라이언트에서 %ss 내에 프록시를 읽을 수 없었음" @@ -599,14 +563,6 @@ msgstr "최상위 레벨 복제 루프에서 예외 발생" msgid "Exception in top-levelreconstruction loop" msgstr "최상위 레벨 재구성 루프에서 예외 발생" -#, python-format -msgid "Exception while deleting container %s %s" -msgstr "컨테이너 %s %s 삭제 중 예외 발생" - -#, python-format -msgid "Exception while deleting object %s %s %s" -msgstr "오브젝트 %s %s %s 삭제 중 예외 발생" - #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s 예외" @@ -681,10 +637,6 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "장기 실행 중인 rsync 강제 종료: %s" -#, python-format -msgid "Loading JSON from %s failed (%s)" -msgstr "%s에서 JSON 로드 실패(%s)" - msgid "Lockup detected.. killing live coros." msgstr "잠금 발견.. 활성 coros를 강제 종료합니다." @@ -712,10 +664,6 @@ msgstr "인덱스가 %s인 정책이 없음" msgid "No realm key for %r" msgstr "%r에 대한 영역 키가 없음" -#, python-format -msgid "No space left on device for %s (%s)" -msgstr "%s의 장치 왼쪽에 공백이 없음(%s)" - #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "노드 오류로 %(ip)s:%(port)s(%(device)s)이(가) 제한됨" @@ -848,14 +796,6 @@ msgstr "X-Container-Sync-To에 경로가 필요함" msgid "Problem cleaning up %s" msgstr "%s 정리 문제 발생" -#, python-format -msgid "Problem cleaning up %s (%s)" -msgstr "%s 정리 문제 발생(%s)" - -#, python-format -msgid "Problem writing durable state file %s (%s)" -msgstr "지속적인 상태 파일 %s 쓰기 오류(%s)" - #, python-format msgid "Profiling Error: %s" msgstr "프로파일링 오류: %s" @@ -869,10 +809,6 @@ msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "디렉토리가 아니어서 %(object_path)s을(를) %(quar_path)s에 격리함" -#, python-format -msgid "Quarantined %s to %s due to %s database" -msgstr "%s을(를) %s에 격리. 
원인: %s 데이터베이스" - #, python-format msgid "Quarantining DB %s" msgstr "데이터베이스 %s 격리" @@ -937,10 +873,6 @@ msgstr "오브젝트 재구성자를 스크립트 모드로 실행 중입니다. msgid "Running object replicator in script mode." msgstr "오브젝트 복제자를 스크립트 모드로 실행 중입니다." -#, python-format -msgid "Signal %s pid: %s signal: %s" -msgstr "신호 %s pid: %s 신호: %s" - #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " @@ -1016,10 +948,6 @@ msgstr "%(method)s %(path)s 시도 중" msgid "Trying to GET %(full_path)s" msgstr "GET %(full_path)s 시도 중" -#, python-format -msgid "Trying to get %s status of PUT to %s" -msgstr "PUT의 %s 상태를 %s(으)로 가져오는 중" - #, python-format msgid "Trying to get final status of PUT to %s" msgstr "PUT의 최종 상태를 %s(으)로 가져오는 중" @@ -1044,10 +972,6 @@ msgstr "%s에 쓰기 시도 중" msgid "UNCAUGHT EXCEPTION" msgstr "미발견 예외" -#, python-format -msgid "Unable to find %s config section in %s" -msgstr "%s 구성 섹션을 %s에서 찾을 수 없음" - #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "구성에서 내부 클라이언트를 로드할 수 없음: %r (%s)" @@ -1060,19 +984,11 @@ msgstr "libc에서 %s을(를) 찾을 수 없습니다. no-op로 남겨 둡니다 msgid "Unable to locate config for %s" msgstr "%s의 구성을 찾을 수 없음" -#, python-format -msgid "Unable to locate config number %s for %s" -msgstr "구성 번호 %s을(를) 찾을 수 없음(대상: %s)" - msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "libc에서 fallocate, posix_fallocate를 찾을 수 없습니다. no-op로 남겨 둡니다." -#, python-format -msgid "Unable to perform fsync() on directory %s: %s" -msgstr "%s 디렉토리에서 fsync()를 수행할 수 없음: %s" - #, python-format msgid "Unable to read config from %s" msgstr "%s에서 구성을 읽을 수 없음" @@ -1122,14 +1038,6 @@ msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "경고: 메모리 한계를 수정할 수 없습니다. 비루트로 실행 중인지 확인하십시오." 
-#, python-format -msgid "Waited %s seconds for %s to die; giving up" -msgstr "%s초 동안 %s의 종료를 대기함, 포기하는 중" - -#, python-format -msgid "Waited %s seconds for %s to die; killing" -msgstr "%s초 동안 %s을(를) 대기, 강제 종료 중" - msgid "Warning: Cannot ratelimit without a memcached client" msgstr "경고: memcached 클라이언트 없이 전송률을 제한할 수 없음" diff --git a/swift/locale/pt_BR/LC_MESSAGES/swift.po b/swift/locale/pt_BR/LC_MESSAGES/swift.po index 9cffa44b2a..61005cbfae 100644 --- a/swift/locale/pt_BR/LC_MESSAGES/swift.po +++ b/swift/locale/pt_BR/LC_MESSAGES/swift.po @@ -12,9 +12,9 @@ # Carlos Marques , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev254\n" +"Project-Id-Version: swift 2.7.1.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-22 19:48+0000\n" +"POT-Creation-Date: 2016-04-07 22:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -82,14 +82,6 @@ msgstr "%(success)s sucessos, %(failure)s falhas" msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s retornando 503 para %(statuses)s" -#, python-format -msgid "%s #%d not running (%s)" -msgstr "%s #%d não está em execução (%s)" - -#, python-format -msgid "%s (%s) appears to have stopped" -msgstr "%s (%s) parece ter parado" - #, python-format msgid "%s already started..." msgstr "%s já iniciado..." 
@@ -106,14 +98,6 @@ msgstr "%s não está montado" msgid "%s responded as unmounted" msgstr "%s respondeu como não montado" -#, python-format -msgid "%s running (%s - %s)" -msgstr "%s em execução (%s - %s)" - -#, python-format -msgid "%s: %s" -msgstr "%s: %s" - #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Reconfiguração da conexão por peer" @@ -152,10 +136,6 @@ msgstr ", códigos de retorno:" msgid "Account" msgstr "Conta" -#, python-format -msgid "Account %s has not been reaped since %s" -msgstr "As contas %s não foram colhidas desde %s" - #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Auditoria de conta em modo \"único\" finalizado: %.02fs" @@ -170,10 +150,6 @@ msgid "" msgstr "" "Tentativa de replicação do %(count)d dbs em%(time).5f segundos (%(rate).5f/s)" -#, python-format -msgid "Audit Failed for %s: %s" -msgstr "Auditoria Falhou para %s: %s" - #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Código de retorno de ressincronização inválido: %(ret)d <-%(args)s" @@ -199,10 +175,6 @@ msgstr "Inicie a varredura de encadeamento único da atualização do contêiner msgid "Begin container update sweep" msgstr "Inicie a varredura de atualização do contêiner" -#, python-format -msgid "Begin object audit \"%s\" mode (%s%s)" -msgstr "Inicie o modo \"%s\" da auditoria (%s%s)" - msgid "Begin object update single threaded sweep" msgstr "Inicie a varredura de encadeamento único da atualização do objeto" @@ -227,14 +199,6 @@ msgstr "Não é possível acessar o arquivo %s." msgid "Can not load profile data from %s." msgstr "Não é possível carregar dados do perfil a partir de %s." 
-#, python-format -msgid "Cannot read %s (%s)" -msgstr "Não é possível ler %s (%s)" - -#, python-format -msgid "Cannot write %s (%s)" -msgstr "Não é possível gravar %s (%s)" - #, python-format msgid "Client did not read from proxy within %ss" msgstr "O cliente não leu no proxy dentro de %ss" @@ -604,14 +568,6 @@ msgstr "Exceção no loop de replicação de nível superior" msgid "Exception in top-levelreconstruction loop" msgstr "Exceção no loop de reconstrução de nível superior" -#, python-format -msgid "Exception while deleting container %s %s" -msgstr "Exceção ao excluir contêiner %s %s" - -#, python-format -msgid "Exception while deleting object %s %s %s" -msgstr "Exceção ao excluir objeto %s %s %s" - #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Exceção com %(ip)s:%(port)s/%(device)s" @@ -687,10 +643,6 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "Eliminando a ressincronização de longa execução: %s" -#, python-format -msgid "Loading JSON from %s failed (%s)" -msgstr "Falha ao carregar JSON a partir do %s (%s)" - msgid "Lockup detected.. killing live coros." msgstr "Bloqueio detectado... eliminando núcleos em tempo real." 
@@ -718,10 +670,6 @@ msgstr "Nenhuma política com índice %s" msgid "No realm key for %r" msgstr "Nenhuma chave do domínio para %r" -#, python-format -msgid "No space left on device for %s (%s)" -msgstr "Nenhum espaço deixado no dispositivo para %s (%s)" - #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Erro de nó limitado %(ip)s:%(port)s (%(device)s)" @@ -859,14 +807,6 @@ msgstr "Caminho necessário em X-Container-Sync-To" msgid "Problem cleaning up %s" msgstr "Problema ao limpar %s" -#, python-format -msgid "Problem cleaning up %s (%s)" -msgstr "Problema ao limpar %s (%s)" - -#, python-format -msgid "Problem writing durable state file %s (%s)" -msgstr "Problema ao gravar arquivo de estado durável %s (%s)" - #, python-format msgid "Profiling Error: %s" msgstr "Erro da Criação de Perfil: %s" @@ -883,10 +823,6 @@ msgstr "" "Em quarentena %(object_path)s para %(quar_path)s porque ele não é um " "diretório" -#, python-format -msgid "Quarantined %s to %s due to %s database" -msgstr "Em quarentena %s para %s devido a %s do banco de dados" - #, python-format msgid "Quarantining DB %s" msgstr "Quarentenando BD %s" @@ -954,10 +890,6 @@ msgstr "Executando o reconstrutor do objeto no modo de script." msgid "Running object replicator in script mode." msgstr "Executando replicador do objeto no modo de script." 
-#, python-format -msgid "Signal %s pid: %s signal: %s" -msgstr "PID %s do sinal: %s sinal: %s" - #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " @@ -1037,10 +969,6 @@ msgstr "Tentando %(method)s %(path)s" msgid "Trying to GET %(full_path)s" msgstr "Tentando GET %(full_path)s" -#, python-format -msgid "Trying to get %s status of PUT to %s" -msgstr "Tentando obter o status %s do PUT para o %s" - #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Tentando obter o status final do PUT para o %s" @@ -1065,10 +993,6 @@ msgstr "Tentando escrever para %s" msgid "UNCAUGHT EXCEPTION" msgstr "EXCEÇÃO NÃO CAPTURADA" -#, python-format -msgid "Unable to find %s config section in %s" -msgstr "Não é possível localizar %s da seção de configuração em %s" - #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "" @@ -1082,20 +1006,12 @@ msgstr "Não é possível localizar %s em libc. Saindo como um não operacional. msgid "Unable to locate config for %s" msgstr "Não é possível localizar configuração para %s" -#, python-format -msgid "Unable to locate config number %s for %s" -msgstr "Não é possível localizar o número de configuração %s para %s" - msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Não é possível localizar fallocate, posix_fallocate em libc. Saindo como um " "não operacional." -#, python-format -msgid "Unable to perform fsync() on directory %s: %s" -msgstr "Não é possível executar fsync() no diretório %s: %s" - #, python-format msgid "Unable to read config from %s" msgstr "Não é possível ler a configuração a partir de %s" @@ -1144,14 +1060,6 @@ msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "AVISO: Não é possível modificar o limite de memória. Executar como não raiz?" 
-#, python-format -msgid "Waited %s seconds for %s to die; giving up" -msgstr "Esperou %s segundos para %s eliminar; desistindo" - -#, python-format -msgid "Waited %s seconds for %s to die; killing" -msgstr "Esperou %s segundos para %s eliminar; eliminando" - msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Aviso: Não é possível um limite de taxa sem um cliente memcached" diff --git a/swift/locale/ru/LC_MESSAGES/swift.po b/swift/locale/ru/LC_MESSAGES/swift.po index 7511d0eaf9..a074dc851d 100644 --- a/swift/locale/ru/LC_MESSAGES/swift.po +++ b/swift/locale/ru/LC_MESSAGES/swift.po @@ -5,14 +5,13 @@ # Translators: # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata -# Andreas Jaeger , 2016. #zanata # Filatov Sergey , 2016. #zanata # Grigory Mokhin , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev244\n" +"Project-Id-Version: swift 2.7.1.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-22 06:16+0000\n" +"POT-Creation-Date: 2016-04-07 22:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -82,14 +81,6 @@ msgstr "%(success)s успешно, %(failure)s с ошибками" msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s возвратил 503 для %(statuses)s" -#, python-format -msgid "%s #%d not running (%s)" -msgstr "%s #%d не запущен (%s)" - -#, python-format -msgid "%s (%s) appears to have stopped" -msgstr "Возможно, %s (%s) остановлен" - #, python-format msgid "%s already started..." msgstr "%s уже запущен..." 
@@ -106,14 +97,6 @@ msgstr "%s не смонтирован" msgid "%s responded as unmounted" msgstr "%s ответил как размонтированный" -#, python-format -msgid "%s running (%s - %s)" -msgstr "%s выполняется (%s - %s)" - -#, python-format -msgid "%s: %s" -msgstr "%s: %s" - #, python-format msgid "%s: Connection reset by peer" msgstr "%s: соединение сброшено на другой стороне" @@ -152,10 +135,6 @@ msgstr ", коды возврата: " msgid "Account" msgstr "Учетная запись" -#, python-format -msgid "Account %s has not been reaped since %s" -msgstr "Учетная запись %s не очищалась после %s" - #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Проверка учетной записи в \"однократном\" режиме завершена: %.02fs" @@ -170,10 +149,6 @@ msgid "" msgstr "" "Попытка репликации %(count)d баз данных за %(time).5f секунд (%(rate).5f/s)" -#, python-format -msgid "Audit Failed for %s: %s" -msgstr "Контроль %s не выполнен: %s" - #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Неправильный код возврата rsync: %(ret)d <- %(args)s" @@ -199,10 +174,6 @@ msgstr "Начать однонитевую сплошную проверку о msgid "Begin container update sweep" msgstr "Начать сплошную проверку обновлений контейнера" -#, python-format -msgid "Begin object audit \"%s\" mode (%s%s)" -msgstr "Начать проверку объекта в режиме \"%s\" (%s%s)" - msgid "Begin object update single threaded sweep" msgstr "Начать однонитевую сплошную проверку обновлений объекта" @@ -227,14 +198,6 @@ msgstr "Отсутствует доступ к файлу %s." msgid "Can not load profile data from %s." msgstr "Не удается загрузить данные профайла из %s." 
-#, python-format -msgid "Cannot read %s (%s)" -msgstr "Невозможно прочитать %s (%s)" - -#, python-format -msgid "Cannot write %s (%s)" -msgstr "Невозможно записать %s (%s)" - #, python-format msgid "Client did not read from proxy within %ss" msgstr "Клиент не прочитал данные из proxy в %ss" @@ -615,14 +578,6 @@ msgstr "Исключительная ситуация в цикле реплик msgid "Exception in top-levelreconstruction loop" msgstr "Исключение в цикле реконструкции верхнего уровня" -#, python-format -msgid "Exception while deleting container %s %s" -msgstr "Исключительная ситуация во время удаления контейнера %s %s" - -#, python-format -msgid "Exception while deleting object %s %s %s" -msgstr "Исключительная ситуация во время удаления объекта %s %s %s" - #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Исключительная ситуация в %(ip)s:%(port)s/%(device)s" @@ -699,10 +654,6 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "Принудительное завершение долго выполняющегося rsync: %s" -#, python-format -msgid "Loading JSON from %s failed (%s)" -msgstr "Загрузка JSON из %s провалилась (%s)" - msgid "Lockup detected.. killing live coros." msgstr "Обнаружена блокировка.. принудительное завершение работающих модулей." 
@@ -730,10 +681,6 @@ msgstr "Не найдено стратегии с индексом %s" msgid "No realm key for %r" msgstr "Отсутствует ключ области для %r" -#, python-format -msgid "No space left on device for %s (%s)" -msgstr "Не устройстве %s (%s) закончилось место" - #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Ограниченная ошибка узла %(ip)s:%(port)s (%(device)s)" @@ -869,14 +816,6 @@ msgstr "Требуется путь в X-Container-Sync-To" msgid "Problem cleaning up %s" msgstr "Неполадка при очистке %s" -#, python-format -msgid "Problem cleaning up %s (%s)" -msgstr "Возникла проблема при очистке %s (%s)" - -#, python-format -msgid "Problem writing durable state file %s (%s)" -msgstr "Возникла неполадка при записи файла сохраняемого состояния %s (%s)" - #, python-format msgid "Profiling Error: %s" msgstr "Ошибка профилирования: %s" @@ -894,10 +833,6 @@ msgstr "" "%(object_path)s помещен в карантин в %(quar_path)s, так как не является " "каталогом" -#, python-format -msgid "Quarantined %s to %s due to %s database" -msgstr "%s помещено в карантин %s из-за базы данных %s" - #, python-format msgid "Quarantining DB %s" msgstr "БД %s помещена в карантин" @@ -967,10 +902,6 @@ msgstr "Запуск утилиты реконструкции объектов msgid "Running object replicator in script mode." msgstr "Запуск утилиты репликации объектов в режиме сценариев." 
-#, python-format -msgid "Signal %s pid: %s signal: %s" -msgstr "Сигнал: %s, pid: %s, сигнал: %s" - #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " @@ -1050,10 +981,6 @@ msgstr "Попытка выполнения метода %(method)s %(path)s" msgid "Trying to GET %(full_path)s" msgstr "Попытка GET-запроса %(full_path)s" -#, python-format -msgid "Trying to get %s status of PUT to %s" -msgstr "Попытка получения состояния %s операции PUT в %s" - #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Попытка получения конечного состояния PUT в %s" @@ -1078,10 +1005,6 @@ msgstr "Попытка записи в %s" msgid "UNCAUGHT EXCEPTION" msgstr "Необрабатываемая исключительная ситуация" -#, python-format -msgid "Unable to find %s config section in %s" -msgstr "Не удалось найти раздел конфигурации %s в %s" - #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "Не удалось загрузить клиент из конфигурации: %r (%s)" @@ -1094,19 +1017,11 @@ msgstr "Не удалось найти %s в libc. Оставлено как no msgid "Unable to locate config for %s" msgstr "Не удалось найти конфигурационный файл для %s" -#, python-format -msgid "Unable to locate config number %s for %s" -msgstr "Не удается найти конфигурации с номером %s для %s" - msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Не удалось найти fallocate, posix_fallocate в libc. Оставлено как no-op." -#, python-format -msgid "Unable to perform fsync() on directory %s: %s" -msgstr "Не удалось выполнить функцию fsync() для каталога %s: %s" - #, python-format msgid "Unable to read config from %s" msgstr "Не удалось прочитать конфигурацию из %s" @@ -1157,14 +1072,6 @@ msgstr "" "Предупреждение: не удалось изменить предельное значение для памяти. Запущен " "без прав доступа root?" 
-#, python-format -msgid "Waited %s seconds for %s to die; giving up" -msgstr "Система ожидала %s секунд для %s завершения; освобождение" - -#, python-format -msgid "Waited %s seconds for %s to die; killing" -msgstr "Система ожидала %s секунд для %s завершения; Принудительное завершение" - msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" "Предупреждение: не удается ограничить скорость без клиента с кэшированием " diff --git a/swift/locale/tr_TR/LC_MESSAGES/swift.po b/swift/locale/tr_TR/LC_MESSAGES/swift.po index 2235019090..31d8b02756 100644 --- a/swift/locale/tr_TR/LC_MESSAGES/swift.po +++ b/swift/locale/tr_TR/LC_MESSAGES/swift.po @@ -7,9 +7,9 @@ # OpenStack Infra , 2015. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev235\n" +"Project-Id-Version: swift 2.7.1.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-18 23:11+0000\n" +"POT-Creation-Date: 2016-04-07 22:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -77,14 +77,6 @@ msgstr "%(success)s başarı, %(failure)s başarısızlık" msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s %(statuses)s için 503 döndürüyor" -#, python-format -msgid "%s #%d not running (%s)" -msgstr "%s #%d çalışmıyor (%s)" - -#, python-format -msgid "%s (%s) appears to have stopped" -msgstr "%s (%s) durmuş gibi görünüyor" - #, python-format msgid "%s already started..." msgstr "%s zaten başlatıldı..." 
@@ -101,14 +93,6 @@ msgstr "%s bağlı değil" msgid "%s responded as unmounted" msgstr "%s bağlı değil olarak yanıt verdi" -#, python-format -msgid "%s running (%s - %s)" -msgstr "%s çalışıyor (%s - %s)" - -#, python-format -msgid "%s: %s" -msgstr "%s: %s" - #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Bağlantı eş tarafından sıfırlandı" @@ -147,10 +131,6 @@ msgstr ", dönen kodlar: " msgid "Account" msgstr "Hesap" -#, python-format -msgid "Account %s has not been reaped since %s" -msgstr "Hesap %s %s'den beri biçilmedi" - #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Hesap denetimi \"bir kere\" kipi tamamlandı: %.02fs" @@ -164,10 +144,6 @@ msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "%(count)d db %(time).5f saniyede çoğaltılmaya çalışıldı (%(rate).5f/s)" -#, python-format -msgid "Audit Failed for %s: %s" -msgstr "Denetim %s için başarısız: %s" - #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Kötü rsync dönüş kodu: %(ret)d <- %(args)s" @@ -193,10 +169,6 @@ msgstr "Kap güncelleme tek iş iplikli süpürmeye başla" msgid "Begin container update sweep" msgstr "Kap güncelleme süpürmesine başla" -#, python-format -msgid "Begin object audit \"%s\" mode (%s%s)" -msgstr "Nesne denetimini \"%s\" kipinde başlat (%s%s)" - msgid "Begin object update single threaded sweep" msgstr "Nesne güncelleme tek iş iplikli süpürmeye başla" @@ -583,14 +555,6 @@ msgstr "Üst seviye çoğaltma döngüsünde istisna" msgid "Exception in top-levelreconstruction loop" msgstr "Üst seviye yeniden oluşturma döngüsünde istisna" -#, python-format -msgid "Exception while deleting container %s %s" -msgstr "%s %s kabı silinirken istisna" - -#, python-format -msgid "Exception while deleting object %s %s %s" -msgstr "%s %s %s nesnesi silinirken istisna" - #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s ile istisna" @@ -685,10 +649,6 @@ 
msgstr "%s indisine sahip ilke yok" msgid "No realm key for %r" msgstr "%r için realm anahtarı yok" -#, python-format -msgid "No space left on device for %s (%s)" -msgstr "Aygıtta %s için boş alan kalmadı (%s)" - #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Düğüm hatası sınırlandı %(ip)s:%(port)s (%(device)s)" @@ -809,14 +769,6 @@ msgstr "X-Container-Sync-To'de yol gerekli" msgid "Problem cleaning up %s" msgstr "%s temizliğinde problem" -#, python-format -msgid "Problem cleaning up %s (%s)" -msgstr "%s temizlemede problem (%s)" - -#, python-format -msgid "Problem writing durable state file %s (%s)" -msgstr "Dayanıklı durum dosyas %s ile ilgili problem (%s)" - #, python-format msgid "Profiling Error: %s" msgstr "Profilleme Hatası: %s" @@ -831,10 +783,6 @@ msgid "" msgstr "" "Bir dizin olmadığından %(object_path)s %(quar_path)s e karantinaya alındı" -#, python-format -msgid "Quarantined %s to %s due to %s database" -msgstr "%s %s'e karantinaya alındı %s veri tabanı sebebiyle" - #, python-format msgid "Quarantining DB %s" msgstr "DB %s karantinaya alınıyor" @@ -896,10 +844,6 @@ msgstr "Nesne yeniden oluşturma betik kipinde çalıştırılıyor." msgid "Running object replicator in script mode." msgstr "Nesne çoğaltıcı betik kipinde çalıştırılıyor." 
-#, python-format -msgid "Signal %s pid: %s signal: %s" -msgstr "Sinyal %s pid: %s sinyal: %s" - #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " @@ -979,10 +923,6 @@ msgstr "%(method)s %(path)s deneniyor" msgid "Trying to GET %(full_path)s" msgstr "%(full_path)s GET deneniyor" -#, python-format -msgid "Trying to get %s status of PUT to %s" -msgstr "%s'e PUT'un %s durumu alınmaya çalışılıyor" - #, python-format msgid "Trying to get final status of PUT to %s" msgstr "%s'e PUT için son durum alınmaya çalışılıyor" @@ -1007,10 +947,6 @@ msgstr "%s'e yazmaya çalışılıyor" msgid "UNCAUGHT EXCEPTION" msgstr "YAKALANMAYAN İSTİSNA" -#, python-format -msgid "Unable to find %s config section in %s" -msgstr "%s yapılandırma kısmı %s'de bulunamıyor" - #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "Yapılandırmadan dahili istemci yüklenemedi: %r (%s)" @@ -1023,19 +959,11 @@ msgstr "%s libc'de bulunamadı. No-op olarak çıkılıyor." msgid "Unable to locate config for %s" msgstr "%s için yapılandırma bulunamıyor" -#, python-format -msgid "Unable to locate config number %s for %s" -msgstr "Yapılandırma sayısı %s %s için bulunamıyor" - msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "fallocate, posix_fallocate libc'de bulunamadı. No-op olarak çıkılıyor." -#, python-format -msgid "Unable to perform fsync() on directory %s: %s" -msgstr "%s dizininde fsynıc() yapılamıyor: %s" - #, python-format msgid "Unable to read config from %s" msgstr "%s'den yapılandırma okunamıyor" @@ -1079,10 +1007,6 @@ msgstr "UYARI: Azami süreç limiti değiştirilemiyor. Root değil misiniz?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "UYARI: Hafıza sınırı değiştirilemiyor. Root değil misiniz?" 
-#, python-format -msgid "Waited %s seconds for %s to die; giving up" -msgstr "%s saniye %s'in ölmesi için beklendi; vaz geçiliyor" - msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Uyarı: Memcached istemcisi olmadan oran sınırlama yapılamaz" diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po index 2a6097fd27..1c1d4f82f4 100644 --- a/swift/locale/zh_CN/LC_MESSAGES/swift.po +++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po @@ -9,9 +9,9 @@ # Linda , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev254\n" +"Project-Id-Version: swift 2.7.1.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-22 19:48+0000\n" +"POT-Creation-Date: 2016-04-07 22:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -77,14 +77,6 @@ msgstr "%(success)s成功,%(failure)s失败" msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s 返回 503 在 %(statuses)s" -#, python-format -msgid "%s #%d not running (%s)" -msgstr "%s #%d无法运行(%s)" - -#, python-format -msgid "%s (%s) appears to have stopped" -msgstr "%s (%s)显示已停止" - #, python-format msgid "%s already started..." msgstr "%s已启动..." 
@@ -101,14 +93,6 @@ msgstr "%s未挂载" msgid "%s responded as unmounted" msgstr "%s 响应为未安装" -#, python-format -msgid "%s running (%s - %s)" -msgstr "%s运行(%s - %s)" - -#, python-format -msgid "%s: %s" -msgstr "%s:%s" - #, python-format msgid "%s: Connection reset by peer" msgstr "%s:已由同级重置连接" @@ -147,10 +131,6 @@ msgstr ",返回代码:" msgid "Account" msgstr "账号" -#, python-format -msgid "Account %s has not been reaped since %s" -msgstr "账号%s自%s起未被reaped" - #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "账号审计\"once\"模式完成: %.02fs" @@ -164,10 +144,6 @@ msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "%(time).5f seconds (%(rate).5f/s)尝试复制%(count)d dbs" -#, python-format -msgid "Audit Failed for %s: %s" -msgstr "审计失败%s: %s" - #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Bad rsync返还代码:%(ret)d <- %(args)s" @@ -193,10 +169,6 @@ msgstr "开始容器更新单线程扫除" msgid "Begin container update sweep" msgstr "开始容器更新扫除" -#, python-format -msgid "Begin object audit \"%s\" mode (%s%s)" -msgstr "开始对象审计\\\"%s\\\" 模式 (%s%s)" - msgid "Begin object update single threaded sweep" msgstr "开始对象更新单线程扫除" @@ -221,14 +193,6 @@ msgstr "无法访问文件%s" msgid "Can not load profile data from %s." 
msgstr "无法从%s下载分析数据" -#, python-format -msgid "Cannot read %s (%s)" -msgstr "无法读取 %s (%s)" - -#, python-format -msgid "Cannot write %s (%s)" -msgstr "无法写入 %s (%s)" - #, python-format msgid "Client did not read from proxy within %ss" msgstr "客户尚未从代理处读取%ss" @@ -583,14 +547,6 @@ msgstr "top-level复制圈出现异常" msgid "Exception in top-levelreconstruction loop" msgstr " top-levelreconstruction 环中发生异常" -#, python-format -msgid "Exception while deleting container %s %s" -msgstr "执行删除容器时出现异常 %s %s" - -#, python-format -msgid "Exception while deleting object %s %s %s" -msgstr "执行删除对象时发生异常%s %s %s" - #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s出现异常" @@ -662,10 +618,6 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "终止long-running同步: %s" -#, python-format -msgid "Loading JSON from %s failed (%s)" -msgstr "从 %s 读取 JSON 失败 (%s)" - msgid "Lockup detected.. killing live coros." msgstr "检测到lockup。终止正在执行的coros" @@ -693,10 +645,6 @@ msgstr "没有具备索引 %s 的策略" msgid "No realm key for %r" msgstr "%r权限key不存在" -#, python-format -msgid "No space left on device for %s (%s)" -msgstr "设备上没有可容纳 %s (%s) 的空间" - #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "节点错误极限 %(ip)s:%(port)s (%(device)s)" @@ -826,14 +774,6 @@ msgstr "在X-Container-Sync-To中路径是必须的" msgid "Problem cleaning up %s" msgstr "问题清除%s" -#, python-format -msgid "Problem cleaning up %s (%s)" -msgstr "清除 %s (%s) 时发生了问题" - -#, python-format -msgid "Problem writing durable state file %s (%s)" -msgstr "编写可持续状态文件 %s (%s) 时发生了问题" - #, python-format msgid "Profiling Error: %s" msgstr "分析代码时出现错误:%s" @@ -847,10 +787,6 @@ msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "隔离%(object_path)s和%(quar_path)s因为非目录" -#, python-format -msgid "Quarantined %s to %s due to %s database" -msgstr "隔离%s和%s 因为%s数据库" - #, python-format msgid "Quarantining DB %s" msgstr "隔离DB%s" @@ -914,10 +850,6 @@ msgstr "正以脚本方式运行对象重构程序。" msgid 
"Running object replicator in script mode." msgstr "在加密模式下执行对象复制" -#, python-format -msgid "Signal %s pid: %s signal: %s" -msgstr "发出信号%s pid: %s 信号: %s" - #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " @@ -991,10 +923,6 @@ msgstr "尝试执行%(method)s %(path)s" msgid "Trying to GET %(full_path)s" msgstr "正尝试获取 %(full_path)s" -#, python-format -msgid "Trying to get %s status of PUT to %s" -msgstr "正尝试将 PUT 的 %s 状态发送至 %s" - #, python-format msgid "Trying to get final status of PUT to %s" msgstr "尝试执行获取最后的PUT状态%s" @@ -1019,10 +947,6 @@ msgstr "尝试执行书写%s" msgid "UNCAUGHT EXCEPTION" msgstr "未捕获的异常" -#, python-format -msgid "Unable to find %s config section in %s" -msgstr "无法在%s中查找到%s设置部分" - #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "无法从配置装入内部客户机:%r (%s)" @@ -1035,18 +959,10 @@ msgstr "无法查询到%s 保留为no-op" msgid "Unable to locate config for %s" msgstr "找不到 %s 的配置" -#, python-format -msgid "Unable to locate config number %s for %s" -msgstr "找不到 %s 的配置编号 %s" - msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "无法查询到fallocate, posix_fallocate。保存为no-op" -#, python-format -msgid "Unable to perform fsync() on directory %s: %s" -msgstr "无法在目录 %s 上执行 fsync():%s" - #, python-format msgid "Unable to read config from %s" msgstr "无法从%s读取设置" @@ -1088,14 +1004,6 @@ msgstr "警告:无法修改最大运行极限,是否按非root运行?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "警告:无法修改内存极限,是否按非root运行?" 
-#, python-format -msgid "Waited %s seconds for %s to die; giving up" -msgstr "等待%s秒直到%s停止;放弃" - -#, python-format -msgid "Waited %s seconds for %s to die; killing" -msgstr "已消耗 %s 秒等待 %s 终止;正在终止" - msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告:缺失缓存客户端 无法控制流量 " diff --git a/swift/locale/zh_TW/LC_MESSAGES/swift.po b/swift/locale/zh_TW/LC_MESSAGES/swift.po index 46ee202a44..0565e364c1 100644 --- a/swift/locale/zh_TW/LC_MESSAGES/swift.po +++ b/swift/locale/zh_TW/LC_MESSAGES/swift.po @@ -8,9 +8,9 @@ # Jennifer , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.6.1.dev268\n" +"Project-Id-Version: swift 2.7.1.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-03-24 22:25+0000\n" +"POT-Creation-Date: 2016-04-07 22:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -77,14 +77,6 @@ msgstr "%(success)s 個成功,%(failure)s 個失敗" msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s 針對 %(statuses)s 正在傳回 503" -#, python-format -msgid "%s #%d not running (%s)" -msgstr "%s #%d 未在執行中 (%s)" - -#, python-format -msgid "%s (%s) appears to have stopped" -msgstr "%s (%s) 似乎已停止" - #, python-format msgid "%s already started..." msgstr "%s 已啟動..." 
@@ -101,14 +93,6 @@ msgstr "未裝載 %s" msgid "%s responded as unmounted" msgstr "%s 已回應為未裝載" -#, python-format -msgid "%s running (%s - %s)" -msgstr "%s 在執行中 (%s - %s)" - -#, python-format -msgid "%s: %s" -msgstr "%s:%s" - #, python-format msgid "%s: Connection reset by peer" msgstr "%s:已由對等項目重設連線" @@ -147,10 +131,6 @@ msgstr ",回覆碼:" msgid "Account" msgstr "帳戶" -#, python-format -msgid "Account %s has not been reaped since %s" -msgstr "尚未回收帳戶 %s(自 %s 之後)" - #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "帳戶審核「一次性」模式已完成:%.02fs" @@ -164,10 +144,6 @@ msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "已嘗試在 %(time).5f 秒內抄寫 %(count)d 個資料庫 (%(rate).5f/s)" -#, python-format -msgid "Audit Failed for %s: %s" -msgstr "%s 的審核失敗:%s" - #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "不當的遠端同步回覆碼:%(ret)d <- %(args)s" @@ -193,10 +169,6 @@ msgstr "開始儲存器更新單一執行緒清理" msgid "Begin container update sweep" msgstr "開始儲存器更新清理" -#, python-format -msgid "Begin object audit \"%s\" mode (%s%s)" -msgstr "開始物件審核 \"%s\" 模式 (%s%s)" - msgid "Begin object update single threaded sweep" msgstr "開始物件更新單一執行緒清理" @@ -221,14 +193,6 @@ msgstr "無法存取檔案 %s。" msgid "Can not load profile data from %s." 
msgstr "無法從 %s 中載入設定檔資料。" -#, python-format -msgid "Cannot read %s (%s)" -msgstr "無法讀取 %s (%s)" - -#, python-format -msgid "Cannot write %s (%s)" -msgstr "無法寫入 %s (%s)" - #, python-format msgid "Client did not read from proxy within %ss" msgstr "用戶端未在 %s 秒內從 Proxy 中讀取" @@ -584,14 +548,6 @@ msgstr "最上層抄寫迴圈中發生異常狀況" msgid "Exception in top-levelreconstruction loop" msgstr "最上層重新建構迴圈中發生異常狀況" -#, python-format -msgid "Exception while deleting container %s %s" -msgstr "刪除儲存器 %s %s 時發生異常狀況" - -#, python-format -msgid "Exception while deleting object %s %s %s" -msgstr "刪除物件 %s %s %s 時發生異常狀況" - #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s 發生異常狀況" @@ -663,10 +619,6 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "正在結束長時間執行的遠端同步:%s" -#, python-format -msgid "Loading JSON from %s failed (%s)" -msgstr "從 %s 載入 JSON 失敗 (%s)" - msgid "Lockup detected.. killing live coros." msgstr "偵測到鎖定。正在結束即時 coro。" @@ -694,10 +646,6 @@ msgstr "沒有具有索引 %s 的原則" msgid "No realm key for %r" msgstr "沒有 %r 的範圍金鑰" -#, python-format -msgid "No space left on device for %s (%s)" -msgstr "裝置上沒有用於 %s 的剩餘空間 (%s)" - #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "節點錯誤限制 %(ip)s:%(port)s (%(device)s)" @@ -828,14 +776,6 @@ msgstr "X-Container-Sync-To 中需要路徑" msgid "Problem cleaning up %s" msgstr "清除 %s 時發生問題" -#, python-format -msgid "Problem cleaning up %s (%s)" -msgstr "清除 %s 時發生問題 (%s)" - -#, python-format -msgid "Problem writing durable state file %s (%s)" -msgstr "寫入可延續狀態檔 %s 時發生問題 (%s)" - #, python-format msgid "Profiling Error: %s" msgstr "側寫錯誤:%s" @@ -849,10 +789,6 @@ msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "已將 %(object_path)s 隔離至 %(quar_path)s,原因是它不是目錄" -#, python-format -msgid "Quarantined %s to %s due to %s database" -msgstr "已將 %s 隔離至 %s,原因是 %s 資料庫" - #, python-format msgid "Quarantining DB %s" msgstr "正在隔離資料庫 %s" @@ -916,10 +852,6 @@ msgstr "正在 
Script 模式下執行物件重新建構器。" msgid "Running object replicator in script mode." msgstr "正在 Script 模式下執行物件抄寫器" -#, python-format -msgid "Signal %s pid: %s signal: %s" -msgstr "信號 %s PID:%s 信號:%s" - #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " @@ -996,10 +928,6 @@ msgstr "正在嘗試 %(method)s %(path)s" msgid "Trying to GET %(full_path)s" msgstr "正在嘗試對 %(full_path)s 執行 GET 動作" -#, python-format -msgid "Trying to get %s status of PUT to %s" -msgstr "正在嘗試使 PUT 的 %s 狀態為 %s" - #, python-format msgid "Trying to get final status of PUT to %s" msgstr "正在嘗試使 PUT 的最終狀態為 %s" @@ -1024,10 +952,6 @@ msgstr "正在嘗試寫入至 %s" msgid "UNCAUGHT EXCEPTION" msgstr "未捕捉的異常狀況" -#, python-format -msgid "Unable to find %s config section in %s" -msgstr "找不到 %s 配置區段(在 %s 中)" - #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "無法從配置載入內部用戶端:%r (%s)" @@ -1040,18 +964,10 @@ msgstr "在 libc 中找不到 %s。保留為 no-op。" msgid "Unable to locate config for %s" msgstr "找不到 %s 的配置" -#, python-format -msgid "Unable to locate config number %s for %s" -msgstr "找不到配置號碼 %s(針對 %s)" - msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "在 libc 中找不到 fallocate、posix_fallocate。保留為 no-op。" -#, python-format -msgid "Unable to perform fsync() on directory %s: %s" -msgstr "無法在目錄 %s 上執行 fsync():%s" - #, python-format msgid "Unable to read config from %s" msgstr "無法從 %s 讀取配置" @@ -1095,14 +1011,6 @@ msgstr "警告:無法修改處理程序數上限限制。以非 root 使用者 msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "警告:無法修改記憶體限制。以非 root 使用者身分執行?" 
-#, python-format -msgid "Waited %s seconds for %s to die; giving up" -msgstr "已等待 %s 秒以讓 %s 當掉;正在放棄" - -#, python-format -msgid "Waited %s seconds for %s to die; killing" -msgstr "已等待 %s 秒以讓 %s 當掉" - msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告:無法在沒有 memcached 用戶端的情況下限制速率" From a829bd59770681f9d6c1ef02a6e1d5e441587a23 Mon Sep 17 00:00:00 2001 From: Tin Lam Date: Fri, 8 Apr 2016 18:40:30 -0500 Subject: [PATCH 081/141] Convert CONTRIBUTING.md to CONTRIBUTING.rst Change-Id: I64c42c42db35a9f55a1df9d4ab6e97a2506b8c45 Closes-Bug: #1567027 --- CONTRIBUTING.md | 98 ---------------------------------------- CONTRIBUTING.rst | 113 +++++++++++++++++++++++++++++++++++++++++++++++ MANIFEST.in | 2 +- 3 files changed, 114 insertions(+), 99 deletions(-) delete mode 100644 CONTRIBUTING.md create mode 100644 CONTRIBUTING.rst diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 1f69a82562..0000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,98 +0,0 @@ -If you would like to contribute to the development of OpenStack, -you must follow the steps in this page: [http://docs.openstack.org/infra/manual/developers.html](http://docs.openstack.org/infra/manual/developers.html) - -Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at [http://docs.openstack.org/infra/manual/developers.html#development-workflow](http://docs.openstack.org/infra/manual/developers.html#development-workflow). - -Gerrit is the review system used in the OpenStack projects. We're sorry, but -we won't be able to respond to pull requests submitted through GitHub. - -Bugs should be filed [on Launchpad](https://bugs.launchpad.net/swift), -not in GitHub's issue tracker. 
- - -Swift Design Principles -======================= - - * [The Zen of Python](http://legacy.python.org/dev/peps/pep-0020/) - * Simple Scales - * Minimal dependencies - * Re-use existing tools and libraries when reasonable - * Leverage the economies of scale - * Small, loosely coupled RESTful services - * No single points of failure - * Start with the use case - * ... then design from the cluster operator up - * If you haven't argued about it, you don't have the right answer yet :) - * If it is your first implementation, you probably aren't done yet :) - -Please don't feel offended by difference of opinion. Be prepared to advocate -for your change and iterate on it based on feedback. Reach out to other people -working on the project on -[IRC](http://eavesdrop.openstack.org/irclogs/%23openstack-swift/) or the -[mailing list](http://lists.openstack.org/pipermail/openstack-dev/) - we want -to help. - -Recommended workflow -==================== - - * Set up a [Swift All-In-One VM](http://docs.openstack.org/developer/swift/development_saio.html)(SAIO). - - * Make your changes. Docs and tests for your patch must land before - or with your patch. - - * Run unit tests, functional tests, probe tests - ``./.unittests`` - ``./.functests`` - ``./.probetests`` - - * Run ``tox`` (no command-line args needed) - - * ``git review`` - -Notes on Testing -================ - -Running the tests above against Swift in your development environment (ie -your SAIO) will catch most issues. Any patch you propose is expected to be -both tested and documented and all tests should pass. 
- -If you want to run just a subset of the tests while you are developing, you -can use nosetests:: - - cd test/unit/common/middleware/ && nosetests test_healthcheck.py - -To check which parts of your code are being exercised by a test, you can run -tox and then point your browser to swift/cover/index.html:: - - tox -e py27 -- test.unit.common.middleware.test_healthcheck:TestHealthCheck.test_healthcheck - -Swift's unit tests are designed to test small parts of the code in isolation. -The functional tests validate that the entire system is working from an -external perspective (they are "black-box" tests). You can even run functional -tests against public Swift endpoints. The probetests are designed to test much -of Swift's internal processes. For example, a test may write data, -intentionally corrupt it, and then ensure that the correct processes detect -and repair it. - -When your patch is submitted for code review, it will automatically be tested -on the OpenStack CI infrastructure. In addition to many of the tests above, it -will also be tested by several other OpenStack test jobs. - -Once your patch has been reviewed and approved by two core reviewers and has -passed all automated tests, it will be merged into the Swift source tree. - -Specs -===== - -The [``swift-specs``](https://github.com/openstack/swift-specs) repo -can be used for collaborative design work before a feature is implemented. - -OpenStack's gerrit system is used to collaborate on the design spec. Once -approved OpenStack provides a doc site to easily read these [specs](http://specs.openstack.org/openstack/swift-specs/) - -A spec is needed for more impactful features. Coordinating a feature between -many devs (especially across companies) is a great example of when a spec is -needed. If you are unsure if a spec document is needed, please feel free to -ask in #openstack-swift on freenode IRC. 
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 0000000000..0bec893829 --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,113 @@ +If you would like to contribute to the development of OpenStack, you +must follow the steps in this page: +http://docs.openstack.org/infra/manual/developers.html + +Once those steps have been completed, changes to OpenStack should be +submitted for review via the Gerrit tool, following the workflow +documented at +http://docs.openstack.org/infra/manual/developers.html#development-workflow. + +Gerrit is the review system used in the OpenStack projects. We're sorry, +but we won't be able to respond to pull requests submitted through +GitHub. + +Bugs should be filed `on +Launchpad `__, not in GitHub's issue +tracker. + +Swift Design Principles +======================= + +- `The Zen of Python `__ +- Simple Scales +- Minimal dependencies +- Re-use existing tools and libraries when reasonable +- Leverage the economies of scale +- Small, loosely coupled RESTful services +- No single points of failure +- Start with the use case +- ... then design from the cluster operator up +- If you haven't argued about it, you don't have the right answer yet + :) +- If it is your first implementation, you probably aren't done yet :) + +Please don't feel offended by difference of opinion. Be prepared to +advocate for your change and iterate on it based on feedback. Reach out +to other people working on the project on +`IRC `__ or +the `mailing +list `__ - we want +to help. + +Recommended workflow +==================== + +- Set up a `Swift All-In-One + VM `__\ (SAIO). + +- Make your changes. Docs and tests for your patch must land before or + with your patch. 
+ +- Run unit tests, functional tests, probe tests ``./.unittests`` + ``./.functests`` ``./.probetests`` + +- Run ``tox`` (no command-line args needed) + +- ``git review`` + +Notes on Testing +================ + +Running the tests above against Swift in your development environment +(ie your SAIO) will catch most issues. Any patch you propose is expected +to be both tested and documented and all tests should pass. + +If you want to run just a subset of the tests while you are developing, +you can use nosetests: + +.. code-block:: console + + cd test/unit/common/middleware/ && nosetests test_healthcheck.py + +To check which parts of your code are being exercised by a test, you can +run tox and then point your browser to swift/cover/index.html: + +.. code-block:: console + + tox -e py27 -- test.unit.common.middleware.test_healthcheck:TestHealthCheck.test_healthcheck + +Swift's unit tests are designed to test small parts of the code in +isolation. The functional tests validate that the entire system is +working from an external perspective (they are "black-box" tests). You +can even run functional tests against public Swift endpoints. The +probetests are designed to test much of Swift's internal processes. For +example, a test may write data, intentionally corrupt it, and then +ensure that the correct processes detect and repair it. + +When your patch is submitted for code review, it will automatically be +tested on the OpenStack CI infrastructure. In addition to many of the +tests above, it will also be tested by several other OpenStack test +jobs. + +Once your patch has been reviewed and approved by two core reviewers and +has passed all automated tests, it will be merged into the Swift source +tree. + +Specs +===== + +.. |swift-specs| replace:: ``swift-specs`` +.. _swift-specs: https://github.com/openstack/swift-specs + +The |swift-specs|_ repo +can be used for collaborative design work before a feature is +implemented. 
+ +OpenStack's gerrit system is used to collaborate on the design spec. +Once approved OpenStack provides a doc site to easily read these +`specs `__ + +A spec is needed for more impactful features. Coordinating a feature +between many devs (especially across companies) is a great example of +when a spec is needed. If you are unsure if a spec document is needed, +please feel free to ask in #openstack-swift on freenode IRC. diff --git a/MANIFEST.in b/MANIFEST.in index 0a2da4a617..bcbc015d33 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,5 +1,5 @@ include AUTHORS LICENSE .functests .unittests .probetests test/__init__.py -include CHANGELOG CONTRIBUTING.md README.md +include CHANGELOG CONTRIBUTING.rst README.md include babel.cfg include test/sample.conf include tox.ini From 33f06dc48f7bec2e128b44427fb429ad640cd486 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?= Date: Sat, 9 Apr 2016 18:47:58 +0200 Subject: [PATCH 082/141] Fixed Sphinx errors doc/source/deployment_guide.rst:1372: ERROR: Malformed table. swift/obj/diskfile.py:docstring of swift.obj.diskfile.BaseDiskFileManager.yield_hashes:13: ERROR: Unexpected indentation. doc/source/ops_runbook/diagnose.rst:188: WARNING: Inline emphasis start-string without end-string. 
Change-Id: Id20eb62eb5baebb3814e7af5676badb94f17dee5 --- doc/source/deployment_guide.rst | 6 +++--- doc/source/ops_runbook/diagnose.rst | 2 +- swift/obj/diskfile.py | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 37e0a79708..083a298578 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -1276,9 +1276,9 @@ expiring_objects_account_name expiring_objects [proxy-server] -============================ =============== ============================= +============================ =============== ===================================== Option Default Description ----------------------------- --------------- ----------------------------- +---------------------------- --------------- ------------------------------------- use Entry point for paste.deploy for the proxy server. For most cases, this should be @@ -1398,7 +1398,7 @@ concurrency_timeout conn_timeout This parameter controls how long firing of the threads. This number should be between 0 and node_timeout. The default is conn_timeout (0.5). -============================ =============== ============================= +============================ =============== ===================================== [tempauth] diff --git a/doc/source/ops_runbook/diagnose.rst b/doc/source/ops_runbook/diagnose.rst index 9066112093..c1a2a2e99a 100644 --- a/doc/source/ops_runbook/diagnose.rst +++ b/doc/source/ops_runbook/diagnose.rst @@ -185,7 +185,7 @@ The following table lists known issues: - object-replicator .... 
responded as unmounted - A storage server disk is unavailable - Repair and remount the file system (on the remote node) - * - /var/log/swift/*.log + * - /var/log/swift/\*.log - STDOUT: EXCEPTION IN - A unexpected error occurred - Read the Traceback details, if it matches known issues diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index 991dc5228a..27b19af58a 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -1289,10 +1289,10 @@ class BaseDiskFileManager(object): timestamps is a dict which may contain items mapping: - ts_data -> timestamp of data or tombstone file, - ts_meta -> timestamp of meta file, if one exists - ts_ctype -> timestamp of meta file containing most recent - content-type value, if one exists + - ts_data -> timestamp of data or tombstone file, + - ts_meta -> timestamp of meta file, if one exists + - ts_ctype -> timestamp of meta file containing most recent + content-type value, if one exists where timestamps are instances of :class:`~swift.common.utils.Timestamp` From b13a85367ef7f5fd64e285174859025d34b4e451 Mon Sep 17 00:00:00 2001 From: Thiago da Silva Date: Mon, 21 Dec 2015 16:25:45 -0200 Subject: [PATCH 083/141] decouple versioned writes from COPY This change removes the use of the COPY request in the versioned writes middleware. It changes the COPY verb for GETs and PUTs requests. The main reasoning for this change is to remove any dependency that versioning had on copy, which will allow for the COPY functionality to be moved to middleware and to be to the left of the versioned writes middleware in the proxy pipeline. In this way, no COPY request will ever arrive at the versioned writes middleware. A side benefit of this change is that it removes a HEAD request from the PUT path. Instead of checking if a current version exists, a GET request is sent, in case of success, a PUT is sent to the versions container. A unit test was removed that tested non-default storage policies. 
This test is no longer necessary, since it was used to test specific policy handling code in the COPY method in the proxy object controller. Closes-Bug: #1365862 Change-Id: Idf34fa8d04ff292df7134b6d4aa94ff40887b3a4 Co-Authored-By: Alistair Coles Co-Authored-By: Janie Richling Co-Authored-By: Kota Tsuyuzaki Signed-off-by: Thiago da Silva --- swift/common/middleware/versioned_writes.py | 238 +++++++------ test/functional/tests.py | 130 ++++++- test/unit/common/middleware/helpers.py | 2 + .../middleware/test_versioned_writes.py | 326 ++++++++++++------ 4 files changed, 489 insertions(+), 207 deletions(-) diff --git a/swift/common/middleware/versioned_writes.py b/swift/common/middleware/versioned_writes.py index 51497c7f8d..3cb0989bba 100644 --- a/swift/common/middleware/versioned_writes.py +++ b/swift/common/middleware/versioned_writes.py @@ -117,16 +117,19 @@ Disable versioning from a container (x is any value except empty):: import calendar import json -import six from six.moves.urllib.parse import quote, unquote import time + from swift.common.utils import get_logger, Timestamp, \ - register_swift_info, config_true_value -from swift.common.request_helpers import get_sys_meta_prefix + register_swift_info, config_true_value, close_if_possible, FileLikeIter +from swift.common.request_helpers import get_sys_meta_prefix, \ + copy_header_subset from swift.common.wsgi import WSGIContext, make_pre_authed_request -from swift.common.swob import Request, HTTPException +from swift.common.swob import ( + Request, HTTPException, HTTPRequestEntityTooLarge) from swift.common.constraints import ( - check_account_format, check_container_format, check_destination_header) + check_account_format, check_container_format, check_destination_header, + MAX_FILE_SIZE) from swift.proxy.controllers.base import get_container_info from swift.common.http import ( is_success, is_client_error, HTTP_NOT_FOUND) @@ -254,87 +257,122 @@ class VersionedWritesContext(WSGIContext): marker = last_item yield 
sublisting - def handle_obj_versions_put(self, req, object_versions, - object_name, policy_index): - ret = None - - # do a HEAD request to check object versions + def _get_source_object(self, req, path_info): + # make a GET request to check object versions _headers = {'X-Newest': 'True', - 'X-Backend-Storage-Policy-Index': policy_index, 'x-auth-token': req.headers.get('x-auth-token')} # make a pre_auth request in case the user has write access # to container, but not READ. This was allowed in previous version # (i.e., before middleware) so keeping the same behavior here - head_req = make_pre_authed_request( - req.environ, path=req.path_info, - headers=_headers, method='HEAD', swift_source='VW') - hresp = head_req.get_response(self.app) + get_req = make_pre_authed_request( + req.environ, path=path_info, + headers=_headers, method='GET', swift_source='VW') + source_resp = get_req.get_response(self.app) - is_dlo_manifest = 'X-Object-Manifest' in req.headers or \ - 'X-Object-Manifest' in hresp.headers + if source_resp.content_length is None or \ + source_resp.content_length > MAX_FILE_SIZE: + return HTTPRequestEntityTooLarge(request=req) + + return source_resp + + def _put_versioned_obj(self, req, put_path_info, source_resp): + # Create a new Request object to PUT to the versions container, copying + # all headers from the source object apart from x-timestamp. 
+ put_req = make_pre_authed_request( + req.environ, path=put_path_info, method='PUT', + swift_source='VW') + copy_header_subset(source_resp, put_req, + lambda k: k.lower() != 'x-timestamp') + put_req.headers['x-auth-token'] = req.headers.get('x-auth-token') + put_req.environ['wsgi.input'] = FileLikeIter(source_resp.app_iter) + return put_req.get_response(self.app) + + def _check_response_error(self, req, resp): + """ + Raise Error Response in case of error + """ + if is_success(resp.status_int): + return + if is_client_error(resp.status_int): + # missing container or bad permissions + raise HTTPPreconditionFailed(request=req) + # could not version the data, bail + raise HTTPServiceUnavailable(request=req) + + def handle_obj_versions_put(self, req, versions_cont, api_version, + account_name, object_name): + """ + Copy current version of object to versions_container before proceding + with original request. + + :param req: original request. + :param versions_cont: container where previous versions of the object + are stored. + :param api_version: api version. + :param account_name: account name. 
+ :param object_name: name of object of original request + """ + if 'X-Object-Manifest' in req.headers: + # do not version DLO manifest, proceed with original request + return self.app + + get_resp = self._get_source_object(req, req.path_info) + + if 'X-Object-Manifest' in get_resp.headers: + # do not version DLO manifest, proceed with original request + close_if_possible(get_resp.app_iter) + return self.app + if get_resp.status_int == HTTP_NOT_FOUND: + # nothing to version, proceed with original request + close_if_possible(get_resp.app_iter) + return self.app + + # check for any other errors + self._check_response_error(req, get_resp) # if there's an existing object, then copy it to # X-Versions-Location - if is_success(hresp.status_int) and not is_dlo_manifest: - lcontainer = object_versions.split('/')[0] - prefix_len = '%03x' % len(object_name) - lprefix = prefix_len + object_name + '/' - ts_source = hresp.environ.get('swift_x_timestamp') - if ts_source is None: - ts_source = calendar.timegm(time.strptime( - hresp.headers['last-modified'], - '%a, %d %b %Y %H:%M:%S GMT')) - new_ts = Timestamp(ts_source).internal - vers_obj_name = lprefix + new_ts - copy_headers = { - 'Destination': '%s/%s' % (lcontainer, vers_obj_name), - 'x-auth-token': req.headers.get('x-auth-token')} + prefix_len = '%03x' % len(object_name) + lprefix = prefix_len + object_name + '/' + ts_source = get_resp.headers.get( + 'x-timestamp', + calendar.timegm(time.strptime( + get_resp.headers['last-modified'], + '%a, %d %b %Y %H:%M:%S GMT'))) + vers_obj_name = lprefix + Timestamp(ts_source).internal - # COPY implementation sets X-Newest to True when it internally - # does a GET on source object. So, we don't have to explicity - # set it in request headers here. 
- copy_req = make_pre_authed_request( - req.environ, path=req.path_info, - headers=copy_headers, method='COPY', swift_source='VW') - copy_resp = copy_req.get_response(self.app) + put_path_info = "/%s/%s/%s/%s" % ( + api_version, account_name, versions_cont, vers_obj_name) + put_resp = self._put_versioned_obj(req, put_path_info, get_resp) - if is_success(copy_resp.status_int): - # success versioning previous existing object - # return None and handle original request - ret = None - else: - if is_client_error(copy_resp.status_int): - # missing container or bad permissions - ret = HTTPPreconditionFailed(request=req) - else: - # could not copy the data, bail - ret = HTTPServiceUnavailable(request=req) + self._check_response_error(req, put_resp) + return self.app - else: - if hresp.status_int == HTTP_NOT_FOUND or is_dlo_manifest: - # nothing to version - # return None and handle original request - ret = None - else: - # if not HTTP_NOT_FOUND, return error immediately - ret = hresp - - return ret - - def handle_obj_versions_delete(self, req, object_versions, + def handle_obj_versions_delete(self, req, versions_cont, api_version, account_name, container_name, object_name): - lcontainer = object_versions.split('/')[0] + """ + Delete current version of object and pop previous version in its place. + + :param req: original request. + :param versions_cont: container where previous versions of the object + are stored. + :param api_version: api version. + :param account_name: account name. + :param container_name: container name. + :param object_name: object name. 
+ """ prefix_len = '%03x' % len(object_name) lprefix = prefix_len + object_name + '/' - item_iter = self._listing_iter(account_name, lcontainer, lprefix, req) + item_iter = self._listing_iter(account_name, versions_cont, lprefix, + req) authed = False for previous_version in item_iter: if not authed: - # we're about to start making COPY requests - need to - # validate the write access to the versioned container + # validate the write access to the versioned container before + # making any backend requests if 'swift.authorize' in req.environ: container_info = get_container_info( req.environ, self.app) @@ -348,35 +386,29 @@ class VersionedWritesContext(WSGIContext): # current object and delete the previous version prev_obj_name = previous_version['name'].encode('utf-8') - copy_path = '/v1/' + account_name + '/' + \ - lcontainer + '/' + prev_obj_name + get_path = "/%s/%s/%s/%s" % ( + api_version, account_name, versions_cont, prev_obj_name) - copy_headers = {'X-Newest': 'True', - 'Destination': container_name + '/' + object_name, - 'x-auth-token': req.headers.get('x-auth-token')} - - copy_req = make_pre_authed_request( - req.environ, path=copy_path, - headers=copy_headers, method='COPY', swift_source='VW') - copy_resp = copy_req.get_response(self.app) + get_resp = self._get_source_object(req, get_path) # if the version isn't there, keep trying with previous version - if copy_resp.status_int == HTTP_NOT_FOUND: + if get_resp.status_int == HTTP_NOT_FOUND: continue - if not is_success(copy_resp.status_int): - if is_client_error(copy_resp.status_int): - # some user error, maybe permissions - return HTTPPreconditionFailed(request=req) - else: - # could not copy the data, bail - return HTTPServiceUnavailable(request=req) + self._check_response_error(req, get_resp) - # reset these because the COPY changed them - new_del_req = make_pre_authed_request( - req.environ, path=copy_path, method='DELETE', + put_path_info = "/%s/%s/%s/%s" % ( + api_version, account_name, 
container_name, object_name) + put_resp = self._put_versioned_obj(req, put_path_info, get_resp) + + self._check_response_error(req, put_resp) + + # redirect the original DELETE to the source of the reinstated + # version object - we already auth'd original req so make a + # pre-authed request + req = make_pre_authed_request( + req.environ, path=get_path, method='DELETE', swift_source='VW') - req = new_del_req # remove 'X-If-Delete-At', since it is not for the older copy if 'X-If-Delete-At' in req.headers: @@ -438,7 +470,7 @@ class VersionedWritesMiddleware(object): req.headers['X-Versions-Location'] = '' # if both headers are in the same request - # adding location takes precendence over removing + # adding location takes precedence over removing if 'X-Remove-Versions-Location' in req.headers: del req.headers['X-Remove-Versions-Location'] else: @@ -456,7 +488,7 @@ class VersionedWritesMiddleware(object): vw_ctx = VersionedWritesContext(self.app, self.logger) return vw_ctx.handle_container_request(req.environ, start_response) - def object_request(self, req, version, account, container, obj, + def object_request(self, req, api_version, account, container, obj, allow_versioned_writes): account_name = unquote(account) container_name = unquote(container) @@ -473,7 +505,7 @@ class VersionedWritesMiddleware(object): account_name = check_account_format(req, account_name) container_name, object_name = check_destination_header(req) req.environ['PATH_INFO'] = "/%s/%s/%s/%s" % ( - version, account_name, container_name, object_name) + api_version, account_name, container_name, object_name) container_info = get_container_info( req.environ, self.app) @@ -485,30 +517,26 @@ class VersionedWritesMiddleware(object): # If stored as sysmeta, check if middleware is enabled. If sysmeta # is not set, but versions property is set in container_info, then # for backwards compatibility feature is enabled. 
- object_versions = container_info.get( + versions_cont = container_info.get( 'sysmeta', {}).get('versions-location') - if object_versions and isinstance(object_versions, six.text_type): - object_versions = object_versions.encode('utf-8') - elif not object_versions: - object_versions = container_info.get('versions') + if not versions_cont: + versions_cont = container_info.get('versions') # if allow_versioned_writes is not set in the configuration files # but 'versions' is configured, enable feature to maintain # backwards compatibility - if not allow_versioned_writes and object_versions: + if not allow_versioned_writes and versions_cont: is_enabled = True - if is_enabled and object_versions: - object_versions = unquote(object_versions) + if is_enabled and versions_cont: + versions_cont = unquote(versions_cont).split('/')[0] vw_ctx = VersionedWritesContext(self.app, self.logger) if req.method in ('PUT', 'COPY'): - policy_idx = req.headers.get( - 'X-Backend-Storage-Policy-Index', - container_info['storage_policy']) resp = vw_ctx.handle_obj_versions_put( - req, object_versions, object_name, policy_idx) + req, versions_cont, api_version, account_name, + object_name) else: # handle DELETE resp = vw_ctx.handle_obj_versions_delete( - req, object_versions, account_name, + req, versions_cont, api_version, account_name, container_name, object_name) if resp: @@ -522,7 +550,7 @@ class VersionedWritesMiddleware(object): # versioned container req = Request(env.copy()) try: - (version, account, container, obj) = req.split_path(3, 4, True) + (api_version, account, container, obj) = req.split_path(3, 4, True) except ValueError: return self.app(env, start_response) @@ -551,7 +579,7 @@ class VersionedWritesMiddleware(object): elif obj and req.method in ('PUT', 'COPY', 'DELETE'): try: return self.object_request( - req, version, account, container, obj, + req, api_version, account, container, obj, allow_versioned_writes)(env, start_response) except HTTPException as error_response: 
return error_response(env, start_response) diff --git a/test/functional/tests.py b/test/functional/tests.py index 35339e68ec..5678e3ae7b 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -3565,10 +3565,23 @@ class TestObjectVersioning(Base): obj_name = Utils.create_name() versioned_obj = container.file(obj_name) - versioned_obj.write("aaaaa", hdrs={'Content-Type': 'text/jibberish01'}) + put_headers = {'Content-Type': 'text/jibberish01', + 'Content-Encoding': 'gzip', + 'Content-Disposition': 'attachment; filename=myfile'} + versioned_obj.write("aaaaa", hdrs=put_headers) obj_info = versioned_obj.info() self.assertEqual('text/jibberish01', obj_info['content_type']) + # the allowed headers are configurable in object server, so we cannot + # assert that content-encoding or content-disposition get *copied* to + # the object version unless they were set on the original PUT, so + # populate expected_headers by making a HEAD on the original object + resp_headers = dict(versioned_obj.conn.response.getheaders()) + expected_headers = {} + for k, v in put_headers.items(): + if k.lower() in resp_headers: + expected_headers[k] = v + self.assertEqual(0, versions_container.info()['object_count']) versioned_obj.write("bbbbb", hdrs={'Content-Type': 'text/jibberish02', 'X-Object-Meta-Foo': 'Bar'}) @@ -3584,6 +3597,11 @@ class TestObjectVersioning(Base): self.assertEqual("aaaaa", prev_version.read()) self.assertEqual(prev_version.content_type, 'text/jibberish01') + resp_headers = dict(prev_version.conn.response.getheaders()) + for k, v in expected_headers.items(): + self.assertIn(k.lower(), resp_headers) + self.assertEqual(v, resp_headers[k.lower()]) + # make sure the new obj metadata did not leak to the prev. 
version self.assertTrue('foo' not in prev_version.metadata) @@ -3632,6 +3650,15 @@ class TestObjectVersioning(Base): versioned_obj.delete() self.assertEqual("aaaaa", versioned_obj.read()) self.assertEqual(0, versions_container.info()['object_count']) + + # verify that all the original object headers have been copied back + obj_info = versioned_obj.info() + self.assertEqual('text/jibberish01', obj_info['content_type']) + resp_headers = dict(versioned_obj.conn.response.getheaders()) + for k, v in expected_headers.items(): + self.assertIn(k.lower(), resp_headers) + self.assertEqual(v, resp_headers[k.lower()]) + versioned_obj.delete() self.assertRaises(ResponseError, versioned_obj.read) @@ -3795,6 +3822,107 @@ class TestCrossPolicyObjectVersioning(TestObjectVersioning): self.env.versioning_enabled,)) +class TestSloWithVersioning(Base): + + def setUp(self): + if 'slo' not in cluster_info: + raise SkipTest("SLO not enabled") + + self.conn = Connection(tf.config) + self.conn.authenticate() + self.account = Account( + self.conn, tf.config.get('account', tf.config['username'])) + self.account.delete_containers() + + # create a container with versioning + self.versions_container = self.account.container(Utils.create_name()) + self.container = self.account.container(Utils.create_name()) + self.segments_container = self.account.container(Utils.create_name()) + if not self.container.create( + hdrs={'X-Versions-Location': self.versions_container.name}): + raise ResponseError(self.conn.response) + if 'versions' not in self.container.info(): + raise SkipTest("Object versioning not enabled") + + for cont in (self.versions_container, self.segments_container): + if not cont.create(): + raise ResponseError(self.conn.response) + + # create some segments + self.seg_info = {} + for letter, size in (('a', 1024 * 1024), + ('b', 1024 * 1024)): + seg_name = letter + file_item = self.segments_container.file(seg_name) + file_item.write(letter * size) + self.seg_info[seg_name] = { + 
'size_bytes': size, + 'etag': file_item.md5, + 'path': '/%s/%s' % (self.segments_container.name, seg_name)} + + def _create_manifest(self, seg_name): + # create a manifest in the versioning container + file_item = self.container.file("my-slo-manifest") + file_item.write( + json.dumps([self.seg_info[seg_name]]), + parms={'multipart-manifest': 'put'}) + return file_item + + def _assert_is_manifest(self, file_item, seg_name): + manifest_body = file_item.read(parms={'multipart-manifest': 'get'}) + resp_headers = dict(file_item.conn.response.getheaders()) + self.assertIn('x-static-large-object', resp_headers) + self.assertEqual('application/json; charset=utf-8', + file_item.content_type) + try: + manifest = json.loads(manifest_body) + except ValueError: + self.fail("GET with multipart-manifest=get got invalid json") + + self.assertEqual(1, len(manifest)) + key_map = {'etag': 'hash', 'size_bytes': 'bytes', 'path': 'name'} + for k_client, k_slo in key_map.items(): + self.assertEqual(self.seg_info[seg_name][k_client], + manifest[0][k_slo]) + + def _assert_is_object(self, file_item, seg_name): + file_contents = file_item.read() + self.assertEqual(1024 * 1024, len(file_contents)) + self.assertEqual(seg_name, file_contents[0]) + self.assertEqual(seg_name, file_contents[-1]) + + def tearDown(self): + # remove versioning to allow simple container delete + self.container.update_metadata(hdrs={'X-Versions-Location': ''}) + self.account.delete_containers() + + def test_slo_manifest_version(self): + file_item = self._create_manifest('a') + # sanity check: read the manifest, then the large object + self._assert_is_manifest(file_item, 'a') + self._assert_is_object(file_item, 'a') + + # upload new manifest + file_item = self._create_manifest('b') + # sanity check: read the manifest, then the large object + self._assert_is_manifest(file_item, 'b') + self._assert_is_object(file_item, 'b') + + versions_list = self.versions_container.files() + self.assertEqual(1, len(versions_list)) + 
version_file = self.versions_container.file(versions_list[0]) + # check the version is still a manifest + self._assert_is_manifest(version_file, 'a') + self._assert_is_object(version_file, 'a') + + # delete the newest manifest + file_item.delete() + + # expect the original manifest file to be restored + self._assert_is_manifest(file_item, 'a') + self._assert_is_object(file_item, 'a') + + class TestTempurlEnv(object): tempurl_enabled = None # tri-state: None initially, then True/False diff --git a/test/unit/common/middleware/helpers.py b/test/unit/common/middleware/helpers.py index 0847a1cbcf..9c69da7431 100644 --- a/test/unit/common/middleware/helpers.py +++ b/test/unit/common/middleware/helpers.py @@ -49,6 +49,7 @@ class FakeSwift(object): self._unclosed_req_paths = defaultdict(int) self.req_method_paths = [] self.swift_sources = [] + self.txn_ids = [] self.uploaded = {} # mapping of (method, path) --> (response class, headers, body) self._responses = {} @@ -83,6 +84,7 @@ class FakeSwift(object): req_headers = swob.Request(env).headers self.swift_sources.append(env.get('swift.source')) + self.txn_ids.append(env.get('swift.trans_id')) try: resp_class, raw_headers, body = self._find_response(method, path) diff --git a/test/unit/common/middleware/test_versioned_writes.py b/test/unit/common/middleware/test_versioned_writes.py index e53ef589b8..c6da47fde8 100644 --- a/test/unit/common/middleware/test_versioned_writes.py +++ b/test/unit/common/middleware/test_versioned_writes.py @@ -137,9 +137,8 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): status, headers, body = self.call_vw(req) self.assertEqual(status, "412 Precondition Failed") - # GET/HEAD performs as normal + # GET performs as normal self.app.register('GET', '/v1/a/c', swob.HTTPOk, {}, 'passed') - self.app.register('HEAD', '/v1/a/c', swob.HTTPOk, {}, 'passed') for method in ('GET', 'HEAD'): req = Request.blank('/v1/a/c', @@ -162,7 +161,31 @@ class 
VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertEqual('POST', method) self.assertEqual('/v1/a/c', path) self.assertTrue('x-container-sysmeta-versions-location' in req_headers) + self.assertEqual('', + req_headers['x-container-sysmeta-versions-location']) self.assertTrue('x-versions-location' in req_headers) + self.assertEqual('', req_headers['x-versions-location']) + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + def test_empty_versions_location(self): + self.app.register('POST', '/v1/a/c', swob.HTTPOk, {}, 'passed') + req = Request.blank('/v1/a/c', + headers={'X-Versions-Location': ''}, + environ={'REQUEST_METHOD': 'POST'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '200 OK') + + # check for sysmeta header + calls = self.app.calls_with_headers + method, path, req_headers = calls[0] + self.assertEqual('POST', method) + self.assertEqual('/v1/a/c', path) + self.assertTrue('x-container-sysmeta-versions-location' in req_headers) + self.assertEqual('', + req_headers['x-container-sysmeta-versions-location']) + self.assertTrue('x-versions-location' in req_headers) + self.assertEqual('', req_headers['x-versions-location']) self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) @@ -240,51 +263,27 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.app.register( 'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') self.app.register( - 'HEAD', '/v1/a/c/o', swob.HTTPNotFound, {}, None) + 'GET', '/v1/a/c/o', swob.HTTPNotFound, {}, None) cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) req = Request.blank( '/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache, - 'CONTENT_LENGTH': '100'}) + 'CONTENT_LENGTH': '100', + 'swift.trans_id': 'fake_trans_id'}) status, headers, body = self.call_vw(req) self.assertEqual(status, '200 OK') self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, 
self.authorized[0]) - - def test_PUT_versioning_with_nonzero_default_policy(self): - self.app.register( - 'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') - self.app.register( - 'HEAD', '/v1/a/c/o', swob.HTTPNotFound, {}, None) - - cache = FakeCache({'versions': 'ver_cont', 'storage_policy': '2'}) - req = Request.blank( - '/v1/a/c/o', - environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache, - 'CONTENT_LENGTH': '100'}) - status, headers, body = self.call_vw(req) - self.assertEqual(status, '200 OK') - - # check for 'X-Backend-Storage-Policy-Index' in HEAD request - calls = self.app.calls_with_headers - method, path, req_headers = calls[0] - self.assertEqual('HEAD', method) - self.assertEqual('/v1/a/c/o', path) - self.assertTrue('X-Backend-Storage-Policy-Index' in req_headers) - self.assertEqual('2', - req_headers.get('X-Backend-Storage-Policy-Index')) - self.assertEqual(len(self.authorized), 1) - self.assertRequestEqual(req, self.authorized[0]) + self.assertEqual(2, self.app.call_count) + self.assertEqual(['VW', None], self.app.swift_sources) + self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids)) def test_put_object_no_versioning_with_container_config_true(self): - # set False to versions_write obsously and expect no COPY occurred + # set False to versions_write and expect no GET occurred self.vw.conf = {'allow_versioned_writes': 'false'} self.app.register( 'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed') - self.app.register( - 'HEAD', '/v1/a/c/o', swob.HTTPOk, - {'last-modified': 'Wed, 19 Nov 2014 18:19:02 GMT'}, 'passed') cache = FakeCache({'versions': 'ver_cont'}) req = Request.blank( '/v1/a/c/o', @@ -295,11 +294,46 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) called_method = [method for (method, path, hdrs) in self.app._calls] - self.assertTrue('COPY' not in called_method) + self.assertTrue('GET' not in called_method) + + def 
test_put_request_is_dlo_manifest_with_container_config_true(self): + # set x-object-manifest on request and expect no versioning occurred + # only the PUT for the original client request + self.app.register( + 'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed') + cache = FakeCache({'versions': 'ver_cont'}) + req = Request.blank( + '/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache, + 'CONTENT_LENGTH': '100'}) + req.headers['X-Object-Manifest'] = 'req/manifest' + status, headers, body = self.call_vw(req) + self.assertEqual(status, '201 Created') + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + self.assertEqual(1, self.app.call_count) + + def test_put_version_is_dlo_manifest_with_container_config_true(self): + # set x-object-manifest on response and expect no versioning occurred + # only initial GET on source object ok followed by PUT + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, + {'X-Object-Manifest': 'resp/manifest'}, 'passed') + self.app.register( + 'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed') + cache = FakeCache({'versions': 'ver_cont'}) + req = Request.blank( + '/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache, + 'CONTENT_LENGTH': '100'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '201 Created') + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + self.assertEqual(2, self.app.call_count) def test_delete_object_no_versioning_with_container_config_true(self): # set False to versions_write obviously and expect no GET versioning - # container and COPY called (just delete object as normal) + # container and PUT called (just delete object as normal) self.vw.conf = {'allow_versioned_writes': 'false'} self.app.register( 'DELETE', '/v1/a/c/o', swob.HTTPNoContent, {}, 'passed') @@ -313,8 +347,9 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertRequestEqual(req, 
self.authorized[0]) called_method = \ [method for (method, path, rheaders) in self.app._calls] - self.assertTrue('COPY' not in called_method) + self.assertTrue('PUT' not in called_method) self.assertTrue('GET' not in called_method) + self.assertEqual(1, self.app.call_count) def test_copy_object_no_versioning_with_container_config_true(self): # set False to versions_write obviously and expect no extra @@ -337,31 +372,90 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): def test_new_version_success(self): self.app.register( - 'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + 'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed') self.app.register( - 'HEAD', '/v1/a/c/o', swob.HTTPOk, - {'last-modified': 'Wed, 19 Nov 2014 18:19:02 GMT'}, 'passed') + 'GET', '/v1/a/c/o', swob.HTTPOk, + {'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed') self.app.register( - 'COPY', '/v1/a/c/o', swob.HTTPCreated, {}, None) + 'PUT', '/v1/a/ver_cont/001o/0000000001.00000', swob.HTTPCreated, + {}, None) + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) + req = Request.blank( + '/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache, + 'CONTENT_LENGTH': '100', + 'swift.trans_id': 'fake_trans_id'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '201 Created') + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + self.assertEqual(['VW', 'VW', None], self.app.swift_sources) + self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids)) + + def test_new_version_get_errors(self): + # GET on source fails, expect client error response, + # no PUT should happen + self.app.register( + 'GET', '/v1/a/c/o', swob.HTTPBadRequest, {}, None) + cache = FakeCache({'versions': 'ver_cont'}) + req = Request.blank( + '/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache, + 'CONTENT_LENGTH': '100'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '412 Precondition 
Failed') + self.assertEqual(1, self.app.call_count) + + # GET on source fails, expect server error response + self.app.register( + 'GET', '/v1/a/c/o', swob.HTTPBadGateway, {}, None) + req = Request.blank( + '/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache, + 'CONTENT_LENGTH': '100'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '503 Service Unavailable') + self.assertEqual(2, self.app.call_count) + + def test_new_version_put_errors(self): + # PUT of version fails, expect client error response + self.app.register( + 'GET', '/v1/a/c/o', swob.HTTPOk, + {'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed') + self.app.register( + 'PUT', '/v1/a/ver_cont/001o/0000000001.00000', + swob.HTTPUnauthorized, {}, None) cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) req = Request.blank( '/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache, 'CONTENT_LENGTH': '100'}) status, headers, body = self.call_vw(req) - self.assertEqual(status, '200 OK') - self.assertEqual(len(self.authorized), 1) - self.assertRequestEqual(req, self.authorized[0]) + self.assertEqual(status, '412 Precondition Failed') + self.assertEqual(2, self.app.call_count) + + # PUT of version fails, expect server error response + self.app.register( + 'PUT', '/v1/a/ver_cont/001o/0000000001.00000', swob.HTTPBadGateway, + {}, None) + req = Request.blank( + '/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache, + 'CONTENT_LENGTH': '100'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '503 Service Unavailable') + self.assertEqual(4, self.app.call_count) @local_tz def test_new_version_sysmeta_precedence(self): self.app.register( 'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') self.app.register( - 'HEAD', '/v1/a/c/o', swob.HTTPOk, + 'GET', '/v1/a/c/o', swob.HTTPOk, {'last-modified': 'Thu, 1 Jan 1970 00:00:00 GMT'}, 'passed') self.app.register( - 'COPY', '/v1/a/c/o', swob.HTTPCreated, {}, None) + 'PUT', 
'/v1/a/ver_cont/001o/0000000000.00000', swob.HTTPOk, + {}, None) # fill cache with two different values for versions location # new middleware should use sysmeta first @@ -379,16 +473,14 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): # check that sysmeta header was used calls = self.app.calls_with_headers method, path, req_headers = calls[1] - self.assertEqual('COPY', method) - self.assertEqual('/v1/a/c/o', path) - self.assertEqual('ver_cont/001o/0000000000.00000', - req_headers['Destination']) + self.assertEqual('PUT', method) + self.assertEqual('/v1/a/ver_cont/001o/0000000000.00000', path) def test_copy_first_version(self): self.app.register( 'COPY', '/v1/a/src_cont/src_obj', swob.HTTPOk, {}, 'passed') self.app.register( - 'HEAD', '/v1/a/tgt_cont/tgt_obj', swob.HTTPNotFound, {}, None) + 'GET', '/v1/a/tgt_cont/tgt_obj', swob.HTTPNotFound, {}, None) cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) req = Request.blank( '/v1/a/src_cont/src_obj', @@ -399,15 +491,17 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertEqual(status, '200 OK') self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) + self.assertEqual(2, self.app.call_count) def test_copy_new_version(self): self.app.register( 'COPY', '/v1/a/src_cont/src_obj', swob.HTTPOk, {}, 'passed') self.app.register( - 'HEAD', '/v1/a/tgt_cont/tgt_obj', swob.HTTPOk, - {'last-modified': 'Wed, 19 Nov 2014 18:19:02 GMT'}, 'passed') + 'GET', '/v1/a/tgt_cont/tgt_obj', swob.HTTPOk, + {'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed') self.app.register( - 'COPY', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated, {}, None) + 'PUT', '/v1/a/ver_cont/007tgt_obj/0000000001.00000', swob.HTTPOk, + {}, None) cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) req = Request.blank( '/v1/a/src_cont/src_obj', @@ -418,15 +512,17 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertEqual(status, '200 OK') 
self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) + self.assertEqual(3, self.app.call_count) def test_copy_new_version_different_account(self): self.app.register( 'COPY', '/v1/src_a/src_cont/src_obj', swob.HTTPOk, {}, 'passed') self.app.register( - 'HEAD', '/v1/tgt_a/tgt_cont/tgt_obj', swob.HTTPOk, - {'last-modified': 'Wed, 19 Nov 2014 18:19:02 GMT'}, 'passed') + 'GET', '/v1/tgt_a/tgt_cont/tgt_obj', swob.HTTPOk, + {'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed') self.app.register( - 'COPY', '/v1/tgt_a/tgt_cont/tgt_obj', swob.HTTPCreated, {}, None) + 'PUT', '/v1/tgt_a/ver_cont/007tgt_obj/0000000001.00000', + swob.HTTPOk, {}, None) cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) req = Request.blank( '/v1/src_a/src_cont/src_obj', @@ -438,6 +534,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertEqual(status, '200 OK') self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) + self.assertEqual(3, self.app.call_count) def test_copy_new_version_bogus_account(self): cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) @@ -462,11 +559,14 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): req = Request.blank( '/v1/a/c/o', environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache, - 'CONTENT_LENGTH': '0'}) + 'CONTENT_LENGTH': '0', 'swift.trans_id': 'fake_trans_id'}) status, headers, body = self.call_vw(req) self.assertEqual(status, '200 OK') self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) + self.assertEqual(2, self.app.call_count) + self.assertEqual(['VW', None], self.app.swift_sources) + self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids)) prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' self.assertEqual(self.app.calls, [ @@ -475,8 +575,6 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): ]) def test_delete_latest_version_success(self): - 
self.app.register( - 'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') self.app.register( 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', @@ -492,8 +590,10 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): '"name": "001o/1", ' '"content_type": "text/plain"}]') self.app.register( - 'COPY', '/v1/a/ver_cont/001o/2', swob.HTTPCreated, - {}, None) + 'GET', '/v1/a/ver_cont/001o/2', swob.HTTPCreated, + {'content-length': '3'}, None) + self.app.register( + 'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None) self.app.register( 'DELETE', '/v1/a/ver_cont/001o/2', swob.HTTPOk, {}, None) @@ -503,11 +603,14 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): '/v1/a/c/o', headers={'X-If-Delete-At': 1}, environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache, - 'CONTENT_LENGTH': '0'}) + 'CONTENT_LENGTH': '0', 'swift.trans_id': 'fake_trans_id'}) status, headers, body = self.call_vw(req) self.assertEqual(status, '200 OK') self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) + self.assertEqual(4, self.app.call_count) + self.assertEqual(['VW', 'VW', 'VW', 'VW'], self.app.swift_sources) + self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids)) # check that X-If-Delete-At was removed from DELETE request req_headers = self.app.headers[-1] @@ -516,7 +619,8 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), - ('COPY', '/v1/a/ver_cont/001o/2'), + ('GET', '/v1/a/ver_cont/001o/2'), + ('PUT', '/v1/a/c/o'), ('DELETE', '/v1/a/ver_cont/001o/2'), ]) @@ -535,8 +639,10 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): '"name": "001o/1", ' '"content_type": "text/plain"}]') self.app.register( - 'COPY', '/v1/a/ver_cont/001o/1', swob.HTTPCreated, - {}, None) + 'GET', '/v1/a/ver_cont/001o/1', swob.HTTPOk, + {'content-length': '3'}, None) + 
self.app.register( + 'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None) self.app.register( 'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPOk, {}, None) @@ -554,7 +660,8 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), - ('COPY', '/v1/a/ver_cont/001o/1'), + ('GET', '/v1/a/ver_cont/001o/1'), + ('PUT', '/v1/a/c/o'), ('DELETE', '/v1/a/ver_cont/001o/1'), ]) @@ -576,11 +683,13 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): # expired object self.app.register( - 'COPY', '/v1/a/ver_cont/001o/2', swob.HTTPNotFound, + 'GET', '/v1/a/ver_cont/001o/2', swob.HTTPNotFound, {}, None) self.app.register( - 'COPY', '/v1/a/ver_cont/001o/1', swob.HTTPCreated, - {}, None) + 'GET', '/v1/a/ver_cont/001o/1', swob.HTTPCreated, + {'content-length': '3'}, None) + self.app.register( + 'PUT', '/v1/a/c/o', swob.HTTPOk, {}, None) self.app.register( 'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPOk, {}, None) @@ -594,19 +703,19 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertEqual(status, '200 OK') self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) + self.assertEqual(5, self.app.call_count) prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), - ('COPY', '/v1/a/ver_cont/001o/2'), - ('COPY', '/v1/a/ver_cont/001o/1'), + ('GET', '/v1/a/ver_cont/001o/2'), + ('GET', '/v1/a/ver_cont/001o/1'), + ('PUT', '/v1/a/c/o'), ('DELETE', '/v1/a/ver_cont/001o/1'), ]) def test_denied_DELETE_of_versioned_object(self): authorize_call = [] - self.app.register( - 'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') self.app.register( 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', @@ -621,11 +730,9 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): 
'"bytes": 3, ' '"name": "001o/1", ' '"content_type": "text/plain"}]') - self.app.register( - 'DELETE', '/v1/a/c/o', swob.HTTPForbidden, - {}, None) def fake_authorize(req): + # the container GET is pre-auth'd so here we deny the object DELETE authorize_call.append(req) return swob.HTTPForbidden() @@ -669,8 +776,10 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): '&marker=001o/2', swob.HTTPNotFound, {}, None) self.app.register( - 'COPY', '/v1/a/ver_cont/001o/2', swob.HTTPCreated, - {}, None) + 'GET', '/v1/a/ver_cont/001o/2', swob.HTTPCreated, + {'content-length': '3'}, None) + self.app.register( + 'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None) self.app.register( 'DELETE', '/v1/a/ver_cont/001o/2', swob.HTTPOk, {}, None) @@ -680,11 +789,15 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): '/v1/a/c/o', headers={'X-If-Delete-At': 1}, environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache, - 'CONTENT_LENGTH': '0'}) + 'CONTENT_LENGTH': '0', 'swift.trans_id': 'fake_trans_id'}) status, headers, body = self.call_vw(req) self.assertEqual(status, '200 OK') self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) + self.assertEqual(5, self.app.call_count) + self.assertEqual(['VW', 'VW', 'VW', 'VW', 'VW'], + self.app.swift_sources) + self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids)) # check that X-If-Delete-At was removed from DELETE request req_headers = self.app.headers[-1] @@ -694,7 +807,8 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), ('GET', prefix_listing_prefix + 'marker=001o/2'), - ('COPY', '/v1/a/ver_cont/001o/2'), + ('GET', '/v1/a/ver_cont/001o/2'), + ('PUT', '/v1/a/c/o'), ('DELETE', '/v1/a/ver_cont/001o/2'), ]) @@ -720,11 +834,13 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): # expired object self.app.register( - 'COPY', 
'/v1/a/ver_cont/001o/2', swob.HTTPNotFound, + 'GET', '/v1/a/ver_cont/001o/2', swob.HTTPNotFound, {}, None) self.app.register( - 'COPY', '/v1/a/ver_cont/001o/1', swob.HTTPCreated, - {}, None) + 'GET', '/v1/a/ver_cont/001o/1', swob.HTTPCreated, + {'content-length': '3'}, None) + self.app.register( + 'PUT', '/v1/a/c/o', swob.HTTPOk, {}, None) self.app.register( 'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPOk, {}, None) @@ -738,13 +854,15 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): self.assertEqual(status, '200 OK') self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) + self.assertEqual(6, self.app.call_count) prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), ('GET', prefix_listing_prefix + 'marker=001o/2'), - ('COPY', '/v1/a/ver_cont/001o/2'), - ('COPY', '/v1/a/ver_cont/001o/1'), + ('GET', '/v1/a/ver_cont/001o/2'), + ('GET', '/v1/a/ver_cont/001o/1'), + ('PUT', '/v1/a/c/o'), ('DELETE', '/v1/a/ver_cont/001o/1'), ]) @@ -810,13 +928,13 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): swob.HTTPOk, {}, json.dumps(list(reversed(old_versions[2:])))) # but all objects are already gone self.app.register( - 'COPY', '/v1/a/ver_cont/001o/4', swob.HTTPNotFound, + 'GET', '/v1/a/ver_cont/001o/4', swob.HTTPNotFound, {}, None) self.app.register( - 'COPY', '/v1/a/ver_cont/001o/3', swob.HTTPNotFound, + 'GET', '/v1/a/ver_cont/001o/3', swob.HTTPNotFound, {}, None) self.app.register( - 'COPY', '/v1/a/ver_cont/001o/2', swob.HTTPNotFound, + 'GET', '/v1/a/ver_cont/001o/2', swob.HTTPNotFound, {}, None) # second container server can't reverse @@ -839,8 +957,10 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): 'marker=001o/1&end_marker=001o/2', swob.HTTPOk, {}, '[]') self.app.register( - 'COPY', '/v1/a/ver_cont/001o/1', swob.HTTPOk, - {}, None) + 'GET', 
'/v1/a/ver_cont/001o/1', swob.HTTPOk, + {'content-length': '3'}, None) + self.app.register( + 'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None) self.app.register( 'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPNoContent, {}, None) @@ -855,14 +975,15 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), - ('COPY', '/v1/a/ver_cont/001o/4'), - ('COPY', '/v1/a/ver_cont/001o/3'), - ('COPY', '/v1/a/ver_cont/001o/2'), + ('GET', '/v1/a/ver_cont/001o/4'), + ('GET', '/v1/a/ver_cont/001o/3'), + ('GET', '/v1/a/ver_cont/001o/2'), ('GET', prefix_listing_prefix + 'marker=001o/2&reverse=on'), ('GET', prefix_listing_prefix + 'marker=&end_marker=001o/2'), ('GET', prefix_listing_prefix + 'marker=001o/0&end_marker=001o/2'), ('GET', prefix_listing_prefix + 'marker=001o/1&end_marker=001o/2'), - ('COPY', '/v1/a/ver_cont/001o/1'), + ('GET', '/v1/a/ver_cont/001o/1'), + ('PUT', '/v1/a/c/o'), ('DELETE', '/v1/a/ver_cont/001o/1'), ]) @@ -882,10 +1003,10 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): swob.HTTPOk, {}, json.dumps(list(reversed(old_versions[-2:])))) # but both objects are already gone self.app.register( - 'COPY', '/v1/a/ver_cont/001o/4', swob.HTTPNotFound, + 'GET', '/v1/a/ver_cont/001o/4', swob.HTTPNotFound, {}, None) self.app.register( - 'COPY', '/v1/a/ver_cont/001o/3', swob.HTTPNotFound, + 'GET', '/v1/a/ver_cont/001o/3', swob.HTTPNotFound, {}, None) # second container server can't reverse @@ -908,8 +1029,10 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): 'marker=001o/2&end_marker=001o/3', swob.HTTPOk, {}, '[]') self.app.register( - 'COPY', '/v1/a/ver_cont/001o/2', swob.HTTPOk, - {}, None) + 'GET', '/v1/a/ver_cont/001o/2', swob.HTTPOk, + {'content-length': '3'}, None) + self.app.register( + 'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None) 
self.app.register( 'DELETE', '/v1/a/ver_cont/001o/2', swob.HTTPNoContent, {}, None) @@ -924,12 +1047,13 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), - ('COPY', '/v1/a/ver_cont/001o/4'), - ('COPY', '/v1/a/ver_cont/001o/3'), + ('GET', '/v1/a/ver_cont/001o/4'), + ('GET', '/v1/a/ver_cont/001o/3'), ('GET', prefix_listing_prefix + 'marker=001o/3&reverse=on'), ('GET', prefix_listing_prefix + 'marker=&end_marker=001o/3'), ('GET', prefix_listing_prefix + 'marker=001o/1&end_marker=001o/3'), ('GET', prefix_listing_prefix + 'marker=001o/2&end_marker=001o/3'), - ('COPY', '/v1/a/ver_cont/001o/2'), + ('GET', '/v1/a/ver_cont/001o/2'), + ('PUT', '/v1/a/c/o'), ('DELETE', '/v1/a/ver_cont/001o/2'), ]) From 22933f5b55d3b8ba13b19c7004d24dfd28378f32 Mon Sep 17 00:00:00 2001 From: Daisuke Morita Date: Mon, 22 Feb 2016 18:03:48 -0800 Subject: [PATCH 084/141] Fix bug expirer unexpectedly deletes object created after x-delete-at As reported at bug/1546067, expirer might accidentally deletes an object which is created after x-delete-at timestamp. This is because expirer sends a request with "X-Timestamp: " and tombstone is named as .ts so if object creation time is between x-delete-at and expirer's DELETE request x-timestamp, the object might be hidden by tombstone. This possibility can be simply removed if the value of x-timestamp which an expirer sends is the same timestamp as x-delete-at of an actual object. Namely, expirer pretends to delete an object at the time an user really wants to delete it. 
Change-Id: I53e343f4e73b0b1c4ced9a3bc054541473d26cf8 Closes-Bug: #1546067 --- swift/obj/expirer.py | 3 +- test/probe/test_object_expirer.py | 46 +++++++++++++++++++++++++++++++ test/unit/obj/test_expirer.py | 4 +++ 3 files changed, 52 insertions(+), 1 deletion(-) diff --git a/swift/obj/expirer.py b/swift/obj/expirer.py index 7f26f129c3..e6f7de8241 100644 --- a/swift/obj/expirer.py +++ b/swift/obj/expirer.py @@ -293,5 +293,6 @@ class ObjectExpirer(Daemon): """ path = '/v1/' + urllib.parse.quote(actual_obj.lstrip('/')) self.swift.make_request('DELETE', path, - {'X-If-Delete-At': str(timestamp)}, + {'X-If-Delete-At': str(timestamp), + 'X-Timestamp': str(timestamp)}, (2, HTTP_PRECONDITION_FAILED)) diff --git a/test/probe/test_object_expirer.py b/test/probe/test_object_expirer.py index 3f8f39deed..3cfee3656b 100644 --- a/test/probe/test_object_expirer.py +++ b/test/probe/test_object_expirer.py @@ -13,6 +13,7 @@ # limitations under the License. import random +import time import uuid import unittest @@ -126,5 +127,50 @@ class TestObjectExpirer(ReplProbeTest): self.assertTrue(Timestamp(metadata['x-backend-timestamp']) > create_timestamp) + def test_expirer_object_should_not_be_expired(self): + obj_brain = BrainSplitter(self.url, self.token, self.container_name, + self.object_name, 'object', self.policy) + + # T(obj_created) < T(obj_deleted with x-delete-at) < T(obj_recreated) + # < T(expirer_executed) + # Recreated obj should be appeared in any split brain case + + # T(obj_created) + first_created_at = time.time() + # T(obj_deleted with x-delete-at) + # object-server accepts req only if X-Delete-At is later than 'now' + delete_at = int(time.time() + 1.5) + # T(obj_recreated) + recreated_at = time.time() + 2.0 + # T(expirer_executed) - 'now' + sleep_for_expirer = 2.01 + + obj_brain.put_container(int(self.policy)) + obj_brain.put_object( + headers={'X-Delete-At': delete_at, + 'X-Timestamp': Timestamp(first_created_at).internal}) + + # some object servers stopped + 
obj_brain.stop_primary_half() + obj_brain.put_object( + headers={'X-Timestamp': Timestamp(recreated_at).internal, + 'X-Object-Meta-Expired': 'False'}) + + # make sure auto-created containers get in the account listing + Manager(['container-updater']).once() + # some object servers recovered + obj_brain.start_primary_half() + # sleep to make sure expirer runs at the time after obj is recreated + time.sleep(sleep_for_expirer) + self.expirer.once() + # inconsistent state of objects is recovered + Manager(['object-replicator']).once() + + # check if you can get recreated object + metadata = self.client.get_object_metadata( + self.account, self.container_name, self.object_name) + self.assertIn('x-object-meta-expired', metadata) + + if __name__ == "__main__": unittest.main() diff --git a/test/unit/obj/test_expirer.py b/test/unit/obj/test_expirer.py index 17fddf174d..02a04dda01 100644 --- a/test/unit/obj/test_expirer.py +++ b/test/unit/obj/test_expirer.py @@ -675,6 +675,8 @@ class TestObjectExpirer(TestCase): ts = '1234' x.delete_actual_object('/path/to/object', ts) self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts) + self.assertEqual(got_env[0]['HTTP_X_TIMESTAMP'], + got_env[0]['HTTP_X_IF_DELETE_AT']) def test_delete_actual_object_nourlquoting(self): # delete_actual_object should not do its own url quoting because @@ -692,6 +694,8 @@ class TestObjectExpirer(TestCase): ts = '1234' x.delete_actual_object('/path/to/object name', ts) self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts) + self.assertEqual(got_env[0]['HTTP_X_TIMESTAMP'], + got_env[0]['HTTP_X_IF_DELETE_AT']) self.assertEqual(got_env[0]['PATH_INFO'], '/v1/path/to/object name') def test_delete_actual_object_raises_404(self): From 0e7fca576cee81dd6ca8774760cb880c3fff9c1c Mon Sep 17 00:00:00 2001 From: Tin Lam Date: Fri, 8 Apr 2016 18:17:44 -0500 Subject: [PATCH 085/141] Convert README.md to README.rst Change-Id: I223890bd4ffe469becc2127f9362243cdb52bc08 Closes-Bug: #1567026 --- MANIFEST.in | 2 +- README.md 
| 86 -------------------------------------------- README.rst | 100 ++++++++++++++++++++++++++++++++++++++++++++++++++++ setup.cfg | 2 +- 4 files changed, 102 insertions(+), 88 deletions(-) delete mode 100644 README.md create mode 100644 README.rst diff --git a/MANIFEST.in b/MANIFEST.in index bcbc015d33..87eb0c9c96 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,5 +1,5 @@ include AUTHORS LICENSE .functests .unittests .probetests test/__init__.py -include CHANGELOG CONTRIBUTING.rst README.md +include CHANGELOG CONTRIBUTING.rst README.rst include babel.cfg include test/sample.conf include tox.ini diff --git a/README.md b/README.md deleted file mode 100644 index 8a95684f3a..0000000000 --- a/README.md +++ /dev/null @@ -1,86 +0,0 @@ -# Swift - -A distributed object storage system designed to scale from a single machine -to thousands of servers. Swift is optimized for multi-tenancy and high -concurrency. Swift is ideal for backups, web and mobile content, and any other -unstructured data that can grow without bound. - -Swift provides a simple, REST-based API fully documented at -http://docs.openstack.org/. - -Swift was originally developed as the basis for Rackspace's Cloud Files and -was open-sourced in 2010 as part of the OpenStack project. It has since grown -to include contributions from many companies and has spawned a thriving -ecosystem of 3rd party tools. Swift's contributors are listed in the AUTHORS -file. - -## Docs - -To build documentation install sphinx (`pip install sphinx`), run -`python setup.py build_sphinx`, and then browse to /doc/build/html/index.html. -These docs are auto-generated after every commit and available online at -http://docs.openstack.org/developer/swift/. - -## For Developers - -The best place to get started is the ["SAIO - Swift All In One"](http://docs.openstack.org/developer/swift/development_saio.html). -This document will walk you through setting up a development cluster of Swift -in a VM. 
The SAIO environment is ideal for running small-scale tests against -swift and trying out new features and bug fixes. - -You can run unit tests with `.unittests` and functional tests with -`.functests`. - -If you would like to start contributing, check out these [notes](CONTRIBUTING.md) -to help you get started. - -### Code Organization - - * bin/: Executable scripts that are the processes run by the deployer - * doc/: Documentation - * etc/: Sample config files - * swift/: Core code - * account/: account server - * common/: code shared by different modules - * middleware/: "standard", officially-supported middleware - * ring/: code implementing Swift's ring - * container/: container server - * obj/: object server - * proxy/: proxy server - * test/: Unit and functional tests - -### Data Flow - -Swift is a WSGI application and uses eventlet's WSGI server. After the -processes are running, the entry point for new requests is the `Application` -class in `swift/proxy/server.py`. From there, a controller is chosen, and the -request is processed. The proxy may choose to forward the request to a back- -end server. For example, the entry point for requests to the object server is -the `ObjectController` class in `swift/obj/server.py`. - - -## For Deployers - -Deployer docs are also available at -http://docs.openstack.org/developer/swift/. A good starting point is at -http://docs.openstack.org/developer/swift/deployment_guide.html - -You can run functional tests against a swift cluster with `.functests`. These -functional tests require `/etc/swift/test.conf` to run. A sample config file -can be found in this source tree in `test/sample.conf`. - -## For Client Apps - -For client applications, official Python language bindings are provided at -http://github.com/openstack/python-swiftclient. - -Complete API documentation at -http://docs.openstack.org/api/openstack-object-storage/1.0/content/ - ----- - -For more information come hang out in #openstack-swift on freenode. 
- -Thanks, - -The Swift Development Team diff --git a/README.rst b/README.rst new file mode 100644 index 0000000000..aba20cb903 --- /dev/null +++ b/README.rst @@ -0,0 +1,100 @@ +Swift +===== + +A distributed object storage system designed to scale from a single +machine to thousands of servers. Swift is optimized for multi-tenancy +and high concurrency. Swift is ideal for backups, web and mobile +content, and any other unstructured data that can grow without bound. + +Swift provides a simple, REST-based API fully documented at +http://docs.openstack.org/. + +Swift was originally developed as the basis for Rackspace's Cloud Files +and was open-sourced in 2010 as part of the OpenStack project. It has +since grown to include contributions from many companies and has spawned +a thriving ecosystem of 3rd party tools. Swift's contributors are listed +in the AUTHORS file. + +Docs +---- + +To build documentation install sphinx (``pip install sphinx``), run +``python setup.py build_sphinx``, and then browse to +/doc/build/html/index.html. These docs are auto-generated after every +commit and available online at +http://docs.openstack.org/developer/swift/. + +For Developers +-------------- + +The best place to get started is the `"SAIO - Swift All In +One" `__. +This document will walk you through setting up a development cluster of +Swift in a VM. The SAIO environment is ideal for running small-scale +tests against swift and trying out new features and bug fixes. + +You can run unit tests with ``.unittests`` and functional tests with +``.functests``. + +If you would like to start contributing, check out these +`notes `__ to help you get started. 
+ +Code Organization +~~~~~~~~~~~~~~~~~ + +- bin/: Executable scripts that are the processes run by the deployer +- doc/: Documentation +- etc/: Sample config files +- swift/: Core code + + - account/: account server + - common/: code shared by different modules + + - middleware/: "standard", officially-supported middleware + - ring/: code implementing Swift's ring + + - container/: container server + - obj/: object server + - proxy/: proxy server + +- test/: Unit and functional tests + +Data Flow +~~~~~~~~~ + +Swift is a WSGI application and uses eventlet's WSGI server. After the +processes are running, the entry point for new requests is the +``Application`` class in ``swift/proxy/server.py``. From there, a +controller is chosen, and the request is processed. The proxy may choose +to forward the request to a back- end server. For example, the entry +point for requests to the object server is the ``ObjectController`` +class in ``swift/obj/server.py``. + +For Deployers +------------- + +Deployer docs are also available at +http://docs.openstack.org/developer/swift/. A good starting point is at +http://docs.openstack.org/developer/swift/deployment\_guide.html + +You can run functional tests against a swift cluster with +``.functests``. These functional tests require ``/etc/swift/test.conf`` +to run. A sample config file can be found in this source tree in +``test/sample.conf``. + +For Client Apps +--------------- + +For client applications, official Python language bindings are provided +at http://github.com/openstack/python-swiftclient. + +Complete API documentation at +http://docs.openstack.org/api/openstack-object-storage/1.0/content/ + +-------------- + +For more information come hang out in #openstack-swift on freenode. 
+ +Thanks, + +The Swift Development Team diff --git a/setup.cfg b/setup.cfg index a819a57f02..77c6824b44 100644 --- a/setup.cfg +++ b/setup.cfg @@ -2,7 +2,7 @@ name = swift summary = OpenStack Object Storage description-file = - README.md + README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://www.openstack.org/ From 91f980314f305c536e6e7c8607b8b52693c865f5 Mon Sep 17 00:00:00 2001 From: John Dickinson Date: Wed, 13 Apr 2016 11:57:45 -0500 Subject: [PATCH 086/141] fix fallocate_reserve traceback Previously, fallocate_reserve could result in a traceback. The OSError being raised didn't have the proper errno set. This patch sets the errno to ENOSPC. Change-Id: I017b0584972ca8832f3b160bbcdff335ae9a1aa6 --- swift/common/utils.py | 6 ++++-- test/unit/common/test_utils.py | 24 ++++++++++++++++++------ 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/swift/common/utils.py b/swift/common/utils.py index 68c6025f29..fb4baa4396 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -597,8 +597,10 @@ class FallocateWrapper(object): st = os.fstatvfs(fd) free = st.f_frsize * st.f_bavail - length.value if free <= FALLOCATE_RESERVE: - raise OSError('FALLOCATE_RESERVE fail %s <= %s' % ( - free, FALLOCATE_RESERVE)) + raise OSError( + errno.ENOSPC, + 'FALLOCATE_RESERVE fail %s <= %s' % (free, + FALLOCATE_RESERVE)) args = { 'fallocate': (fd, mode, offset, length), 'posix_fallocate': (fd, offset, length) diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index c2a9e93cc3..626043de3d 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -2611,7 +2611,9 @@ cluster_dfw1 = http://dfw1.host/v1/ fallocate(0, 1, 0, ctypes.c_uint64(0)) except OSError as err: exc = err - self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024') + self.assertEqual(str(exc), + '[Errno 28] FALLOCATE_RESERVE fail 1024 <= 1024') + self.assertEqual(err.errno, errno.ENOSPC) # Want 1024 
reserved, have 512 * 2 free, so fails utils.FALLOCATE_RESERVE = 1024 StatVFS.f_frsize = 512 @@ -2621,7 +2623,9 @@ cluster_dfw1 = http://dfw1.host/v1/ fallocate(0, 1, 0, ctypes.c_uint64(0)) except OSError as err: exc = err - self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024') + self.assertEqual(str(exc), + '[Errno 28] FALLOCATE_RESERVE fail 1024 <= 1024') + self.assertEqual(err.errno, errno.ENOSPC) # Want 2048 reserved, have 1024 * 1 free, so fails utils.FALLOCATE_RESERVE = 2048 StatVFS.f_frsize = 1024 @@ -2631,7 +2635,9 @@ cluster_dfw1 = http://dfw1.host/v1/ fallocate(0, 1, 0, ctypes.c_uint64(0)) except OSError as err: exc = err - self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 2048') + self.assertEqual(str(exc), + '[Errno 28] FALLOCATE_RESERVE fail 1024 <= 2048') + self.assertEqual(err.errno, errno.ENOSPC) # Want 2048 reserved, have 512 * 2 free, so fails utils.FALLOCATE_RESERVE = 2048 StatVFS.f_frsize = 512 @@ -2641,7 +2647,9 @@ cluster_dfw1 = http://dfw1.host/v1/ fallocate(0, 1, 0, ctypes.c_uint64(0)) except OSError as err: exc = err - self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 2048') + self.assertEqual(str(exc), + '[Errno 28] FALLOCATE_RESERVE fail 1024 <= 2048') + self.assertEqual(err.errno, errno.ENOSPC) # Want 1023 reserved, have 1024 * 1 free, but file size is 1, so # fails utils.FALLOCATE_RESERVE = 1023 @@ -2652,7 +2660,9 @@ cluster_dfw1 = http://dfw1.host/v1/ fallocate(0, 1, 0, ctypes.c_uint64(1)) except OSError as err: exc = err - self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1023 <= 1023') + self.assertEqual(str(exc), + '[Errno 28] FALLOCATE_RESERVE fail 1023 <= 1023') + self.assertEqual(err.errno, errno.ENOSPC) # Want 1022 reserved, have 1024 * 1 free, and file size is 1, so # succeeds utils.FALLOCATE_RESERVE = 1022 @@ -2675,7 +2685,9 @@ cluster_dfw1 = http://dfw1.host/v1/ fallocate(0, 1, 0, ctypes.c_uint64(0)) except OSError as err: exc = err - self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 
1024') + self.assertEqual(str(exc), + '[Errno 28] FALLOCATE_RESERVE fail 1024 <= 1024') + self.assertEqual(err.errno, errno.ENOSPC) finally: utils.FALLOCATE_RESERVE = orig_FALLOCATE_RESERVE utils.os.fstatvfs = orig_fstatvfs From fb3692c9bb662d9cadc4238920f86676857a7f1f Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 13 Apr 2016 11:07:44 -0700 Subject: [PATCH 087/141] Don't include conditional headers in SLO HEAD requests Previously, attempting to PUT a SLO manifest with `If-None-Match: *` would include the header when validating the segments, causing the upload to fail. Now when SLO validates segments, no conditional headers will be included in the HEAD request. Change-Id: I03ad454092d3caa73d29e6d30d8033b45bc96136 Closes-Bug: #1569253 --- swift/common/middleware/slo.py | 22 ++++++++-------------- test/functional/tests.py | 21 +++++++++++++++++++++ test/unit/common/middleware/helpers.py | 4 ++-- test/unit/common/middleware/test_slo.py | 20 ++++++++++++++++++++ 4 files changed, 51 insertions(+), 16 deletions(-) diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py index a3291dd7fb..0216264b99 100644 --- a/swift/common/middleware/slo.py +++ b/swift/common/middleware/slo.py @@ -399,7 +399,7 @@ class SloGetContext(WSGIContext): req.environ, path='/'.join(['', version, acc, con, obj]), method='GET', headers={'x-auth-token': req.headers.get('x-auth-token')}, - agent=('%(orig)s ' + 'SLO MultipartGET'), swift_source='SLO') + agent='%(orig)s SLO MultipartGET', swift_source='SLO') sub_resp = sub_req.get_response(self.slo.app) if not is_success(sub_resp.status_int): @@ -603,7 +603,7 @@ class SloGetContext(WSGIContext): get_req = make_subrequest( req.environ, method='GET', headers={'x-auth-token': req.headers.get('x-auth-token')}, - agent=('%(orig)s ' + 'SLO MultipartGET'), swift_source='SLO') + agent='%(orig)s SLO MultipartGET', swift_source='SLO') resp_iter = self._app_call(get_req.environ) # Any Content-Range from a manifest is almost certainly 
wrong for the @@ -857,20 +857,14 @@ class StaticLargeObject(object): obj_name = obj_name.encode('utf-8') obj_path = '/'.join(['', vrs, account, obj_name.lstrip('/')]) - new_env = req.environ.copy() - new_env['PATH_INFO'] = obj_path - new_env['REQUEST_METHOD'] = 'HEAD' - new_env['swift.source'] = 'SLO' - del(new_env['wsgi.input']) - del(new_env['QUERY_STRING']) - new_env['CONTENT_LENGTH'] = 0 - new_env['HTTP_USER_AGENT'] = \ - '%s MultipartPUT' % req.environ.get('HTTP_USER_AGENT') - if obj_path != last_obj_path: last_obj_path = obj_path - head_seg_resp = \ - Request.blank(obj_path, new_env).get_response(self) + sub_req = make_subrequest( + req.environ, path=obj_path + '?', # kill the query string + method='HEAD', + headers={'x-auth-token': req.headers.get('x-auth-token')}, + agent='%(orig)s SLO MultipartPUT', swift_source='SLO') + head_seg_resp = sub_req.get_response(self) if head_seg_resp.is_success: segment_length = head_seg_resp.content_length diff --git a/test/functional/tests.py b/test/functional/tests.py index 35339e68ec..f7430a69b6 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -3307,6 +3307,27 @@ class TestSlo(Base): manifest.read(hdrs={'If-Match': etag}) self.assert_status(200) + def test_slo_if_none_match_put(self): + file_item = self.env.container.file("manifest-if-none-match") + manifest = json.dumps([{ + 'size_bytes': 1024 * 1024, + 'etag': None, + 'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]) + + self.assertRaises(ResponseError, file_item.write, manifest, + parms={'multipart-manifest': 'put'}, + hdrs={'If-None-Match': '"not-star"'}) + self.assert_status(400) + + file_item.write(manifest, parms={'multipart-manifest': 'put'}, + hdrs={'If-None-Match': '*'}) + self.assert_status(201) + + self.assertRaises(ResponseError, file_item.write, manifest, + parms={'multipart-manifest': 'put'}, + hdrs={'If-None-Match': '*'}) + self.assert_status(412) + def test_slo_if_none_match_get(self): manifest = 
self.env.container.file("manifest-abcde") etag = manifest.info()['etag'] diff --git a/test/unit/common/middleware/helpers.py b/test/unit/common/middleware/helpers.py index 0847a1cbcf..2c5328ba26 100644 --- a/test/unit/common/middleware/helpers.py +++ b/test/unit/common/middleware/helpers.py @@ -119,10 +119,10 @@ class FakeSwift(object): if "CONTENT_TYPE" in env: self.uploaded[path][0]['Content-Type'] = env["CONTENT_TYPE"] - # range requests ought to work, hence conditional_response=True + # range requests ought to work, which require conditional_response=True req = swob.Request(env) resp = resp_class(req=req, headers=headers, body=body, - conditional_response=True) + conditional_response=req.method in ('GET', 'HEAD')) wsgi_iter = resp(env, start_response) self.mark_opened(path) return LeakTrackingIter(wsgi_iter, self, path) diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py index 830892a26c..03f5c23213 100644 --- a/test/unit/common/middleware/test_slo.py +++ b/test/unit/common/middleware/test_slo.py @@ -621,6 +621,26 @@ class TestSloPutManifest(SloTestCase): self.assertEqual(400, catcher.exception.status_int) self.assertIn("Unsatisfiable Range", catcher.exception.body) + def test_handle_multipart_put_success_conditional(self): + test_json_data = json.dumps([{'path': u'/cont/object', + 'etag': 'etagoftheobjectsegment', + 'size_bytes': 100}]) + req = Request.blank( + '/v1/AUTH_test/c/man?multipart-manifest=put', + environ={'REQUEST_METHOD': 'PUT'}, headers={'If-None-Match': '*'}, + body=test_json_data) + status, headers, body = self.call_slo(req) + self.assertEqual(('201 Created', ''), (status, body)) + self.assertEqual([ + ('HEAD', '/v1/AUTH_test/cont/object'), + ('PUT', '/v1/AUTH_test/c/man?multipart-manifest=put'), + ], self.app.calls) + # HEAD shouldn't be conditional + self.assertNotIn('If-None-Match', self.app.headers[0]) + # But the PUT should be + self.assertIn('If-None-Match', self.app.headers[1]) + 
self.assertEqual('*', self.app.headers[1]['If-None-Match']) + def test_handle_single_ranges(self): good_data = json.dumps( [{'path': '/checktest/a_1', 'etag': None, From be84b03a07892f4972dd59309ad261fc72bd7ede Mon Sep 17 00:00:00 2001 From: dharmendra Date: Thu, 14 Apr 2016 16:41:09 +0530 Subject: [PATCH 088/141] Removing unused clause Removing unused clause from common/middleware/dlo.py Change-Id: I7de9aaefd203c4f1be00ee74b89b5184fd419469 --- swift/common/middleware/dlo.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/swift/common/middleware/dlo.py b/swift/common/middleware/dlo.py index 80959f1149..2fd37c3d29 100644 --- a/swift/common/middleware/dlo.py +++ b/swift/common/middleware/dlo.py @@ -344,12 +344,11 @@ class GetContext(WSGIContext): close_if_possible(resp_iter) response = self.get_or_head_response(req, value) return response(req.environ, start_response) - else: - # Not a dynamic large object manifest; just pass it through. - start_response(self._response_status, - self._response_headers, - self._response_exc_info) - return resp_iter + # Not a dynamic large object manifest; just pass it through. + start_response(self._response_status, + self._response_headers, + self._response_exc_info) + return resp_iter class DynamicLargeObject(object): From c96d5c671db9c96f65067d93c0a307cf4b7d91b4 Mon Sep 17 00:00:00 2001 From: oshritf Date: Thu, 18 Feb 2016 14:50:08 +0200 Subject: [PATCH 089/141] Per container stat. report In addition to the container sync stat. report, keeping per container statistics allows administrator with more control over bytes transfered over a specific time per user account: The per container stats are crucial for billing purposes and provides the operator a 'progress bar' equivalent on the container's replication status. 
Change-Id: Ia8abcdaf53e466e8d60a957c76e32c2b2c5dc3fa --- doc/source/overview_container_sync.rst | 44 ++++++++ swift/container/sync.py | 142 +++++++++++++++++-------- test/unit/container/test_sync.py | 94 ++++++++++++++-- 3 files changed, 226 insertions(+), 54 deletions(-) diff --git a/doc/source/overview_container_sync.rst b/doc/source/overview_container_sync.rst index 25772bdf1e..e69ec2743e 100644 --- a/doc/source/overview_container_sync.rst +++ b/doc/source/overview_container_sync.rst @@ -121,6 +121,50 @@ should be noted there is no way for an end user to detect sync progress or problems other than HEADing both containers and comparing the overall information. + + +----------------------------- +Container Sync Statistics +----------------------------- + +Container Sync INFO level logs contains activity metrics and accounting +information foe insightful tracking. +Currently two different statistics are collected: + +About once an hour or so, accumulated statistics of all operations performed +by Container Sync are reported to the log file with the following format: +"Since (time): (sync) synced [(delete) deletes, (put) puts], (skip) skipped, +(fail) failed" +time: last report time +sync: number of containers with sync turned on that were successfully synced +delete: number of successful DELETE object requests to the target cluster +put: number of successful PUT object request to the target cluster +skip: number of containers whose sync has been turned off, but are not +yet cleared from the sync store +fail: number of containers with failure (due to exception, timeout or other +reason) + +For each container synced, per container statistics are reported with the +following format: +Container sync report: (container), time window start: (start), time window +end: %(end), puts: (puts), posts: (posts), deletes: (deletes), bytes: (bytes), +sync_point1: (point1), sync_point2: (point2), total_rows: (total) +container: account/container statistics are for +start: report 
start time +end: report end time +puts: number of successful PUT object requests to the target container +posts: N/A (0) +deletes: number of successful DELETE object requests to the target container +bytes: number of bytes sent over the network to the target container +point1: progress indication - the container's x_container_sync_point1 +point2: progress indication - the container's x_container_sync_point2 +total: number of objects processed at the container + +it is possible that more than one server syncs a container, therefore logfiles +from all servers need to be evaluated + + + ---------------------------------------------------------- Using the ``swift`` tool to set up synchronized containers ---------------------------------------------------------- diff --git a/swift/container/sync.py b/swift/container/sync.py index f72bc4838f..2ff4bff5c4 100644 --- a/swift/container/sync.py +++ b/swift/container/sync.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import collections import errno import os import uuid @@ -198,6 +199,14 @@ class ContainerSync(Daemon): self.container_skips = 0 #: Number of containers that had a failure of some type. self.container_failures = 0 + + #: Per container stats. These are collected per container. + #: puts - the number of puts that were done for the container + #: deletes - the number of deletes that were fot the container + #: bytes - the total number of bytes transferred per the container + self.container_stats = collections.defaultdict(int) + self.container_stats.clear() + #: Time of last stats report. 
self.reported = time() self.swift_dir = conf.get('swift_dir', '/etc/swift') @@ -239,6 +248,7 @@ class ContainerSync(Daemon): while True: begin = time() for path in self.sync_store.synced_containers_generator(): + self.container_stats.clear() self.container_sync(path) if time() - self.reported >= 3600: # once an hour self.report() @@ -282,6 +292,30 @@ class ContainerSync(Daemon): self.container_skips = 0 self.container_failures = 0 + def container_report(self, start, end, sync_point1, sync_point2, info, + max_row): + self.logger.info(_('Container sync report: %(container)s, ' + 'time window start: %(start)s, ' + 'time window end: %(end)s, ' + 'puts: %(puts)s, ' + 'posts: %(posts)s, ' + 'deletes: %(deletes)s, ' + 'bytes: %(bytes)s, ' + 'sync_point1: %(point1)s, ' + 'sync_point2: %(point2)s, ' + 'total_rows: %(total)s'), + {'container': '%s/%s' % (info['account'], + info['container']), + 'start': start, + 'end': end, + 'puts': self.container_stats['puts'], + 'posts': 0, + 'deletes': self.container_stats['deletes'], + 'bytes': self.container_stats['bytes'], + 'point1': sync_point1, + 'point2': sync_point2, + 'total': max_row}) + def container_sync(self, path): """ Checks the given path for a container database, determines if syncing @@ -339,51 +373,68 @@ class ContainerSync(Daemon): self.container_failures += 1 self.logger.increment('failures') return - stop_at = time() + self.container_time + start_at = time() + stop_at = start_at + self.container_time next_sync_point = None - while time() < stop_at and sync_point2 < sync_point1: - rows = broker.get_items_since(sync_point2, 1) - if not rows: - break - row = rows[0] - if row['ROWID'] > sync_point1: - break - # This node will only initially sync out one third of the - # objects (if 3 replicas, 1/4 if 4, etc.) and will skip - # problematic rows as needed in case of faults. - # This section will attempt to sync previously skipped - # rows in case the previous attempts by any of the nodes - # didn't succeed. 
- if not self.container_sync_row( - row, sync_to, user_key, broker, info, realm, - realm_key): - if not next_sync_point: - next_sync_point = sync_point2 - sync_point2 = row['ROWID'] - broker.set_x_container_sync_points(None, sync_point2) - if next_sync_point: - broker.set_x_container_sync_points(None, next_sync_point) - while time() < stop_at: - rows = broker.get_items_since(sync_point1, 1) - if not rows: - break - row = rows[0] - key = hash_path(info['account'], info['container'], - row['name'], raw_digest=True) - # This node will only initially sync out one third of the - # objects (if 3 replicas, 1/4 if 4, etc.). It'll come back - # around to the section above and attempt to sync - # previously skipped rows in case the other nodes didn't - # succeed or in case it failed to do so the first time. - if unpack_from('>I', key)[0] % \ - len(nodes) == ordinal: - self.container_sync_row( - row, sync_to, user_key, broker, info, realm, - realm_key) - sync_point1 = row['ROWID'] - broker.set_x_container_sync_points(sync_point1, None) - self.container_syncs += 1 - self.logger.increment('syncs') + sync_stage_time = start_at + try: + while time() < stop_at and sync_point2 < sync_point1: + rows = broker.get_items_since(sync_point2, 1) + if not rows: + break + row = rows[0] + if row['ROWID'] > sync_point1: + break + # This node will only initially sync out one third + # of the objects (if 3 replicas, 1/4 if 4, etc.) + # and will skip problematic rows as needed in case of + # faults. + # This section will attempt to sync previously skipped + # rows in case the previous attempts by any of the + # nodes didn't succeed. 
+ if not self.container_sync_row( + row, sync_to, user_key, broker, info, realm, + realm_key): + if not next_sync_point: + next_sync_point = sync_point2 + sync_point2 = row['ROWID'] + broker.set_x_container_sync_points(None, sync_point2) + if next_sync_point: + broker.set_x_container_sync_points(None, + next_sync_point) + else: + next_sync_point = sync_point2 + sync_stage_time = time() + while sync_stage_time < stop_at: + rows = broker.get_items_since(sync_point1, 1) + if not rows: + break + row = rows[0] + key = hash_path(info['account'], info['container'], + row['name'], raw_digest=True) + # This node will only initially sync out one third of + # the objects (if 3 replicas, 1/4 if 4, etc.). + # It'll come back around to the section above + # and attempt to sync previously skipped rows in case + # the other nodes didn't succeed or in case it failed + # to do so the first time. + if unpack_from('>I', key)[0] % \ + len(nodes) == ordinal: + self.container_sync_row( + row, sync_to, user_key, broker, info, realm, + realm_key) + sync_point1 = row['ROWID'] + broker.set_x_container_sync_points(sync_point1, None) + sync_stage_time = time() + self.container_syncs += 1 + self.logger.increment('syncs') + except Exception as ex: + raise ex + finally: + self.container_report(start_at, sync_stage_time, + sync_point1, + next_sync_point, + info, broker.get_max_row()) except (Exception, Timeout): self.container_failures += 1 self.logger.increment('failures') @@ -506,6 +557,7 @@ class ContainerSync(Daemon): if err.http_status != HTTP_NOT_FOUND: raise self.container_deletes += 1 + self.container_stats['deletes'] += 1 self.logger.increment('deletes') self.logger.timing_since('deletes.timing', start_time) else: @@ -556,6 +608,8 @@ class ContainerSync(Daemon): proxy=self.select_http_proxy(), logger=self.logger, timeout=self.conn_timeout) self.container_puts += 1 + self.container_stats['puts'] += 1 + self.container_stats['bytes'] += row['size'] self.logger.increment('puts') 
self.logger.timing_since('puts.timing', start_time) except ClientException as err: diff --git a/test/unit/container/test_sync.py b/test/unit/container/test_sync.py index ef4a4f5a82..8adc282235 100644 --- a/test/unit/container/test_sync.py +++ b/test/unit/container/test_sync.py @@ -58,6 +58,9 @@ class FakeContainerBroker(object): self.sync_point1 = -1 self.sync_point2 = -1 + def get_max_row(self): + return 1 + def get_info(self): return self.info @@ -736,6 +739,67 @@ class TestContainerSync(unittest.TestCase): sync.hash_path = orig_hash_path sync.delete_object = orig_delete_object + def test_container_report(self): + container_stats = {'puts': 0, + 'deletes': 0, + 'bytes': 0} + + def fake_container_sync_row(self, row, sync_to, + user_key, broker, info, realm, realm_key): + if 'deleted' in row: + container_stats['deletes'] += 1 + return True + + container_stats['puts'] += 1 + container_stats['bytes'] += row['size'] + return True + + def fake_hash_path(account, container, obj, raw_digest=False): + # Ensures that no rows match for second loop, ordinal is 0 and + # all hashes are 1 + return '\x01' * 16 + + fcb = FakeContainerBroker( + 'path', + info={'account': 'a', 'container': 'c', + 'storage_policy_index': 0, + 'x_container_sync_point1': 5, + 'x_container_sync_point2': -1}, + metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1), + 'x-container-sync-key': ('key', 1)}, + items_since=[{'ROWID': 1, 'name': 'o1', 'size': 0, + 'deleted': True}, + {'ROWID': 2, 'name': 'o2', 'size': 1010}, + {'ROWID': 3, 'name': 'o3', 'size': 0, + 'deleted': True}, + {'ROWID': 4, 'name': 'o4', 'size': 90}, + {'ROWID': 5, 'name': 'o5', 'size': 0}]) + + with mock.patch('swift.container.sync.InternalClient'), \ + mock.patch('swift.container.sync.hash_path', + fake_hash_path), \ + mock.patch('swift.container.sync.ContainerBroker', + lambda p: fcb): + cring = FakeRing() + cs = sync.ContainerSync({}, container_ring=cring, + logger=self.logger) + cs.container_stats = container_stats + 
cs._myips = ['10.0.0.0'] # Match + cs._myport = 1000 # Match + cs.allowed_sync_hosts = ['127.0.0.1'] + funcType = type(sync.ContainerSync.container_sync_row) + cs.container_sync_row = funcType(fake_container_sync_row, + cs, sync.ContainerSync) + cs.container_sync('isa.db') + # Succeeds because no rows match + log_line = cs.logger.get_lines_for_level('info')[0] + lines = log_line.split(',') + self.assertTrue('sync_point2: 5', lines.pop().strip()) + self.assertTrue('sync_point1: 5', lines.pop().strip()) + self.assertTrue('bytes: 1100', lines.pop().strip()) + self.assertTrue('deletes: 2', lines.pop().strip()) + self.assertTrue('puts: 3', lines.pop().strip()) + def test_container_sync_row_delete(self): self._test_container_sync_row_delete(None, None) @@ -783,7 +847,8 @@ class TestContainerSync(unittest.TestCase): self.assertTrue(cs.container_sync_row( {'deleted': True, 'name': 'object', - 'created_at': created_at}, 'http://sync/to/path', + 'created_at': created_at, + 'size': '1000'}, 'http://sync/to/path', 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) @@ -925,7 +990,8 @@ class TestContainerSync(unittest.TestCase): self.assertTrue(cs.container_sync_row( {'deleted': False, 'name': 'object', - 'created_at': created_at}, 'http://sync/to/path', + 'created_at': created_at, + 'size': 50}, 'http://sync/to/path', 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) @@ -953,7 +1019,8 @@ class TestContainerSync(unittest.TestCase): self.assertTrue(cs.container_sync_row( {'deleted': False, 'name': 'object', - 'created_at': timestamp.internal}, 'http://sync/to/path', + 'created_at': timestamp.internal, + 'size': 60}, 'http://sync/to/path', 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) @@ -966,7 +1033,8 @@ class TestContainerSync(unittest.TestCase): 
self.assertTrue(cs.container_sync_row( {'deleted': False, 'name': 'object', - 'created_at': '1.1'}, 'http://sync/to/path', + 'created_at': '1.1', + 'size': 60}, 'http://sync/to/path', 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) @@ -987,7 +1055,8 @@ class TestContainerSync(unittest.TestCase): self.assertFalse(cs.container_sync_row( {'deleted': False, 'name': 'object', - 'created_at': timestamp.internal}, 'http://sync/to/path', + 'created_at': timestamp.internal, + 'size': 70}, 'http://sync/to/path', 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) @@ -1011,7 +1080,8 @@ class TestContainerSync(unittest.TestCase): self.assertFalse(cs.container_sync_row( {'deleted': False, 'name': 'object', - 'created_at': timestamp.internal}, 'http://sync/to/path', + 'created_at': timestamp.internal, + 'size': 80}, 'http://sync/to/path', 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) @@ -1038,7 +1108,8 @@ class TestContainerSync(unittest.TestCase): self.assertFalse(cs.container_sync_row( {'deleted': False, 'name': 'object', - 'created_at': timestamp.internal}, 'http://sync/to/path', + 'created_at': timestamp.internal, + 'size': 90}, 'http://sync/to/path', 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) @@ -1055,7 +1126,8 @@ class TestContainerSync(unittest.TestCase): self.assertFalse(cs.container_sync_row( {'deleted': False, 'name': 'object', - 'created_at': timestamp.internal}, 'http://sync/to/path', + 'created_at': timestamp.internal, + 'size': 50}, 'http://sync/to/path', 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) @@ -1072,7 +1144,8 @@ class TestContainerSync(unittest.TestCase): self.assertFalse(cs.container_sync_row( 
{'deleted': False, 'name': 'object', - 'created_at': timestamp.internal}, 'http://sync/to/path', + 'created_at': timestamp.internal, + 'size': 50}, 'http://sync/to/path', 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) @@ -1093,7 +1166,8 @@ class TestContainerSync(unittest.TestCase): test_row = {'deleted': False, 'name': 'object', 'created_at': timestamp.internal, - 'etag': '1111'} + 'etag': '1111', + 'size': 10} test_info = {'account': 'a', 'container': 'c', 'storage_policy_index': 0} From 746d928a875281a7154dcd438f46a58bbf656db9 Mon Sep 17 00:00:00 2001 From: Dmitriy Ukhlov Date: Fri, 8 Apr 2016 16:00:16 +0300 Subject: [PATCH 090/141] Adds eventlet monkey patching of select module if thread is patched Oslo.messaging pika driver requires patching of select module if thread is patched. Pika driver uses select call and if it is not patched, consuming messages blocks whole eventlet loop Closes-Bug: #1570242 Change-Id: I9756737309f401ebddb7475eb84725f65bca01bf --- swift/common/wsgi.py | 8 ++++++-- swift/container/updater.py | 5 +++-- swift/obj/updater.py | 3 ++- test/unit/common/test_wsgi.py | 3 +++ 4 files changed, 14 insertions(+), 5 deletions(-) diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index 7ba97eefce..2c169eb2a6 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -407,8 +407,12 @@ def run_server(conf, logger, sock, global_conf=None): wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60) eventlet.hubs.use_hub(get_hub()) - # NOTE(sileht): monkey-patching thread is required by python-keystoneclient - eventlet.patcher.monkey_patch(all=False, socket=True, thread=True) + # NOTE(sileht): + # monkey-patching thread is required by python-keystoneclient; + # monkey-patching select is required by oslo.messaging pika driver + # if thread is monkey-patched.
+ eventlet.patcher.monkey_patch(all=False, socket=True, select=True, + thread=True) eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no')) eventlet.debug.hub_exceptions(eventlet_debug) wsgi_logger = NullLogger() diff --git a/swift/container/updater.py b/swift/container/updater.py index 4703a5a04e..f2cd8f3328 100644 --- a/swift/container/updater.py +++ b/swift/container/updater.py @@ -143,7 +143,8 @@ class ContainerUpdater(Daemon): pid2filename[pid] = tmpfilename else: signal.signal(signal.SIGTERM, signal.SIG_DFL) - patcher.monkey_patch(all=False, socket=True, thread=True) + patcher.monkey_patch(all=False, socket=True, select=True, + thread=True) self.no_changes = 0 self.successes = 0 self.failures = 0 @@ -177,7 +178,7 @@ class ContainerUpdater(Daemon): """ Run the updater once. """ - patcher.monkey_patch(all=False, socket=True, thread=True) + patcher.monkey_patch(all=False, socket=True, select=True, thread=True) self.logger.info(_('Begin container update single threaded sweep')) begin = time.time() self.no_changes = 0 diff --git a/swift/obj/updater.py b/swift/obj/updater.py index 87c21c397f..9bf4ef19a3 100644 --- a/swift/obj/updater.py +++ b/swift/obj/updater.py @@ -94,7 +94,8 @@ class ObjectUpdater(Daemon): pids.append(pid) else: signal.signal(signal.SIGTERM, signal.SIG_DFL) - patcher.monkey_patch(all=False, socket=True, thread=True) + patcher.monkey_patch(all=False, socket=True, select=True, + thread=True) self.successes = 0 self.failures = 0 forkbegin = time.time() diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py index c062762f93..f39f215499 100644 --- a/test/unit/common/test_wsgi.py +++ b/test/unit/common/test_wsgi.py @@ -380,6 +380,7 @@ class TestWSGI(unittest.TestCase): _eventlet.hubs.use_hub.assert_called_with(utils.get_hub()) _eventlet.patcher.monkey_patch.assert_called_with(all=False, socket=True, + select=True, thread=True) _eventlet.debug.hub_exceptions.assert_called_with(False) self.assertTrue(_wsgi.server.called) 
@@ -468,6 +469,7 @@ class TestWSGI(unittest.TestCase): _eventlet.hubs.use_hub.assert_called_with(utils.get_hub()) _eventlet.patcher.monkey_patch.assert_called_with(all=False, socket=True, + select=True, thread=True) _eventlet.debug.hub_exceptions.assert_called_with(False) self.assertTrue(_wsgi.server.called) @@ -520,6 +522,7 @@ class TestWSGI(unittest.TestCase): _eventlet.hubs.use_hub.assert_called_with(utils.get_hub()) _eventlet.patcher.monkey_patch.assert_called_with(all=False, socket=True, + select=True, thread=True) _eventlet.debug.hub_exceptions.assert_called_with(True) self.assertTrue(mock_server.called) From 3bc33e59aaeefdb940794782a36517c2b4eb0642 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Thu, 14 Apr 2016 20:01:38 -0700 Subject: [PATCH 091/141] Fix download resumption after getting no data. When the proxy is handling an object GET response and an object server fails to send data in a timely fashion, the proxy can pick up where it left off with another object server; other than a pause in the download, the client doesn't even know anything happened. However, if the proxy received the GET response headers but no data, it would resume at the wrong spot. In particular, for an N-byte object, it would ask the second object server for the last N-1 bytes (or equivalently, all but the first byte). For a replicated storage policy, this would result in the client getting an abbreviated download, while for an EC storage policy, the proxy would 500 after trying to decode a fragment set with a bogus fragment in it. This commit fixes the resumption logic to ask for all N bytes of the object from the second object server. 
Change-Id: Ib9e28c3dceaded1708e7a30844b534566c7a320c --- swift/proxy/controllers/base.py | 9 +++----- test/unit/proxy/controllers/test_base.py | 26 ++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index 7dcc1ca3de..74e8fa1ac6 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -678,18 +678,15 @@ class ResumingGetter(object): end -= num_bytes else: begin += num_bytes - if end and begin == end + 1: + + if end is not None and begin == end + 1: # we sent out exactly the first range's worth of bytes, so # we're done with it raise RangeAlreadyComplete() elif end and begin > end: raise HTTPRequestedRangeNotSatisfiable() - elif end and begin: - req_range.ranges = [(begin, end)] + req_range.ranges[1:] - elif end: - req_range.ranges = [(None, end)] + req_range.ranges[1:] else: - req_range.ranges = [(begin, None)] + req_range.ranges[1:] + req_range.ranges = [(begin, end)] + req_range.ranges[1:] self.backend_headers['Range'] = str(req_range) else: diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index 330250e2c9..0c5d7fc9eb 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -720,6 +720,32 @@ class TestFuncs(unittest.TestCase): handler.fast_forward(20) self.assertEqual(handler.backend_headers['Range'], 'bytes=-80') + def test_range_fast_forward_after_data_timeout(self): + req = Request.blank('/') + + # We get a 200 and learn that it's a 1000-byte object, but receive 0 + # bytes of data, so then we get a new node, fast_forward(0), and + # send out a new request. That new request must be for all 1000 + # bytes. 
+ handler = GetOrHeadHandler(None, req, None, None, None, None, {}) + handler.learn_size_from_content_range(0, 999, 1000) + handler.fast_forward(0) + self.assertEqual(handler.backend_headers['Range'], 'bytes=0-999') + + # Same story as above, but a 1-byte object so we can have our byte + # indices be 0. + handler = GetOrHeadHandler(None, req, None, None, None, None, {}) + handler.learn_size_from_content_range(0, 0, 1) + handler.fast_forward(0) + self.assertEqual(handler.backend_headers['Range'], 'bytes=0-0') + + # last 100 bytes + handler = GetOrHeadHandler(None, req, None, None, None, None, + {'Range': 'bytes=-100'}) + handler.learn_size_from_content_range(900, 999, 1000) + handler.fast_forward(0) + self.assertEqual(handler.backend_headers['Range'], 'bytes=900-999') + def test_transfer_headers_with_sysmeta(self): base = Controller(self.app) good_hdrs = {'x-base-sysmeta-foo': 'ok', From b12222548de012ed496fba70c3d12aa31f003e3d Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Fri, 15 Apr 2016 16:08:26 -0700 Subject: [PATCH 092/141] Tighten up ResumingGetter.fast_forward When the current range is a "last N bytes" request, properly raise HTTPRequestedRangeNotSatisfiable/RangeAlreadyComplete errors. Also, add a couple tests for RangeAlreadyComplete with "normal" range requests. 
Change-Id: Icbfd621e7160747c5dfaf9f189da7d74e45a5347 --- swift/proxy/controllers/base.py | 17 ++++++++++------- test/unit/proxy/controllers/test_base.py | 11 +++++++++++ 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index 74e8fa1ac6..d24cb3f293 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -676,18 +676,21 @@ class ResumingGetter(object): if begin is None: # this is a -50 range req (last 50 bytes of file) end -= num_bytes + if end == 0: + # we sent out exactly the first range's worth of bytes, so + # we're done with it + raise RangeAlreadyComplete() else: begin += num_bytes + if end is not None and begin == end + 1: + # we sent out exactly the first range's worth of bytes, so + # we're done with it + raise RangeAlreadyComplete() - if end is not None and begin == end + 1: - # we sent out exactly the first range's worth of bytes, so - # we're done with it - raise RangeAlreadyComplete() - elif end and begin > end: + if end is not None and (begin > end or end < 0): raise HTTPRequestedRangeNotSatisfiable() - else: - req_range.ranges = [(begin, end)] + req_range.ranges[1:] + req_range.ranges = [(begin, end)] + req_range.ranges[1:] self.backend_headers['Range'] = str(req_range) else: self.backend_headers['Range'] = 'bytes=%d-' % num_bytes diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index 0c5d7fc9eb..ed23cd8fc0 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -709,6 +709,8 @@ class TestFuncs(unittest.TestCase): self.assertEqual(handler.backend_headers['Range'], 'bytes=43-50') self.assertRaises(HTTPException, handler.fast_forward, 80) + self.assertRaises(exceptions.RangeAlreadyComplete, + handler.fast_forward, 8) handler = GetOrHeadHandler(None, req, None, None, None, None, {'Range': 'bytes=23-'}) @@ -719,6 +721,15 @@ class 
TestFuncs(unittest.TestCase): {'Range': 'bytes=-100'}) handler.fast_forward(20) self.assertEqual(handler.backend_headers['Range'], 'bytes=-80') + self.assertRaises(HTTPException, + handler.fast_forward, 100) + self.assertRaises(exceptions.RangeAlreadyComplete, + handler.fast_forward, 80) + + handler = GetOrHeadHandler(None, req, None, None, None, None, + {'Range': 'bytes=0-0'}) + self.assertRaises(exceptions.RangeAlreadyComplete, + handler.fast_forward, 1) def test_range_fast_forward_after_data_timeout(self): req = Request.blank('/') From ca304cd08e9f8d37e4027f2f71dd77ebba3a30f9 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Fri, 15 Apr 2016 17:22:44 -0700 Subject: [PATCH 093/141] Ignore negative suffix-byte-range requests. If the client asked for "Range: bytes=--123", Swift would respond with a 206 and a Content-Length of -123. Now that Range header is ignored just like all kinds of other invalid Range headers. Change-Id: I30d4522d223076ce342d20c52f57ff0eb2aea1f4 Closes-Bug: 1571106 --- swift/common/swob.py | 4 +++- test/unit/common/test_swob.py | 26 ++++++++++++++------------ 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/swift/common/swob.py b/swift/common/swob.py index 704212084d..0954ef9d3c 100644 --- a/swift/common/swob.py +++ b/swift/common/swob.py @@ -486,7 +486,9 @@ class Range(object): # when end contains non numeric value, this also causes # ValueError end = int(end) - if start is not None and end < start: + if end < 0: + raise ValueError('Invalid Range header: %s' % headerval) + elif start is not None and end < start: raise ValueError('Invalid Range header: %s' % headerval) else: end = None diff --git a/test/unit/common/test_swob.py b/test/unit/common/test_swob.py index fede30785d..4f8d8f7be9 100644 --- a/test/unit/common/test_swob.py +++ b/test/unit/common/test_swob.py @@ -231,12 +231,13 @@ class TestRange(unittest.TestCase): def test_range_invalid_syntax(self): - def _check_invalid_range(range_value): + def 
_assert_invalid_range(range_value): try: swift.common.swob.Range(range_value) - return False + self.fail("Expected %r to be invalid, but wasn't" % + (range_value,)) except ValueError: - return True + pass """ All the following cases should result ValueError exception @@ -248,15 +249,16 @@ class TestRange(unittest.TestCase): 6. any combination of the above """ - self.assertTrue(_check_invalid_range('nonbytes=foobar,10-2')) - self.assertTrue(_check_invalid_range('bytes=5-3')) - self.assertTrue(_check_invalid_range('bytes=-')) - self.assertTrue(_check_invalid_range('bytes=45')) - self.assertTrue(_check_invalid_range('bytes=foo-bar,3-5')) - self.assertTrue(_check_invalid_range('bytes=4-10,45')) - self.assertTrue(_check_invalid_range('bytes=foobar,3-5')) - self.assertTrue(_check_invalid_range('bytes=nonumber-5')) - self.assertTrue(_check_invalid_range('bytes=nonumber')) + _assert_invalid_range('nonbytes=foobar,10-2') + _assert_invalid_range('bytes=5-3') + _assert_invalid_range('bytes=-') + _assert_invalid_range('bytes=45') + _assert_invalid_range('bytes=foo-bar,3-5') + _assert_invalid_range('bytes=4-10,45') + _assert_invalid_range('bytes=foobar,3-5') + _assert_invalid_range('bytes=nonumber-5') + _assert_invalid_range('bytes=nonumber') + _assert_invalid_range('bytes=--1') class TestMatch(unittest.TestCase): From 7d7eaab5afa4c36f0ac8467784138fc423f6ac4f Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 18 Apr 2016 06:31:34 +0000 Subject: [PATCH 094/141] Imported Translations from Zanata For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: I88be5c9dbb1fcc3a15592d42af7160478308b1f4 --- swift/locale/de/LC_MESSAGES/swift.po | 8 +- swift/locale/es/LC_MESSAGES/swift.po | 9 +- swift/locale/fr/LC_MESSAGES/swift.po | 11 +- swift/locale/it/LC_MESSAGES/swift.po | 11 +- swift/locale/ja/LC_MESSAGES/swift.po | 10 +- swift/locale/ko_KR/LC_MESSAGES/swift.po | 8 +- 
swift/locale/pt_BR/LC_MESSAGES/swift.po | 10 +- swift/locale/ru/LC_MESSAGES/swift.po | 11 +- swift/locale/swift.pot | 156 ++++++++++++------------ swift/locale/tr_TR/LC_MESSAGES/swift.po | 8 +- swift/locale/zh_CN/LC_MESSAGES/swift.po | 10 +- swift/locale/zh_TW/LC_MESSAGES/swift.po | 10 +- 12 files changed, 119 insertions(+), 143 deletions(-) diff --git a/swift/locale/de/LC_MESSAGES/swift.po b/swift/locale/de/LC_MESSAGES/swift.po index 9a4724a549..ab9dc8aa84 100644 --- a/swift/locale/de/LC_MESSAGES/swift.po +++ b/swift/locale/de/LC_MESSAGES/swift.po @@ -6,12 +6,12 @@ # Andreas Jaeger , 2014 # Ettore Atalan , 2014-2015 # Jonas John , 2015 -# Frank Kloeker , 2016. #zanata +# Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev28\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-04-07 22:18+0000\n" +"Project-Id-Version: swift 2.7.1.dev50\n" +"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" +"POT-Creation-Date: 2016-04-17 21:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" diff --git a/swift/locale/es/LC_MESSAGES/swift.po b/swift/locale/es/LC_MESSAGES/swift.po index 1c3cbef87e..9a137fbb2a 100644 --- a/swift/locale/es/LC_MESSAGES/swift.po +++ b/swift/locale/es/LC_MESSAGES/swift.po @@ -3,15 +3,12 @@ # This file is distributed under the same license as the swift project. # # Translators: -# Carlos A. Muñoz , 2015. #zanata -# OpenStack Infra , 2015. #zanata -# Tom Cocozzello , 2015. #zanata # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev28\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-04-07 22:18+0000\n" +"Project-Id-Version: swift 2.7.1.dev50\n" +"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" +"POT-Creation-Date: 2016-04-17 21:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" diff --git a/swift/locale/fr/LC_MESSAGES/swift.po b/swift/locale/fr/LC_MESSAGES/swift.po index bba51cb434..138c49e579 100644 --- a/swift/locale/fr/LC_MESSAGES/swift.po +++ b/swift/locale/fr/LC_MESSAGES/swift.po @@ -4,15 +4,12 @@ # # Translators: # Maxime COQUEREL , 2014 -# OpenStack Infra , 2015. #zanata -# Tom Cocozzello , 2015. #zanata -# Angelique Pillal , 2016. #zanata -# Gael Rehault , 2016. #zanata +# Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev28\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-04-07 22:18+0000\n" +"Project-Id-Version: swift 2.7.1.dev50\n" +"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" +"POT-Creation-Date: 2016-04-17 21:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" diff --git a/swift/locale/it/LC_MESSAGES/swift.po b/swift/locale/it/LC_MESSAGES/swift.po index 092641a79e..abba03b38e 100644 --- a/swift/locale/it/LC_MESSAGES/swift.po +++ b/swift/locale/it/LC_MESSAGES/swift.po @@ -3,15 +3,12 @@ # This file is distributed under the same license as the swift project. # # Translators: -# OpenStack Infra , 2015. #zanata -# Tom Cocozzello , 2015. #zanata -# Alessandra , 2016. #zanata -# Remo Mattei , 2016. #zanata +# Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev28\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-04-07 22:18+0000\n" +"Project-Id-Version: swift 2.7.1.dev50\n" +"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" +"POT-Creation-Date: 2016-04-17 21:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" diff --git a/swift/locale/ja/LC_MESSAGES/swift.po b/swift/locale/ja/LC_MESSAGES/swift.po index d328e5570c..88815803ae 100644 --- a/swift/locale/ja/LC_MESSAGES/swift.po +++ b/swift/locale/ja/LC_MESSAGES/swift.po @@ -4,16 +4,12 @@ # # Translators: # Sasuke(Kyohei MORIYAMA) <>, 2015 -# Akihiro Motoki , 2015. #zanata -# OpenStack Infra , 2015. #zanata -# Tom Cocozzello , 2015. #zanata # Andreas Jaeger , 2016. #zanata -# 笹原 昌美 , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev28\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-04-07 22:18+0000\n" +"Project-Id-Version: swift 2.7.1.dev50\n" +"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" +"POT-Creation-Date: 2016-04-17 21:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" diff --git a/swift/locale/ko_KR/LC_MESSAGES/swift.po b/swift/locale/ko_KR/LC_MESSAGES/swift.po index b5fb2426df..84889e2e16 100644 --- a/swift/locale/ko_KR/LC_MESSAGES/swift.po +++ b/swift/locale/ko_KR/LC_MESSAGES/swift.po @@ -5,14 +5,12 @@ # Translators: # Mario Cho , 2014 # Ying Chun Guo , 2015 -# Lucas Palm , 2015. #zanata -# OpenStack Infra , 2015. #zanata # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev28\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-04-07 22:18+0000\n" +"Project-Id-Version: swift 2.7.1.dev50\n" +"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" +"POT-Creation-Date: 2016-04-17 21:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" diff --git a/swift/locale/pt_BR/LC_MESSAGES/swift.po b/swift/locale/pt_BR/LC_MESSAGES/swift.po index 61005cbfae..ffcdab0de3 100644 --- a/swift/locale/pt_BR/LC_MESSAGES/swift.po +++ b/swift/locale/pt_BR/LC_MESSAGES/swift.po @@ -7,14 +7,12 @@ # Lucas Ribeiro , 2014 # thiagol , 2015 # Volmar Oliveira Junior , 2014 -# Lucas Palm , 2015. #zanata -# OpenStack Infra , 2015. #zanata -# Carlos Marques , 2016. #zanata +# Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev28\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-04-07 22:18+0000\n" +"Project-Id-Version: swift 2.7.1.dev50\n" +"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" +"POT-Creation-Date: 2016-04-17 21:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" diff --git a/swift/locale/ru/LC_MESSAGES/swift.po b/swift/locale/ru/LC_MESSAGES/swift.po index a074dc851d..95e8c7e762 100644 --- a/swift/locale/ru/LC_MESSAGES/swift.po +++ b/swift/locale/ru/LC_MESSAGES/swift.po @@ -3,15 +3,12 @@ # This file is distributed under the same license as the swift project. # # Translators: -# Lucas Palm , 2015. #zanata -# OpenStack Infra , 2015. #zanata -# Filatov Sergey , 2016. #zanata -# Grigory Mokhin , 2016. #zanata +# Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev28\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-04-07 22:18+0000\n" +"Project-Id-Version: swift 2.7.1.dev50\n" +"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" +"POT-Creation-Date: 2016-04-17 21:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" diff --git a/swift/locale/swift.pot b/swift/locale/swift.pot index 5bf3b2de26..b4b78d11e0 100644 --- a/swift/locale/swift.pot +++ b/swift/locale/swift.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev21\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-04-07 07:00+0000\n" +"Project-Id-Version: swift 2.7.1.dev50\n" +"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" +"POT-Creation-Date: 2016-04-18 06:31+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -63,8 +63,8 @@ msgstr "" msgid "ERROR Could not get account info %s" msgstr "" -#: swift/account/reaper.py:139 swift/common/utils.py:2357 -#: swift/obj/diskfile.py:361 swift/obj/updater.py:88 swift/obj/updater.py:131 +#: swift/account/reaper.py:139 swift/common/utils.py:2392 +#: swift/obj/diskfile.py:361 swift/obj/updater.py:88 swift/obj/updater.py:132 #, python-format msgid "Skipping %s as it is not mounted" msgstr "" @@ -143,7 +143,7 @@ msgid "Account %(account)s has not been reaped since %(time)s" msgstr "" #: swift/account/reaper.py:376 swift/account/reaper.py:430 -#: swift/account/reaper.py:506 swift/container/updater.py:307 +#: swift/account/reaper.py:506 swift/container/updater.py:308 #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "" @@ -210,7 +210,7 @@ msgstr "" msgid "Removed %(remove)d dbs" msgstr "" -#: swift/common/db_replicator.py:215 swift/obj/replicator.py:516 +#: swift/common/db_replicator.py:215 swift/obj/replicator.py:517 #, 
python-format msgid "%(success)s successes, %(failure)s failures" msgstr "" @@ -426,87 +426,87 @@ msgstr "" msgid "Unable to perform fsync() on directory %(dir)s: %(err)s" msgstr "" -#: swift/common/utils.py:1245 +#: swift/common/utils.py:1271 #, python-format msgid "%s: Connection reset by peer" msgstr "" -#: swift/common/utils.py:1247 swift/common/utils.py:1251 +#: swift/common/utils.py:1273 swift/common/utils.py:1284 #, python-format msgid "%(type)s: %(value)s" msgstr "" -#: swift/common/utils.py:1501 +#: swift/common/utils.py:1536 msgid "Connection refused" msgstr "" -#: swift/common/utils.py:1503 +#: swift/common/utils.py:1538 msgid "Host unreachable" msgstr "" -#: swift/common/utils.py:1505 +#: swift/common/utils.py:1540 msgid "Connection timeout" msgstr "" -#: swift/common/utils.py:1783 +#: swift/common/utils.py:1818 msgid "UNCAUGHT EXCEPTION" msgstr "" -#: swift/common/utils.py:1838 +#: swift/common/utils.py:1873 msgid "Error: missing config path argument" msgstr "" -#: swift/common/utils.py:1843 +#: swift/common/utils.py:1878 #, python-format msgid "Error: unable to locate %s" msgstr "" -#: swift/common/utils.py:2215 +#: swift/common/utils.py:2250 #, python-format msgid "Unable to read config from %s" msgstr "" -#: swift/common/utils.py:2221 +#: swift/common/utils.py:2256 #, python-format msgid "Unable to find %(section)s config section in %(conf)s" msgstr "" -#: swift/common/utils.py:2606 +#: swift/common/utils.py:2641 #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "" -#: swift/common/utils.py:2611 +#: swift/common/utils.py:2646 #, python-format msgid "No realm key for %r" msgstr "" -#: swift/common/utils.py:2615 +#: swift/common/utils.py:2650 #, python-format msgid "No cluster endpoint for %r %r" msgstr "" -#: swift/common/utils.py:2624 +#: swift/common/utils.py:2659 #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." 
msgstr "" -#: swift/common/utils.py:2628 +#: swift/common/utils.py:2663 msgid "Path required in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2631 +#: swift/common/utils.py:2666 msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2636 +#: swift/common/utils.py:2671 #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2830 +#: swift/common/utils.py:2865 msgid "Exception dumping recon cache" msgstr "" @@ -748,7 +748,7 @@ msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "" #: swift/container/updater.py:92 swift/obj/reconstructor.py:822 -#: swift/obj/replicator.py:600 swift/obj/replicator.py:717 +#: swift/obj/replicator.py:601 swift/obj/replicator.py:718 #, python-format msgid "%s is not mounted" msgstr "" @@ -762,40 +762,40 @@ msgstr "" msgid "Begin container update sweep" msgstr "" -#: swift/container/updater.py:155 +#: swift/container/updater.py:156 #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" -#: swift/container/updater.py:169 +#: swift/container/updater.py:170 #, python-format msgid "Container update sweep completed: %.02fs" msgstr "" -#: swift/container/updater.py:181 +#: swift/container/updater.py:182 msgid "Begin container update single threaded sweep" msgstr "" -#: swift/container/updater.py:189 +#: swift/container/updater.py:190 #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" -#: swift/container/updater.py:244 +#: swift/container/updater.py:245 #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "" -#: swift/container/updater.py:253 +#: swift/container/updater.py:254 #, python-format msgid "Update report failed for %(container)s %(dbfile)s" 
msgstr "" -#: swift/container/updater.py:295 +#: swift/container/updater.py:296 #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " @@ -865,7 +865,7 @@ msgstr "" msgid "Skipping %(dir)s: %(err)s" msgstr "" -#: swift/obj/diskfile.py:380 swift/obj/updater.py:162 +#: swift/obj/diskfile.py:380 swift/obj/updater.py:163 #, python-format msgid "Directory %r does not map to a valid policy (%s)" msgstr "" @@ -983,14 +983,14 @@ msgid "" "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" -#: swift/obj/reconstructor.py:376 swift/obj/replicator.py:521 +#: swift/obj/reconstructor.py:376 swift/obj/replicator.py:522 #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% " "synced" msgstr "" -#: swift/obj/reconstructor.py:383 swift/obj/replicator.py:528 +#: swift/obj/reconstructor.py:383 swift/obj/replicator.py:529 #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" @@ -1000,7 +1000,7 @@ msgstr "" msgid "Nothing reconstructed for %s seconds." msgstr "" -#: swift/obj/reconstructor.py:420 swift/obj/replicator.py:565 +#: swift/obj/reconstructor.py:420 swift/obj/replicator.py:566 msgid "Lockup detected.. killing live coros." 
msgstr "" @@ -1014,7 +1014,7 @@ msgstr "" msgid "%s responded as unmounted" msgstr "" -#: swift/obj/reconstructor.py:893 swift/obj/replicator.py:371 +#: swift/obj/reconstructor.py:893 swift/obj/replicator.py:372 #, python-format msgid "Removing partition: %s" msgstr "" @@ -1064,78 +1064,78 @@ msgstr "" msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "" -#: swift/obj/replicator.py:337 +#: swift/obj/replicator.py:338 #, python-format msgid "Removing %s objects" msgstr "" -#: swift/obj/replicator.py:358 +#: swift/obj/replicator.py:359 msgid "Error syncing handoff partition" msgstr "" -#: swift/obj/replicator.py:436 +#: swift/obj/replicator.py:437 #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "" -#: swift/obj/replicator.py:443 +#: swift/obj/replicator.py:444 #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "" -#: swift/obj/replicator.py:487 +#: swift/obj/replicator.py:488 #, python-format msgid "Error syncing with node: %s" msgstr "" -#: swift/obj/replicator.py:492 +#: swift/obj/replicator.py:493 msgid "Error syncing partition" msgstr "" -#: swift/obj/replicator.py:507 +#: swift/obj/replicator.py:508 #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" -#: swift/obj/replicator.py:536 +#: swift/obj/replicator.py:537 #, python-format msgid "Nothing replicated for %s seconds." msgstr "" -#: swift/obj/replicator.py:723 +#: swift/obj/replicator.py:724 msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" -#: swift/obj/replicator.py:729 +#: swift/obj/replicator.py:730 msgid "Ring change detected. Aborting current replication pass." 
msgstr "" -#: swift/obj/replicator.py:757 +#: swift/obj/replicator.py:758 msgid "Exception in top-level replication loop" msgstr "" -#: swift/obj/replicator.py:767 +#: swift/obj/replicator.py:768 msgid "Running object replicator in script mode." msgstr "" -#: swift/obj/replicator.py:785 +#: swift/obj/replicator.py:786 #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "" -#: swift/obj/replicator.py:796 +#: swift/obj/replicator.py:797 msgid "Starting object replicator in daemon mode." msgstr "" -#: swift/obj/replicator.py:800 +#: swift/obj/replicator.py:801 msgid "Starting object replication pass." msgstr "" -#: swift/obj/replicator.py:805 +#: swift/obj/replicator.py:806 #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "" @@ -1170,40 +1170,40 @@ msgstr "" msgid "Begin object update sweep" msgstr "" -#: swift/obj/updater.py:104 +#: swift/obj/updater.py:105 #, python-format msgid "" "Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s" " successes, %(fail)s failures" msgstr "" -#: swift/obj/updater.py:113 +#: swift/obj/updater.py:114 #, python-format msgid "Object update sweep completed: %.02fs" msgstr "" -#: swift/obj/updater.py:122 +#: swift/obj/updater.py:123 msgid "Begin object update single threaded sweep" msgstr "" -#: swift/obj/updater.py:136 +#: swift/obj/updater.py:137 #, python-format msgid "" "Object update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures" msgstr "" -#: swift/obj/updater.py:180 +#: swift/obj/updater.py:181 #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "" -#: swift/obj/updater.py:210 +#: swift/obj/updater.py:211 #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "" -#: swift/obj/updater.py:275 +#: swift/obj/updater.py:276 #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "" @@ -1234,9 +1234,9 @@ msgstr "" #: 
swift/proxy/controllers/base.py:813 swift/proxy/controllers/base.py:852 #: swift/proxy/controllers/base.py:948 swift/proxy/controllers/obj.py:340 #: swift/proxy/controllers/obj.py:885 swift/proxy/controllers/obj.py:934 -#: swift/proxy/controllers/obj.py:948 swift/proxy/controllers/obj.py:1774 -#: swift/proxy/controllers/obj.py:2013 swift/proxy/controllers/obj.py:2176 -#: swift/proxy/controllers/obj.py:2410 +#: swift/proxy/controllers/obj.py:948 swift/proxy/controllers/obj.py:1764 +#: swift/proxy/controllers/obj.py:2003 swift/proxy/controllers/obj.py:2166 +#: swift/proxy/controllers/obj.py:2400 msgid "Object" msgstr "" @@ -1268,7 +1268,7 @@ msgstr "" #: swift/proxy/controllers/base.py:1041 swift/proxy/controllers/base.py:1429 #: swift/proxy/controllers/obj.py:364 swift/proxy/controllers/obj.py:925 -#: swift/proxy/controllers/obj.py:2168 swift/proxy/controllers/obj.py:2455 +#: swift/proxy/controllers/obj.py:2158 swift/proxy/controllers/obj.py:2445 msgid "ERROR Insufficient Storage" msgstr "" @@ -1296,7 +1296,7 @@ msgstr "" msgid "Trying to get final status of PUT to %s" msgstr "" -#: swift/proxy/controllers/obj.py:368 swift/proxy/controllers/obj.py:2460 +#: swift/proxy/controllers/obj.py:368 swift/proxy/controllers/obj.py:2450 #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "" @@ -1311,40 +1311,40 @@ msgstr "" msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" -#: swift/proxy/controllers/obj.py:929 swift/proxy/controllers/obj.py:2171 +#: swift/proxy/controllers/obj.py:929 swift/proxy/controllers/obj.py:2161 #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "" -#: swift/proxy/controllers/obj.py:935 swift/proxy/controllers/obj.py:2177 +#: swift/proxy/controllers/obj.py:935 swift/proxy/controllers/obj.py:2167 #, python-format msgid "Expect: 100-continue on %s" msgstr "" -#: swift/proxy/controllers/obj.py:949 swift/proxy/controllers/obj.py:1775 +#: 
swift/proxy/controllers/obj.py:949 swift/proxy/controllers/obj.py:1765 #, python-format msgid "Trying to write to %s" msgstr "" -#: swift/proxy/controllers/obj.py:1000 swift/proxy/controllers/obj.py:2342 +#: swift/proxy/controllers/obj.py:1000 swift/proxy/controllers/obj.py:2332 #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "" -#: swift/proxy/controllers/obj.py:1008 swift/proxy/controllers/obj.py:2348 +#: swift/proxy/controllers/obj.py:1008 swift/proxy/controllers/obj.py:2338 msgid "Client disconnected without sending last chunk" msgstr "" -#: swift/proxy/controllers/obj.py:1013 swift/proxy/controllers/obj.py:2355 +#: swift/proxy/controllers/obj.py:1013 swift/proxy/controllers/obj.py:2345 msgid "ERROR Exception causing client disconnect" msgstr "" -#: swift/proxy/controllers/obj.py:1017 swift/proxy/controllers/obj.py:2359 +#: swift/proxy/controllers/obj.py:1017 swift/proxy/controllers/obj.py:2349 #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "" -#: swift/proxy/controllers/obj.py:1023 swift/proxy/controllers/obj.py:2273 +#: swift/proxy/controllers/obj.py:1023 swift/proxy/controllers/obj.py:2263 msgid "Client disconnected without sending enough data" msgstr "" @@ -1353,17 +1353,17 @@ msgstr "" msgid "Object servers returned %s mismatched etags" msgstr "" -#: swift/proxy/controllers/obj.py:1073 swift/proxy/controllers/obj.py:2319 -#: swift/proxy/controllers/obj.py:2544 +#: swift/proxy/controllers/obj.py:1073 swift/proxy/controllers/obj.py:2309 +#: swift/proxy/controllers/obj.py:2534 msgid "Object PUT" msgstr "" -#: swift/proxy/controllers/obj.py:2312 +#: swift/proxy/controllers/obj.py:2302 #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "" -#: swift/proxy/controllers/obj.py:2411 +#: swift/proxy/controllers/obj.py:2401 #, python-format msgid "Trying to get %(status_type)s status of PUT to %(path)s" msgstr "" diff --git a/swift/locale/tr_TR/LC_MESSAGES/swift.po 
b/swift/locale/tr_TR/LC_MESSAGES/swift.po index 31d8b02756..4e0582008a 100644 --- a/swift/locale/tr_TR/LC_MESSAGES/swift.po +++ b/swift/locale/tr_TR/LC_MESSAGES/swift.po @@ -4,12 +4,12 @@ # # Translators: # İşbaran Akçayır , 2015 -# OpenStack Infra , 2015. #zanata +# Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev28\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-04-07 22:18+0000\n" +"Project-Id-Version: swift 2.7.1.dev50\n" +"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" +"POT-Creation-Date: 2016-04-17 21:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po index 1c1d4f82f4..a72ca8d57b 100644 --- a/swift/locale/zh_CN/LC_MESSAGES/swift.po +++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po @@ -4,14 +4,12 @@ # # Translators: # Pearl Yajing Tan(Seagate Tech) , 2014 -# Lucas Palm , 2015. #zanata -# OpenStack Infra , 2015. #zanata -# Linda , 2016. #zanata +# Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev28\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-04-07 22:18+0000\n" +"Project-Id-Version: swift 2.7.1.dev50\n" +"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" +"POT-Creation-Date: 2016-04-17 21:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" diff --git a/swift/locale/zh_TW/LC_MESSAGES/swift.po b/swift/locale/zh_TW/LC_MESSAGES/swift.po index 0565e364c1..b9f27fbe93 100644 --- a/swift/locale/zh_TW/LC_MESSAGES/swift.po +++ b/swift/locale/zh_TW/LC_MESSAGES/swift.po @@ -3,14 +3,12 @@ # This file is distributed under the same license as the swift project. # # Translators: -# Lucas Palm , 2015. #zanata -# OpenStack Infra , 2015. #zanata -# Jennifer , 2016. #zanata +# Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev28\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2016-04-07 22:18+0000\n" +"Project-Id-Version: swift 2.7.1.dev50\n" +"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" +"POT-Creation-Date: 2016-04-17 21:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" From 0da9da513101c172d8923469e40f9c15b3518e73 Mon Sep 17 00:00:00 2001 From: Andy McCrae Date: Thu, 3 Mar 2016 11:14:39 +0000 Subject: [PATCH 095/141] Allow fallocate_reserve to be a percentage Add the ability to set the fallocate_reserve value as a percentage. This happens automatically when adding the '%' at the end of the value. Having the ability to set a % of free space rather than a byte value is useful especially when drive sizes are heterogenous. The default for fallocate_reserve has been adjusted to 1%, having the fallocate_reserve set seems sensible for all deploys and percentages are far safer to default than byte values (across drives of any size). Tests added for using fallocate_reserve as a percentage. Duplicate tests for fallocate_reserve have been removed. Docs updated to reflect the fallocate_reserve change. 
Change-Id: I4aea613a708205c917e81d6b2861396655e73238 --- doc/manpages/account-server.conf.5 | 6 +- doc/manpages/container-server.conf.5 | 6 +- doc/manpages/object-server.conf.5 | 6 +- doc/source/deployment_guide.rst | 48 +++++---- etc/account-server.conf-sample | 7 +- etc/container-server.conf-sample | 7 +- etc/object-server.conf-sample | 7 +- swift/common/daemon.py | 5 +- swift/common/utils.py | 26 ++++- swift/common/wsgi.py | 6 +- test/unit/common/test_utils.py | 153 +++++++++++++++++++++++---- 11 files changed, 215 insertions(+), 62 deletions(-) diff --git a/doc/manpages/account-server.conf.5 b/doc/manpages/account-server.conf.5 index 4a7e8c597e..62ee40f186 100644 --- a/doc/manpages/account-server.conf.5 +++ b/doc/manpages/account-server.conf.5 @@ -121,8 +121,10 @@ The default is false. .IP \fBeventlet_debug\fR Debug mode for eventlet library. The default is false. .IP \fBfallocate_reserve\fR -You can set fallocate_reserve to the number of bytes you'd like fallocate to -reserve, whether there is space for the given file size or not. The default is 0. +You can set fallocate_reserve to the number of bytes or percentage of disk +space you'd like fallocate to reserve, whether there is space for the given +file size or not. Percentage will be used if the value ends with a '%'. +The default is 1%. .RE .PD diff --git a/doc/manpages/container-server.conf.5 b/doc/manpages/container-server.conf.5 index 970fa18f2c..fa7b8a6d55 100644 --- a/doc/manpages/container-server.conf.5 +++ b/doc/manpages/container-server.conf.5 @@ -127,8 +127,10 @@ The default is false. .IP \fBeventlet_debug\fR Debug mode for eventlet library. The default is false. .IP \fBfallocate_reserve\fR -You can set fallocate_reserve to the number of bytes you'd like fallocate to -reserve, whether there is space for the given file size or not. The default is 0. 
+You can set fallocate_reserve to the number of bytes or percentage of disk +space you'd like fallocate to reserve, whether there is space for the given +file size or not. Percentage will be used if the value ends with a '%'. +The default is 1%. .RE .PD diff --git a/doc/manpages/object-server.conf.5 b/doc/manpages/object-server.conf.5 index 2e58de32fb..2c5b8fb327 100644 --- a/doc/manpages/object-server.conf.5 +++ b/doc/manpages/object-server.conf.5 @@ -126,8 +126,10 @@ The default is empty. .IP \fBeventlet_debug\fR Debug mode for eventlet library. The default is false. .IP \fBfallocate_reserve\fR -You can set fallocate_reserve to the number of bytes you'd like fallocate to -reserve, whether there is space for the given file size or not. The default is 0. +You can set fallocate_reserve to the number of bytes or percentage of disk +space you'd like fallocate to reserve, whether there is space for the given +file size or not. Percentage will be used if the value ends with a '%'. +The default is 1%. .IP \fBnode_timeout\fR Request timeout to external services. The default is 3 seconds. .IP \fBconn_timeout\fR diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 083a298578..a19f2c2bc5 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -489,12 +489,14 @@ log_statsd_sample_rate_factor 1.0 log_statsd_metric_prefix eventlet_debug false If true, turn on debug logging for eventlet -fallocate_reserve 0 You can set fallocate_reserve to the - number of bytes you'd like fallocate to - reserve, whether there is space for the - given file size or not. This is useful for - systems that behave badly when they - completely run out of space; you can +fallocate_reserve 1% You can set fallocate_reserve to the + number of bytes or percentage of disk + space you'd like fallocate to reserve, + whether there is space for the given + file size or not. Percentage will be used + if the value ends with a '%'. 
This is + useful for systems that behave badly when + they completely run out of space; you can make the services pretend they're out of space early. conn_timeout 0.5 Time to wait while attempting to connect @@ -809,13 +811,16 @@ log_statsd_default_sample_rate 1.0 log_statsd_sample_rate_factor 1.0 log_statsd_metric_prefix eventlet_debug false If true, turn on debug logging for eventlet -fallocate_reserve 0 You can set fallocate_reserve to the number of - bytes you'd like fallocate to reserve, whether - there is space for the given file size or not. - This is useful for systems that behave badly - when they completely run out of space; you can - make the services pretend they're out of space - early. +fallocate_reserve 1% You can set fallocate_reserve to the + number of bytes or percentage of disk + space you'd like fallocate to reserve, + whether there is space for the given + file size or not. Percentage will be used + if the value ends with a '%'. This is + useful for systems that behave badly when + they completely run out of space; you can + make the services pretend they're out of + space early. db_preallocation off If you don't mind the extra disk space usage in overhead, you can turn this on to preallocate disk space with SQLite databases to decrease @@ -1024,13 +1029,16 @@ log_statsd_default_sample_rate 1.0 log_statsd_sample_rate_factor 1.0 log_statsd_metric_prefix eventlet_debug false If true, turn on debug logging for eventlet -fallocate_reserve 0 You can set fallocate_reserve to the number of - bytes you'd like fallocate to reserve, whether - there is space for the given file size or not. - This is useful for systems that behave badly - when they completely run out of space; you can - make the services pretend they're out of space - early. +fallocate_reserve 1% You can set fallocate_reserve to the + number of bytes or percentage of disk + space you'd like fallocate to reserve, + whether there is space for the given + file size or not. 
Percentage will be used + if the value ends with a '%'. This is + useful for systems that behave badly when + they completely run out of space; you can + make the services pretend they're out of + space early. =============================== ========== ============================================= [account-server] diff --git a/etc/account-server.conf-sample b/etc/account-server.conf-sample index 9fa98c6f20..3471747331 100644 --- a/etc/account-server.conf-sample +++ b/etc/account-server.conf-sample @@ -47,9 +47,10 @@ bind_port = 6002 # # eventlet_debug = false # -# You can set fallocate_reserve to the number of bytes you'd like fallocate to -# reserve, whether there is space for the given file size or not. -# fallocate_reserve = 0 +# You can set fallocate_reserve to the number of bytes or percentage of disk +# space you'd like fallocate to reserve, whether there is space for the given +# file size or not. Percentage will be used if the value ends with a '%'. +# fallocate_reserve = 1% [pipeline:main] pipeline = healthcheck recon account-server diff --git a/etc/container-server.conf-sample b/etc/container-server.conf-sample index 5927f5e230..17e37c5b78 100644 --- a/etc/container-server.conf-sample +++ b/etc/container-server.conf-sample @@ -53,9 +53,10 @@ bind_port = 6001 # # eventlet_debug = false # -# You can set fallocate_reserve to the number of bytes you'd like fallocate to -# reserve, whether there is space for the given file size or not. -# fallocate_reserve = 0 +# You can set fallocate_reserve to the number of bytes or percentage of disk +# space you'd like fallocate to reserve, whether there is space for the given +# file size or not. Percentage will be used if the value ends with a '%'. 
+# fallocate_reserve = 1% [pipeline:main] pipeline = healthcheck recon container-server diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample index e01193bff2..528e293d4a 100644 --- a/etc/object-server.conf-sample +++ b/etc/object-server.conf-sample @@ -52,9 +52,10 @@ bind_port = 6000 # # eventlet_debug = false # -# You can set fallocate_reserve to the number of bytes you'd like fallocate to -# reserve, whether there is space for the given file size or not. -# fallocate_reserve = 0 +# You can set fallocate_reserve to the number of bytes or percentage of disk +# space you'd like fallocate to reserve, whether there is space for the given +# file size or not. Percentage will be used if the value ends with a '%'. +# fallocate_reserve = 1% # # Time to wait while attempting to connect to another backend node. # conn_timeout = 0.5 diff --git a/swift/common/daemon.py b/swift/common/daemon.py index 7b2ea93c03..a5d415638f 100644 --- a/swift/common/daemon.py +++ b/swift/common/daemon.py @@ -92,9 +92,8 @@ def run_daemon(klass, conf_file, section_name='', once=False, **kwargs): if utils.config_true_value(conf.get('disable_fallocate', 'no')): utils.disable_fallocate() # set utils.FALLOCATE_RESERVE if desired - reserve = int(conf.get('fallocate_reserve', 0)) - if reserve > 0: - utils.FALLOCATE_RESERVE = reserve + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value(conf.get('fallocate_reserve', '1%')) # By default, disable eventlet printing stacktraces eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no')) diff --git a/swift/common/utils.py b/swift/common/utils.py index fb4baa4396..b0376d8bcf 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -97,6 +97,9 @@ _libc_accept = None # If set to non-zero, fallocate routines will fail based on free space # available being at or below this amount, in bytes. 
FALLOCATE_RESERVE = 0 +# Indicates if FALLOCATE_RESERVE is the percentage of free space (True) or +# the number of bytes (False). +FALLOCATE_IS_PERCENT = False # Used by hash_path to offer a bit more security when generating hashes for # paths. It simply appends this value to all paths; guessing the hash a path @@ -453,6 +456,25 @@ def get_trans_id_time(trans_id): return None +def config_fallocate_value(reserve_value): + """ + Returns fallocate reserve_value as an int or float. + Returns is_percent as a boolean. + Returns a ValueError on invalid fallocate value. + """ + try: + if str(reserve_value[-1:]) == '%': + reserve_value = float(reserve_value[:-1]) + is_percent = True + else: + reserve_value = int(reserve_value) + is_percent = False + except ValueError: + raise ValueError('Error: %s is an invalid value for fallocate' + '_reserve.' % reserve_value) + return reserve_value, is_percent + + class FileLikeIter(object): def __init__(self, iterable): @@ -596,7 +618,9 @@ class FallocateWrapper(object): if FALLOCATE_RESERVE > 0: st = os.fstatvfs(fd) free = st.f_frsize * st.f_bavail - length.value - if free <= FALLOCATE_RESERVE: + if FALLOCATE_IS_PERCENT: + free = (float(free) / float(st.f_frsize * st.f_blocks)) * 100 + if float(free) <= float(FALLOCATE_RESERVE): raise OSError( errno.ENOSPC, 'FALLOCATE_RESERVE fail %s <= %s' % (free, diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index 2c169eb2a6..6365afb552 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -897,9 +897,9 @@ def run_wsgi(conf_path, app_section, *args, **kwargs): loadapp(conf_path, global_conf=global_conf) # set utils.FALLOCATE_RESERVE if desired - reserve = int(conf.get('fallocate_reserve', 0)) - if reserve > 0: - utils.FALLOCATE_RESERVE = reserve + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value(conf.get('fallocate_reserve', '1%')) + # redirect errors to logger and close stdio capture_stdio(logger) diff --git a/test/unit/common/test_utils.py 
b/test/unit/common/test_utils.py index 626043de3d..ca92d7f58f 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -2583,6 +2583,7 @@ cluster_dfw1 = http://dfw1.host/v1/ class StatVFS(object): f_frsize = 1024 f_bavail = 1 + f_blocks = 100 def fstatvfs(fd): return StatVFS() @@ -2593,17 +2594,20 @@ cluster_dfw1 = http://dfw1.host/v1/ fallocate = utils.FallocateWrapper(noop=True) utils.os.fstatvfs = fstatvfs # Want 1023 reserved, have 1024 * 1 free, so succeeds - utils.FALLOCATE_RESERVE = 1023 + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value('1023') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0) # Want 1023 reserved, have 512 * 2 free, so succeeds - utils.FALLOCATE_RESERVE = 1023 + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value('1023') StatVFS.f_frsize = 512 StatVFS.f_bavail = 2 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0) # Want 1024 reserved, have 1024 * 1 free, so fails - utils.FALLOCATE_RESERVE = 1024 + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value('1024') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 exc = None @@ -2615,7 +2619,8 @@ cluster_dfw1 = http://dfw1.host/v1/ '[Errno 28] FALLOCATE_RESERVE fail 1024 <= 1024') self.assertEqual(err.errno, errno.ENOSPC) # Want 1024 reserved, have 512 * 2 free, so fails - utils.FALLOCATE_RESERVE = 1024 + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value('1024') StatVFS.f_frsize = 512 StatVFS.f_bavail = 2 exc = None @@ -2627,7 +2632,8 @@ cluster_dfw1 = http://dfw1.host/v1/ '[Errno 28] FALLOCATE_RESERVE fail 1024 <= 1024') self.assertEqual(err.errno, errno.ENOSPC) # Want 2048 reserved, have 1024 * 1 free, so fails - utils.FALLOCATE_RESERVE = 2048 + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value('2048') StatVFS.f_frsize = 1024 
StatVFS.f_bavail = 1 exc = None @@ -2639,7 +2645,8 @@ cluster_dfw1 = http://dfw1.host/v1/ '[Errno 28] FALLOCATE_RESERVE fail 1024 <= 2048') self.assertEqual(err.errno, errno.ENOSPC) # Want 2048 reserved, have 512 * 2 free, so fails - utils.FALLOCATE_RESERVE = 2048 + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value('2048') StatVFS.f_frsize = 512 StatVFS.f_bavail = 2 exc = None @@ -2652,7 +2659,8 @@ cluster_dfw1 = http://dfw1.host/v1/ self.assertEqual(err.errno, errno.ENOSPC) # Want 1023 reserved, have 1024 * 1 free, but file size is 1, so # fails - utils.FALLOCATE_RESERVE = 1023 + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value('1023') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 exc = None @@ -2665,28 +2673,95 @@ cluster_dfw1 = http://dfw1.host/v1/ self.assertEqual(err.errno, errno.ENOSPC) # Want 1022 reserved, have 1024 * 1 free, and file size is 1, so # succeeds - utils.FALLOCATE_RESERVE = 1022 + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value('1022') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(1)), 0) - # Want 1023 reserved, have 1024 * 1 free, and file size is 0, so - # succeeds - utils.FALLOCATE_RESERVE = 1023 - StatVFS.f_frsize = 1024 - StatVFS.f_bavail = 1 - self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0) - # Want 1024 reserved, have 1024 * 1 free, and even though - # file size is 0, since we're under the reserve, fails - utils.FALLOCATE_RESERVE = 1024 - StatVFS.f_frsize = 1024 - StatVFS.f_bavail = 1 + # Want 1% reserved, have 100 bytes * 2/100 free, and file size is + # 99, so succeeds + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value('1%') + StatVFS.f_frsize = 100 + StatVFS.f_bavail = 2 + StatVFS.f_blocks = 100 + self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(99)), 0) + # Want 2% reserved, have 50 bytes * 2/50 free, and file size is 
49, + # so succeeds + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value('2%') + StatVFS.f_frsize = 50 + StatVFS.f_bavail = 2 + StatVFS.f_blocks = 50 + self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(49)), 0) + # Want 100% reserved, have 100 * 100/100 free, and file size is 0, + # so fails. + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value('100%') + StatVFS.f_frsize = 100 + StatVFS.f_bavail = 100 + StatVFS.f_blocks = 100 exc = None try: fallocate(0, 1, 0, ctypes.c_uint64(0)) except OSError as err: exc = err self.assertEqual(str(exc), - '[Errno 28] FALLOCATE_RESERVE fail 1024 <= 1024') + '[Errno 28] FALLOCATE_RESERVE fail 100.0 <= ' + '100.0') + self.assertEqual(err.errno, errno.ENOSPC) + # Want 1% reserved, have 100 * 2/100 free, and file size is 101, + # so fails. + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value('1%') + StatVFS.f_frsize = 100 + StatVFS.f_bavail = 2 + StatVFS.f_blocks = 100 + exc = None + try: + fallocate(0, 1, 0, ctypes.c_uint64(101)) + except OSError as err: + exc = err + self.assertEqual(str(exc), + '[Errno 28] FALLOCATE_RESERVE fail 0.99 <= 1.0') + self.assertEqual(err.errno, errno.ENOSPC) + # Want 98% reserved, have 100 bytes * 99/100 free, and file size + # is 100, so fails + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value('98%') + StatVFS.f_frsize = 100 + StatVFS.f_bavail = 99 + StatVFS.f_blocks = 100 + exc = None + try: + fallocate(0, 1, 0, ctypes.c_uint64(100)) + except OSError as err: + exc = err + self.assertEqual(str(exc), + '[Errno 28] FALLOCATE_RESERVE fail 98.0 <= 98.0') + self.assertEqual(err.errno, errno.ENOSPC) + # Want 2% reserved, have 1000 bytes * 21/1000 free, and file size + # is 999, so succeeds. 
+ utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value('2%') + StatVFS.f_frsize = 1000 + StatVFS.f_bavail = 21 + StatVFS.f_blocks = 1000 + self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(999)), 0) + # Want 2% resereved, have 1000 bytes * 21/1000 free, and file size + # is 1000, so fails. + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value('2%') + StatVFS.f_frsize = 1000 + StatVFS.f_bavail = 21 + StatVFS.f_blocks = 1000 + exc = None + try: + fallocate(0, 1, 0, ctypes.c_uint64(1000)) + except OSError as err: + exc = err + self.assertEqual(str(exc), + '[Errno 28] FALLOCATE_RESERVE fail 2.0 <= 2.0') self.assertEqual(err.errno, errno.ENOSPC) finally: utils.FALLOCATE_RESERVE = orig_FALLOCATE_RESERVE @@ -2767,6 +2842,44 @@ cluster_dfw1 = http://dfw1.host/v1/ ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright') self.assertEqual(ts, None) + def test_config_fallocate_value(self): + fallocate_value, is_percent = utils.config_fallocate_value('10%') + self.assertEqual(fallocate_value, 10) + self.assertTrue(is_percent) + fallocate_value, is_percent = utils.config_fallocate_value('10') + self.assertEqual(fallocate_value, 10) + self.assertFalse(is_percent) + try: + fallocate_value, is_percent = utils.config_fallocate_value('ab%') + except ValueError as err: + exc = err + self.assertEqual(str(exc), 'Error: ab% is an invalid value for ' + 'fallocate_reserve.') + try: + fallocate_value, is_percent = utils.config_fallocate_value('ab') + except ValueError as err: + exc = err + self.assertEqual(str(exc), 'Error: ab is an invalid value for ' + 'fallocate_reserve.') + try: + fallocate_value, is_percent = utils.config_fallocate_value('1%%') + except ValueError as err: + exc = err + self.assertEqual(str(exc), 'Error: 1%% is an invalid value for ' + 'fallocate_reserve.') + try: + fallocate_value, is_percent = utils.config_fallocate_value('10.0') + except ValueError as err: + exc = err + 
self.assertEqual(str(exc), 'Error: 10.0 is an invalid value for ' + 'fallocate_reserve.') + fallocate_value, is_percent = utils.config_fallocate_value('10.5%') + self.assertEqual(fallocate_value, 10.5) + self.assertTrue(is_percent) + fallocate_value, is_percent = utils.config_fallocate_value('10.000%') + self.assertEqual(fallocate_value, 10.000) + self.assertTrue(is_percent) + def test_tpool_reraise(self): with patch.object(utils.tpool, 'execute', lambda f: f()): self.assertTrue( From 1a18079a15c2630e602c7c156a30e10e5f884d4b Mon Sep 17 00:00:00 2001 From: "Denis V. Meltsaykin" Date: Thu, 7 Apr 2016 23:07:13 +0300 Subject: [PATCH 096/141] Change pids' type to set Previously, the pids type was list(), which raised a ValueError in case if pid was not in pids during the pids.remove(pid) call. The fix changes the type of pids to set() and substitutes remove() with discard(), which doesn't raise an Exception if a value is not in the set. Change-Id: I8d41cb2a8ec14d29e5c7411ddfe48ae7a41deb45 Closes-Bug: #1567638 --- swift/obj/auditor.py | 16 ++++++++-------- test/unit/obj/test_auditor.py | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 8 deletions(-) diff --git a/swift/obj/auditor.py b/swift/obj/auditor.py index 4c4d24391c..88214ccacf 100644 --- a/swift/obj/auditor.py +++ b/swift/obj/auditor.py @@ -330,13 +330,13 @@ class ObjectAuditor(Daemon): kwargs['zero_byte_fps'] = zbo_fps self.run_audit(**kwargs) else: - pids = [] + pids = set() if self.conf_zero_byte_fps: zbf_pid = self.fork_child(zero_byte_fps=True, **kwargs) - pids.append(zbf_pid) + pids.add(zbf_pid) if self.concurrency == 1: # Audit all devices in 1 process - pids.append(self.fork_child(**kwargs)) + pids.add(self.fork_child(**kwargs)) else: # Divide devices amongst parallel processes set by # self.concurrency. 
Total number of parallel processes @@ -350,7 +350,7 @@ class ObjectAuditor(Daemon): pid = None if len(pids) == parallel_proc: pid = os.wait()[0] - pids.remove(pid) + pids.discard(pid) if self.conf_zero_byte_fps and pid == zbf_pid and once: # If we're only running one pass and the ZBF scanner @@ -363,10 +363,10 @@ class ObjectAuditor(Daemon): # sleep between ZBF scanner forks self._sleep() zbf_pid = self.fork_child(zero_byte_fps=True, **kwargs) - pids.append(zbf_pid) + pids.add(zbf_pid) else: kwargs['device_dirs'] = [device_list.pop()] - pids.append(self.fork_child(**kwargs)) + pids.add(self.fork_child(**kwargs)) while pids: pid = os.wait()[0] # ZBF scanner must be restarted as soon as it finishes @@ -377,8 +377,8 @@ class ObjectAuditor(Daemon): # sleep between ZBF scanner forks self._sleep() zbf_pid = self.fork_child(zero_byte_fps=True, **kwargs) - pids.append(zbf_pid) - pids.remove(pid) + pids.add(zbf_pid) + pids.discard(pid) def run_forever(self, *args, **kwargs): """Run the object audit until stopped.""" diff --git a/test/unit/obj/test_auditor.py b/test/unit/obj/test_auditor.py index db37e49ab1..78aa08a246 100644 --- a/test/unit/obj/test_auditor.py +++ b/test/unit/obj/test_auditor.py @@ -988,6 +988,39 @@ class TestAuditor(unittest.TestCase): self.assertEqual(sorted(forked_pids), [2, 1001, 1003, 1005, 1007]) + def test_run_parallel_audit_once_failed_fork(self): + my_auditor = auditor.ObjectAuditor( + dict(devices=self.devices, mount_check='false', + concurrency=2)) + + start_pid = [1001] + outstanding_pids = [] + failed_once = [False] + + def failing_fork(**kwargs): + # this fork fails only on the 2nd call + # it's enough to cause the growth of orphaned child processes + if len(outstanding_pids) > 0 and not failed_once[0]: + failed_once[0] = True + raise OSError + start_pid[0] += 2 + pid = start_pid[0] + outstanding_pids.append(pid) + return pid + + def fake_wait(): + return outstanding_pids.pop(0), 0 + + with mock.patch("swift.obj.auditor.os.wait", fake_wait), \ 
+ mock.patch.object(my_auditor, 'fork_child', failing_fork), \ + mock.patch.object(my_auditor, '_sleep', lambda *a: None): + for i in range(3): + my_auditor.run_once() + + self.assertEqual(len(outstanding_pids), 0, + "orphaned children left {0}, expected 0." + .format(outstanding_pids)) + if __name__ == '__main__': unittest.main() From 5e420efc728b9f02a3f15eb312ebfa29e2e221ff Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Thu, 7 Jan 2016 15:46:17 -0800 Subject: [PATCH 097/141] Fix versioned_writes functional test skipping Previously, if object versioning was enabled via the old-style allow_versions container-server setting rather than the new-style allow_versioned_writes proxy-server setting, TestCrossPolicyObjectVersioning would skip tests while TestObjectVersioning and TestObjectVersioningUTF8 would run them. Additionally, if versioned_writes was explicitly included in the proxy-server's pipeline and allow_versioned_writes was disabled, the functional tests would fail with a 412. Now, all three will use the same logic to check whether versioning is enabled. Specifically, they will all try to set an X-Versions-Location header and skip if it doesn't stick. Additionally, the TestCrossPolicyObjectVersioningEnv will now properly clean up after itself. 
Change-Id: I4c788a0e18587ff17d3c6e346fd22b881495f06d --- test/functional/tests.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/test/functional/tests.py b/test/functional/tests.py index c55133eb70..01249d2305 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -3213,6 +3213,9 @@ class TestObjectVersioningEnv(object): cls.container = cls.account.container(prefix + "-objs") if not cls.container.create( hdrs={'X-Versions-Location': cls.versions_container.name}): + if cls.conn.response.status == 412: + cls.versioning_enabled = False + return raise ResponseError(cls.conn.response) container_info = cls.container.info() @@ -3265,14 +3268,12 @@ class TestCrossPolicyObjectVersioningEnv(object): cls.multiple_policies_enabled = True else: cls.multiple_policies_enabled = False - cls.versioning_enabled = False + cls.versioning_enabled = True + # We don't actually know the state of versioning, but without + # multiple policies the tests should be skipped anyway. Claiming + # versioning support lets us report the right reason for skipping. 
return - if cls.versioning_enabled is None: - cls.versioning_enabled = 'versioned_writes' in cluster_info - if not cls.versioning_enabled: - return - policy = cls.policies.select() version_policy = cls.policies.exclude(name=policy['name']).select() @@ -3300,6 +3301,9 @@ class TestCrossPolicyObjectVersioningEnv(object): if not cls.container.create( hdrs={'X-Versions-Location': cls.versions_container.name, 'X-Storage-Policy': version_policy['name']}): + if cls.conn.response.status == 412: + cls.versioning_enabled = False + return raise ResponseError(cls.conn.response) container_info = cls.container.info() @@ -3325,6 +3329,11 @@ class TestCrossPolicyObjectVersioningEnv(object): cls.storage_url3, cls.storage_token3 = cls.conn3.authenticate() cls.account3 = cls.conn3.get_account() + @classmethod + def tearDown(cls): + cls.account.delete_containers() + cls.account2.delete_containers() + class TestObjectVersioning(Base): env = TestObjectVersioningEnv From 29544a9e175b1ec9e0dbfd5288edc00a1402d5ca Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Wed, 27 Apr 2016 16:59:00 -0500 Subject: [PATCH 098/141] Use smaller quorum size in proxy for even numbers of replicas Requiring 2/2 backends for PUT requests means that the cluster can't tolerate a single failure. Likewise, if you have 4 replicas in 2 regions, requiring 3/4 on a POST request means you cannot POST with your inter-region link down or congested. This changes the (replication) quorum size in the proxy to be at least half the nodes instead of a majority of the nodes. Daemons that were looking for a majority remain unchanged. The container reconciler, replicator, and updater still require majorities so their functioning is unchanged. Odd numbers of replicas are unaffected by this commit. 
Change-Id: I3b07ff0222aba6293ad7d60afe1747acafbe6ce4 --- swift/common/utils.py | 6 +++- swift/container/reconciler.py | 10 +++---- swift/container/replicator.py | 6 ++-- swift/container/updater.py | 4 +-- test/unit/common/test_storage_policy.py | 4 +-- test/unit/common/test_utils.py | 14 +++++++-- test/unit/proxy/controllers/test_account.py | 30 +++++++++---------- test/unit/proxy/controllers/test_base.py | 9 ++++-- test/unit/proxy/controllers/test_container.py | 30 +++++++++---------- 9 files changed, 65 insertions(+), 48 deletions(-) diff --git a/swift/common/utils.py b/swift/common/utils.py index fb4baa4396..ce6f9d56da 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -2937,6 +2937,10 @@ def public(func): return func +def majority_size(n): + return (n // 2) + 1 + + def quorum_size(n): """ quorum size as it applies to services that use 'replication' for data @@ -2946,7 +2950,7 @@ def quorum_size(n): Number of successful backend requests needed for the proxy to consider the client request successful. """ - return (n // 2) + 1 + return (n + 1) // 2 def rsync_ip(ip): diff --git a/swift/container/reconciler.py b/swift/container/reconciler.py index efacfd2248..f167bed79d 100644 --- a/swift/container/reconciler.py +++ b/swift/container/reconciler.py @@ -25,7 +25,7 @@ from swift.common.direct_client import ( direct_head_container, direct_delete_container_object, direct_put_container_object, ClientException) from swift.common.internal_client import InternalClient, UnexpectedResponse -from swift.common.utils import get_logger, split_path, quorum_size, \ +from swift.common.utils import get_logger, split_path, majority_size, \ FileLikeIter, Timestamp, last_modified_date_to_timestamp, \ LRUCache, decode_timestamps @@ -194,7 +194,7 @@ def add_to_reconciler_queue(container_ring, account, container, obj, server :returns: .misplaced_object container name, False on failure. "Success" - means a quorum of containers got the update. 
+ means a majority of containers got the update. """ container_name = get_reconciler_container_name(obj_timestamp) object_name = get_reconciler_obj_name(obj_policy_index, account, @@ -232,7 +232,7 @@ def add_to_reconciler_queue(container_ring, account, container, obj, response_timeout=response_timeout) successes = sum(pile) - if successes >= quorum_size(len(nodes)): + if successes >= majority_size(len(nodes)): return container_name else: return False @@ -289,7 +289,7 @@ def direct_get_container_policy_index(container_ring, account_name, :param container_ring: ring in which to look up the container locations :param account_name: name of the container's account :param container_name: name of the container - :returns: storage policy index, or None if it couldn't get a quorum + :returns: storage policy index, or None if it couldn't get a majority """ def _eat_client_exception(*args): try: @@ -307,7 +307,7 @@ def direct_get_container_policy_index(container_ring, account_name, container_name) headers = [x for x in pile if x is not None] - if len(headers) < quorum_size(len(nodes)): + if len(headers) < majority_size(len(nodes)): return return best_policy_index(headers) diff --git a/swift/container/replicator.py b/swift/container/replicator.py index b428086bdd..f7af1efe76 100644 --- a/swift/container/replicator.py +++ b/swift/container/replicator.py @@ -31,7 +31,7 @@ from swift.common.exceptions import DeviceUnavailable from swift.common.http import is_success from swift.common.db import DatabaseAlreadyExists from swift.common.utils import (Timestamp, hash_path, - storage_directory, quorum_size) + storage_directory, majority_size) class ContainerReplicator(db_replicator.Replicator): @@ -202,9 +202,9 @@ class ContainerReplicator(db_replicator.Replicator): broker.update_reconciler_sync(info['max_row']) return max_sync = self.dump_to_reconciler(broker, point) - success = responses.count(True) >= quorum_size(len(responses)) + success = responses.count(True) >= 
majority_size(len(responses)) if max_sync > point and success: - # to be safe, only slide up the sync point with a quorum on + # to be safe, only slide up the sync point with a majority on # replication broker.update_reconciler_sync(max_sync) diff --git a/swift/container/updater.py b/swift/container/updater.py index f2cd8f3328..fb6d741e60 100644 --- a/swift/container/updater.py +++ b/swift/container/updater.py @@ -31,7 +31,7 @@ from swift.common.bufferedhttp import http_connect from swift.common.exceptions import ConnectionTimeout from swift.common.ring import Ring from swift.common.utils import get_logger, config_true_value, ismount, \ - dump_recon_cache, quorum_size, Timestamp + dump_recon_cache, majority_size, Timestamp from swift.common.daemon import Daemon from swift.common.http import is_success, HTTP_INTERNAL_SERVER_ERROR @@ -238,7 +238,7 @@ class ContainerUpdater(Daemon): for event in events: if is_success(event.wait()): successes += 1 - if successes >= quorum_size(len(events)): + if successes >= majority_size(len(events)): self.logger.increment('successes') self.successes += 1 self.logger.debug( diff --git a/test/unit/common/test_storage_policy.py b/test/unit/common/test_storage_policy.py index 12a743f9ba..3fd721b732 100755 --- a/test/unit/common/test_storage_policy.py +++ b/test/unit/common/test_storage_policy.py @@ -1119,9 +1119,9 @@ class TestStoragePolicies(unittest.TestCase): def test_quorum_size_replication(self): expected_sizes = {1: 1, - 2: 2, + 2: 1, 3: 2, - 4: 3, + 4: 2, 5: 3} for n, expected in expected_sizes.items(): policy = StoragePolicy(0, 'zero', diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 626043de3d..d237970941 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -2504,13 +2504,23 @@ cluster_dfw1 = http://dfw1.host/v1/ self.assertFalse(utils.streq_const_time('a', 'aaaaa')) self.assertFalse(utils.streq_const_time('ABC123', 'abc123')) - def 
test_replication_quorum_size(self): + def test_quorum_size(self): + expected_sizes = {1: 1, + 2: 1, + 3: 2, + 4: 2, + 5: 3} + got_sizes = dict([(n, utils.quorum_size(n)) + for n in expected_sizes]) + self.assertEqual(expected_sizes, got_sizes) + + def test_majority_size(self): expected_sizes = {1: 1, 2: 2, 3: 2, 4: 3, 5: 3} - got_sizes = dict([(n, utils.quorum_size(n)) + got_sizes = dict([(n, utils.majority_size(n)) for n in expected_sizes]) self.assertEqual(expected_sizes, got_sizes) diff --git a/test/unit/proxy/controllers/test_account.py b/test/unit/proxy/controllers/test_account.py index c7ef854d1b..d3dd9cf504 100644 --- a/test/unit/proxy/controllers/test_account.py +++ b/test/unit/proxy/controllers/test_account.py @@ -320,16 +320,16 @@ class TestAccountController4Replicas(TestAccountController): ((201, 201, 201, 201), 201), ((201, 201, 201, 404), 201), ((201, 201, 201, 503), 201), - ((201, 201, 404, 404), 503), - ((201, 201, 404, 503), 503), - ((201, 201, 503, 503), 503), + ((201, 201, 404, 404), 201), + ((201, 201, 404, 503), 201), + ((201, 201, 503, 503), 201), ((201, 404, 404, 404), 404), - ((201, 404, 404, 503), 503), + ((201, 404, 404, 503), 404), ((201, 404, 503, 503), 503), ((201, 503, 503, 503), 503), ((404, 404, 404, 404), 404), ((404, 404, 404, 503), 404), - ((404, 404, 503, 503), 503), + ((404, 404, 503, 503), 404), ((404, 503, 503, 503), 503), ((503, 503, 503, 503), 503) ] @@ -340,16 +340,16 @@ class TestAccountController4Replicas(TestAccountController): ((204, 204, 204, 204), 204), ((204, 204, 204, 404), 204), ((204, 204, 204, 503), 204), - ((204, 204, 404, 404), 503), - ((204, 204, 404, 503), 503), - ((204, 204, 503, 503), 503), + ((204, 204, 404, 404), 204), + ((204, 204, 404, 503), 204), + ((204, 204, 503, 503), 204), ((204, 404, 404, 404), 404), - ((204, 404, 404, 503), 503), + ((204, 404, 404, 503), 404), ((204, 404, 503, 503), 503), ((204, 503, 503, 503), 503), ((404, 404, 404, 404), 404), ((404, 404, 404, 503), 404), - ((404, 404, 503, 
503), 503), + ((404, 404, 503, 503), 404), ((404, 503, 503, 503), 503), ((503, 503, 503, 503), 503) ] @@ -360,16 +360,16 @@ class TestAccountController4Replicas(TestAccountController): ((204, 204, 204, 204), 204), ((204, 204, 204, 404), 204), ((204, 204, 204, 503), 204), - ((204, 204, 404, 404), 503), - ((204, 204, 404, 503), 503), - ((204, 204, 503, 503), 503), + ((204, 204, 404, 404), 204), + ((204, 204, 404, 503), 204), + ((204, 204, 503, 503), 204), ((204, 404, 404, 404), 404), - ((204, 404, 404, 503), 503), + ((204, 404, 404, 503), 404), ((204, 404, 503, 503), 503), ((204, 503, 503, 503), 503), ((404, 404, 404, 404), 404), ((404, 404, 404, 503), 404), - ((404, 404, 503, 503), 503), + ((404, 404, 503, 503), 404), ((404, 503, 503, 503), 503), ((503, 503, 503, 503), 503) ] diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index 330250e2c9..0715701815 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -662,12 +662,15 @@ class TestFuncs(unittest.TestCase): base = Controller(self.app) # just throw a bunch of test cases at it self.assertEqual(base.have_quorum([201, 404], 3), False) - self.assertEqual(base.have_quorum([201, 201], 4), False) - self.assertEqual(base.have_quorum([201, 201, 404, 404], 4), False) - self.assertEqual(base.have_quorum([201, 503, 503, 201], 4), False) + self.assertEqual(base.have_quorum([201, 201], 4), True) + self.assertEqual(base.have_quorum([201], 4), False) + self.assertEqual(base.have_quorum([201, 201, 404, 404], 4), True) + self.assertEqual(base.have_quorum([201, 302, 418, 503], 4), False) + self.assertEqual(base.have_quorum([201, 503, 503, 201], 4), True) self.assertEqual(base.have_quorum([201, 201], 3), True) self.assertEqual(base.have_quorum([404, 404], 3), True) self.assertEqual(base.have_quorum([201, 201], 2), True) + self.assertEqual(base.have_quorum([201, 404], 2), True) self.assertEqual(base.have_quorum([404, 404], 2), True) 
self.assertEqual(base.have_quorum([201, 404, 201, 201], 4), True) diff --git a/test/unit/proxy/controllers/test_container.py b/test/unit/proxy/controllers/test_container.py index 44e5fb5142..a95e058452 100644 --- a/test/unit/proxy/controllers/test_container.py +++ b/test/unit/proxy/controllers/test_container.py @@ -278,16 +278,16 @@ class TestContainerController4Replicas(TestContainerController): ((201, 201, 201, 201), 201), ((201, 201, 201, 404), 201), ((201, 201, 201, 503), 201), - ((201, 201, 404, 404), 503), - ((201, 201, 404, 503), 503), - ((201, 201, 503, 503), 503), + ((201, 201, 404, 404), 201), + ((201, 201, 404, 503), 201), + ((201, 201, 503, 503), 201), ((201, 404, 404, 404), 404), - ((201, 404, 404, 503), 503), + ((201, 404, 404, 503), 404), ((201, 404, 503, 503), 503), ((201, 503, 503, 503), 503), ((404, 404, 404, 404), 404), ((404, 404, 404, 503), 404), - ((404, 404, 503, 503), 503), + ((404, 404, 503, 503), 404), ((404, 503, 503, 503), 503), ((503, 503, 503, 503), 503) ] @@ -298,16 +298,16 @@ class TestContainerController4Replicas(TestContainerController): ((204, 204, 204, 204), 204), ((204, 204, 204, 404), 204), ((204, 204, 204, 503), 204), - ((204, 204, 404, 404), 503), - ((204, 204, 404, 503), 503), - ((204, 204, 503, 503), 503), + ((204, 204, 404, 404), 204), + ((204, 204, 404, 503), 204), + ((204, 204, 503, 503), 204), ((204, 404, 404, 404), 404), - ((204, 404, 404, 503), 503), + ((204, 404, 404, 503), 404), ((204, 404, 503, 503), 503), ((204, 503, 503, 503), 503), ((404, 404, 404, 404), 404), ((404, 404, 404, 503), 404), - ((404, 404, 503, 503), 503), + ((404, 404, 503, 503), 404), ((404, 503, 503, 503), 503), ((503, 503, 503, 503), 503) ] @@ -318,16 +318,16 @@ class TestContainerController4Replicas(TestContainerController): ((204, 204, 204, 204), 204), ((204, 204, 204, 404), 204), ((204, 204, 204, 503), 204), - ((204, 204, 404, 404), 503), - ((204, 204, 404, 503), 503), - ((204, 204, 503, 503), 503), + ((204, 204, 404, 404), 204), + ((204, 
204, 404, 503), 204), + ((204, 204, 503, 503), 204), ((204, 404, 404, 404), 404), - ((204, 404, 404, 503), 503), + ((204, 404, 404, 503), 404), ((204, 404, 503, 503), 503), ((204, 503, 503, 503), 503), ((404, 404, 404, 404), 404), ((404, 404, 404, 503), 404), - ((404, 404, 503, 503), 503), + ((404, 404, 503, 503), 404), ((404, 503, 503, 503), 503), ((503, 503, 503, 503), 503) ] From 00dd89fe6936e4c2da667b496a9996638c26196b Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Thu, 3 Mar 2016 06:08:12 +0000 Subject: [PATCH 099/141] adding review guidelines Change-Id: I61304856a4ecccbbf3aa06c30822494592a3b3d5 --- REVIEW_GUIDELINES.rst | 387 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 387 insertions(+) create mode 100644 REVIEW_GUIDELINES.rst diff --git a/REVIEW_GUIDELINES.rst b/REVIEW_GUIDELINES.rst new file mode 100644 index 0000000000..fe56c98e7c --- /dev/null +++ b/REVIEW_GUIDELINES.rst @@ -0,0 +1,387 @@ +Review Guidelines +================= + +Effective code review is a skill like any other professional skill you +develop with experience. Effective code review requires trust. No +one is perfect. Everyone makes mistakes. Trust builds over time. + +This document will enumerate behaviors commonly observed and +associated with competent reviews of changes purposed to the Swift +code base. No one is expected to "follow these steps". Guidelines +are not *rules*, not all behaviors will be relevant in all situations. + + +Checkout the Change +------------------- + +You will need to have a copy of the change in an environment where you +can freely edit and experiment with the code in order to provide a +non-superficial review. Superficial reviews are not terribly helpful. +Always try to be helpful. ;) + +Check out the change so that you may begin. + +Commonly, ``git review -d `` + +Run it +------ + +Imagine that you submit a patch to Swift, and a reviewer starts to +take a look at it. 
Your commit message on the patch claims that it +fixes a bug or adds a feature, but as soon as the reviewer downloads +it locally and tries to test it, a severe and obvious error shows up. +Something like a syntax error or a missing dependency. + +"Did you even run this?" is the review comment all contributors dread. + +Reviewers in particular need to be fearful merging changes that just +don't work - or at least fail in frequently common enough scenarios to +be considered "horribly broken". A comment in our review that says +roughly "I ran this on my machine and observed ``description of +behavior change is supposed to achieve``" is the most powerful defense +we have against the terrible terrible scorn from our fellow Swift +developers and operators when we accidentally merge bad code. + +If you're doing a fair amount of reviews - you will participate in +merging a change that will break my clusters - it's cool - I'll do it +to you at some point too (sorry about that). But when either of us go +look at the reviews to understand the process gap that allowed this to +happen - it better not be just because we were too lazy to check it out +and run it before it got merged. + +Or be warned, you may receive, the dreaded... + + "Did you even *run* this?" + +I'm sorry, I know it's rough. ;) + +Consider edge cases very seriously +---------------------------------- + + Saying "that should rarely happen" is the same as saying "that + *will* happen" + + -- Douglas Crockford + +Scale is an *amazingly* abusive partner. If you contribute changes to +Swift your code is running - in production - at scale - and your bugs +can not hide. I wish on all of us that our bugs may be exceptionally +rare - meaning they only happen in extremely unlikely edge cases. For +example, bad things that happen only 1 out of every 10K times an op is +performed will be discovered in minutes. 
Bad things that happen only +1 out of every one billion times something happens will be observed - +by multiple deployments - over the course of a release. Bad things +that happen 1/100 times some op is performed are considered "horribly +broken". Tests must exhaustively exercise possible scenarios. Every +system call and network connection will raise an error and timeout - +where will that Exception be caught? + +Run the tests +------------- + +Yes, I know Gerrit does this already. You can do it *too*. You might +not need to re-run *all* the tests on your machine - it depends on the +change. But, if you're not sure which will be most useful - running +all of them best - unit - functional - probe. If you can't reliably +get all tests passing in your development environment you will not be +able to do effective reviews. Whatever tests/suites you are able to +exercise/validate on your machine against your config you should +mention in your review comments so that other reviewers might choose +to do *other* testing locally when they have the change checked out. + +e.g. + + I went ahead and ran probe/test_object_metadata_replication.py on + my machine with both sync_method = rsync and sync_method = ssync - + that works for me - but I didn't try it with object_post_as_copy = + false + +Maintainable Code is Obvious +---------------------------- + +Style is an important component to review. The goal is maintainability. + +However, keep in mind that generally style, readability and +maintainability are orthogonal to the suitability of a change for +merge. A critical bug fix may be a well written pythonic masterpiece +of style - or it may be a hack-y ugly mess that will absolutely need +to be cleaned up at some point - but it absolutely should merge +because: CRITICAL. BUG. FIX. + +You should comment inline to praise code that is "obvious". You should +comment inline to highlight code that that you found to be "obfuscated". 
+ +Unfortunately "readability" is often subjective. We should remember +that it's probably just our own personal preference. Rather than a +comment that says "You should use a list comprehension here" - rewrite +the code as a list comprehension, run the specific tests that hit the +relevant section to validate your code is correct, then leave a +comment that says: + + I find this more readable: + + ``diff with working tested code`` + +If the author (or another reviewer) agrees - it's possible the change will get +updated to include that improvement before it is merged; or it may happen in a +follow up. + +However, remember that style is non-material - it is useful to provide (via +diff) suggestions to improve maintainability as part of your review - but if +the suggestion is functionally equivalent - it is by definition optional. + +Commit Messages +--------------- + +Read the commit message thoroughly before you begin the review. + +Commit messages must answer the "why" and the "what for" - more so +than the "how" or "what it does". Commonly this will take the form of +a short description: + +- What is broken - without this change +- What is impossible to do with Swift - without this change +- What is slower/worse/harder - without this change + +If you're not able to discern why a change is being made or how it +would be used - you may have to ask for more details before you can +successfully review it. + +Commit messages need to have a high consistent quality. While many +things under source control can be fixed and improved in a follow up +change - commit messages are forever. Luckily it's easy to fix minor +mistakes using the in-line edit feature in Gerrit! If you can avoid +ever having to *ask* someone to change a commit message you will find +yourself an amazingly happier and more productive reviewer. + +Also commit messages should follow the OpenStack Commit Message +guidelines, including references to relevant impact tags or bug +numbers. 
You should hand out links to the OpenStack Commit Message +guidelines *liberally* via comments when fixing commit messages during +review. + +Here you go: `GitCommitMessages `_ + +New Tests +--------- + +New tests should be added for all code changes. Historically you +should expect good changes to have a diff line count ratio of at least +2:1 tests to code. Even if a change has to "fix" a lot of *existing* +tests, if a change does not include any *new* tests it probably should +not merge. + +If a change includes a good ratio of test changes and adds new tests - +you should say so in your review comments. + +If it does not - you should write some! + +... and offer them to the patch author as a diff indicating to them that +"something" like these tests I'm providing as an example will *need* to be +included in this change before it is suitable to merge. Bonus points if you +include suggestions for the author as to how they might improve or expanded on +the tests stubs you provide. + +Be *very* careful about asking an author to add a test for a "small change" +before attempting to do so yourself. It's quite possible there is a lack of +existing test infrastructure needed to develop a concise and clear test - the +author of a small change may not be the best person to introduce a large +amount of new test infrastructure. Also, most of the time remember it's +*harder* to write the test than the change - if the author is unable to +develop a test for their change on their own you may prevent a useful change +from being merged. At a minimum you should suggest a specific unit test that +you think they should be able to copy and modify to exercise the behavior in +their change. If you're not sure if such a test exists - replace their change +with an Exception and run tests until you find one that blows up. + +Documentation +------------- + +Most changes should include documentation. New functions and code +should have Docstrings. 
Tests should obviate new or changed behaviors +with descriptive and meaningful phrases. New features should include +changes to the documentation tree. New config options should be +documented in example configs. The commit message should document the +change for the change log. + +Always point out typos or grammar mistakes when you see them in +review, but also consider that if you were able to recognize the +intent of the statement - documentation with tpyos may be easier to +iterate and improve on than nothing. + +If a change does not have adequate documentation it may not be suitable to +merge. If a change includes incorrect or misleading documentation or is +contrary to *existing* documentation is probably is not suitable to merge. + +Every change could have better documentation. + +Like with tests, a patch isn't done until it has docs. Any patch that +adds a new feature, changes behavior, updates configs, or in any other +way is different than previous behavior requires docs. manpages, +sample configs, docstrings, descriptive prose in the source tree + +Reviewers Write Code +-------------------- + +Reviews have been shown to to provide many benefits - one of which is shared +ownership. After providing a positive review you should understand how the +change works. Doing this will probably require you to "play with" the change. + +You might functionally test the change in various scenarios. You may need to +write a new unittest to validate the change will degrade gracefully under +failure. You might have to write a script to exercise the change under some +superficial load. You might have to break the change and validate the new +tests fail and provide useful errors. You might have to step through some +critical section of the code in a debugger to understand when all the possible +branches are exercised in tests. 
+ +When you're done with your review an artifact of your effort will be +observable in the piles of code and scripts and diffs you wrote while +reviewing. You should make sure to capture those artifacts in a paste +or gist and include them in your review comments so that others may +reference them. + +e.g. + + When I broke the change like this: + + ``diff`` + + it blew up like this: + + ``unittest failure`` + + +It's not uncommon that a review takes more time than writing a change - +hopefully the author also spent as much time as you did *validating* their +change but that's not really in your control. When you provide a positive +review you should be sure you understand the change - even seemingly trivial +changes will take time to consider the ramifications. + +Leave Comments +-------------- + +Leave. Lots. Of. Comments. + +A popular web comic has stated that +`WTFs/Minute `_ is the +*only* valid measurement of code quality. + +If something initially strikes you as questionable - you should jot +down a note so you can loop back around to it. + +However, because of the distributed nature of authors and reviewers +it's *imperative* that you try your best to answer your own questions +as part of your review. + +Do not say "Does this blow up if it gets called when xyz" - rather try +and find a test that specifically covers that condition and mention it +in the comment so others can find it more quickly. Of if you can find +no such test, add one to demonstrate the failure, and include a diff +in a comment. Hopefully you can say "I *thought* this would blow up, +so I wrote this test, but it seems fine." + +But if your initial reaction is "I don't understand this" or "How does +this even work?" you should notate it and explain whatever you *were* +able to figure out in order to help subsequent reviewers more quickly +identify and grok the subtle or complex issues. 
+ +Because you will be leaving lots of comments - many of which are +potentially not highlighting anything specific - it is VERY important +to leave a good summary. Your summary should include details of how +you reviewed the change. You may include what you liked most, or +least. + +If you are leaving a negative score ideally you should provide clear +instructions on how the change could be modified such that it would be +suitable for merge - again diffs work best. + +Scoring +------- + +Scoring is subjective. Try to realize you're making a judgment call. + +A positive score means you believe Swift would be undeniably better +off with this code merged than it would be going one more second +without this change running in production immediately. It is indeed +high praise - you should be sure. + +A negative score means that to the best of your abilities you have not +been able to your satisfaction, to justify the value of a change +against the cost of it's deficiencies and risks. It is a surprisingly +difficult chore to be confident about the value of unproven code or a +not well understood use-case in an uncertain world, and unfortunately +all too easy with a **thorough** review to uncover our defects, and be +reminded of the risk of... regression. + +Reviewers must try *very* hard first and foremost to keep master stable. + +If you can demonstrate a change has an incorrect *behavior* it's +almost without exception that the change must be revised to fix the +defect *before* merging rather than letting it in and having to also +file a bug. + +Every commit must be deployable to production. + +Beyond that - almost any change might be merge-able depending on +it's merits! Here's some tips you might be able to use to find more +changes that should merge! + +#. 
Fixing bugs is HUGELY valuable - the *only* thing which has a + higher cost than the value of fixing a bug - is adding a new + bug - if it's broken and this change makes it fixed (with out + breaking anything else) you have a winner! + +#. Features are INCREDIBLY difficult to justify their value against + the cost of increased complexity, lowered maintainability, risk + of regression, or new defects. Try to focus on what is + *impossible* without the feature - when you make the impossible + possible things are better. Make things better. + +#. Purely test/doc changes, complex refactoring, or mechanical + cleanups are quite nuanced because there's less concrete + objective value. I've seen lots of these kind of changes + get lost to the backlog. I've also seen some success where + multiple authors have collaborated to "push-over" a change + rather than provide a "review" ultimately resulting in a + quorum of three or more "authors" who all agree there is a lot + of value in the change - however subjective. + +Because the bar is high - most reviews will end with a negative score. + +However, for non-material grievances (nits) - you should feel +confident in a positive review if the change is otherwise complete +correct and undeniably makes Swift better (not perfect, *better*). If +you see something worth fixing you should point it out in review +comments, but when applying a score consider if it *need* be fixed +before the change is suitable to merge vs. fixing it in a follow up +change? Consider if the change makes Swift so undeniably *better* +and it was deployed in production without making any additional +changes would it still be correct and complete? Would releasing the +change to production without any additional follow up make it more +difficult to maintain and continue to improve Swift? + +Endeavor to leave a positive or negative score on every change you review. + +Use your best judgment. 
+ +A note on Swift Core Maintainers +================================ + +Swift Core maintainers may provide positive reviews scores that *look* +different from your reviews - a "+2" instead of a "+1" + +But it's *exactly the same* as your "+1" + +It means the change has been thoroughly and positively reviewed. The +only reason it's different is to help identify changes which have +received multiple competent and positive reviews. If you consistently +provide competent reviews you run a *VERY* high risk of being +approached to have your future positive review scores changed from a +"+1" to "+2" in order to make it easier to identify changes which need +to get merged. + +Ideally a review from a core maintainer should provide a clear path +forward for the patch author. If you don't know how to proceed +respond to the reviewers comments on the change and ask for help. +We'd love to try and help. From a1ed76f1cbb3dcd5b4253920278b8bc4f32978a2 Mon Sep 17 00:00:00 2001 From: ZhiQiang Fan Date: Thu, 28 Apr 2016 06:29:41 +0800 Subject: [PATCH 100/141] Fix doc build if git is absent When building packages if git is absent, then we should not set html_last_updated_fmt. It can still be set via the -D switch when building with sphinx-build. Change-Id: I5d0b6cc87f27a052d6d0265546c0d347f00c4bb8 Closes-Bug: #1552251 --- doc/source/conf.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index b5e0d6f071..ec93ed1b7e 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -31,6 +31,7 @@ import os from swift import __version__ import subprocess import sys +import warnings # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the @@ -160,8 +161,12 @@ modindex_common_prefix = ['swift.'] # html_last_updated_fmt = '%b %d, %Y' git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1"] -html_last_updated_fmt = subprocess.Popen( - git_cmd, stdout=subprocess.PIPE).communicate()[0] +try: + html_last_updated_fmt = subprocess.Popen( + git_cmd, stdout=subprocess.PIPE).communicate()[0] +except OSError: + warnings.warn('Cannot get last updated time from git repository. ' + 'Not setting "html_last_updated_fmt".') # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. From 9d6a055b31f267e2380d0470021cac44cdf36203 Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Thu, 28 Apr 2016 12:06:24 -0500 Subject: [PATCH 101/141] Remove threads_per_disk setting This patch removes the threads_per_disk setting. It was already a deprecated setting and by default set to 0, which effectively meant to not use a per-disk thread pool at all. Users are encouraged to use servers_per_port instead. DocImpact Change-Id: Ie76be5c8a74d60a1330627caace19e06d1b9383c --- doc/manpages/object-server.conf.5 | 7 +++---- doc/source/deployment_guide.rst | 14 -------------- etc/object-server.conf-sample | 11 +++-------- swift/common/manager.py | 2 +- swift/obj/diskfile.py | 4 +--- 5 files changed, 8 insertions(+), 30 deletions(-) diff --git a/doc/manpages/object-server.conf.5 b/doc/manpages/object-server.conf.5 index 2e58de32fb..7d92481552 100644 --- a/doc/manpages/object-server.conf.5 +++ b/doc/manpages/object-server.conf.5 @@ -88,10 +88,9 @@ The default is 86400. .IP \fBexpiring_objects_account_name\fR The default is 'expiring_objects'. .IP \fBservers_per_port\fR -Make object-server run this many worker processes per unique port of -"local" ring devices across all storage policies. This can help provide -the isolation of threads_per_disk without the severe overhead. The default -value of 0 disables this feature. 
+Make object-server run this many worker processes per unique port of "local" +ring devices across all storage policies. The default value of 0 disables this +feature. .IP \fBlog_name\fR Label used when logging. The default is swift. .IP \fBlog_facility\fR diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 083a298578..421f436048 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -151,11 +151,6 @@ service a request for any disk, and a slow I/O request blocks the eventlet hub, a single slow disk can impair an entire storage node. This also prevents object servers from fully utilizing all their disks during heavy load. -The :ref:`threads_per_disk ` option was one way to -address this, but came with severe performance overhead which was worse -than the benefit of I/O isolation. Any clusters using threads_per_disk should -switch to using `servers_per_port`. - Another way to get full I/O isolation is to give each disk on a storage node a different port in the storage policy rings. Then set the :ref:`servers_per_port ` @@ -547,15 +542,6 @@ allowed_headers Content-Disposition, Comma separated list of he X-Static-Large-Object Content-Type, etag, Content-Length, or deleted auto_create_account_prefix . Prefix used when automatically creating accounts. -threads_per_disk 0 Size of the per-disk thread pool - used for performing disk I/O. The - default of 0 means to not use a - per-disk thread pool. - This option is no longer - recommended and the - :ref:`servers_per_port - ` - should be used instead. replication_server Configure parameter for creating specific server. To handle all verbs, including replication verbs, do not diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample index e01193bff2..0feaf54bef 100644 --- a/etc/object-server.conf-sample +++ b/etc/object-server.conf-sample @@ -16,10 +16,9 @@ bind_port = 6000 # ignored. 
# workers = auto # -# Make object-server run this many worker processes per unique port of -# "local" ring devices across all storage policies. This can help provide -# the isolation of threads_per_disk without the severe overhead. The default -# value of 0 disables this feature. +# Make object-server run this many worker processes per unique port of "local" +# ring devices across all storage policies. The default value of 0 disables this +# feature. # servers_per_port = 0 # # Maximum concurrent requests per worker @@ -106,10 +105,6 @@ use = egg:swift#object # # auto_create_account_prefix = . # -# A value of 0 means "don't use thread pools". A reasonable starting point is -# 4. -# threads_per_disk = 0 -# # Configure parameter for creating specific server # To handle all verbs, including replication verbs, do not specify # "replication_server" (this is the default). To only handle replication, diff --git a/swift/common/manager.py b/swift/common/manager.py index 92d8f4a6d6..123f27d10f 100644 --- a/swift/common/manager.py +++ b/swift/common/manager.py @@ -55,7 +55,7 @@ WARNING_WAIT = 3 # seconds to wait after message that may just be a warning MAX_DESCRIPTORS = 32768 MAX_MEMORY = (1024 * 1024 * 1024) * 2 # 2 GB -MAX_PROCS = 8192 # workers * disks * threads_per_disk, can get high +MAX_PROCS = 8192 # workers * disks, can get high def setup_env(): diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index 27b19af58a..a074507487 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -536,9 +536,7 @@ class BaseDiskFileManager(object): conf.get('replication_one_per_device', 'true')) self.replication_lock_timeout = int(conf.get( 'replication_lock_timeout', 15)) - threads_per_disk = int(conf.get('threads_per_disk', '0')) - self.threadpools = defaultdict( - lambda: ThreadPool(nthreads=threads_per_disk)) + self.threadpools = defaultdict(lambda: ThreadPool(nthreads=0)) self.use_splice = False self.pipe_size = None From a67d91987dff4859924aeaa3ca8fe30190b4c210 
Mon Sep 17 00:00:00 2001 From: Brian Cline Date: Fri, 29 Apr 2016 04:01:39 -0500 Subject: [PATCH 102/141] Fix minor typos in review guidelines Change-Id: I3acb3daeacb784c4038bf54d4103fab66c52f41a --- REVIEW_GUIDELINES.rst | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/REVIEW_GUIDELINES.rst b/REVIEW_GUIDELINES.rst index fe56c98e7c..03f2657ce1 100644 --- a/REVIEW_GUIDELINES.rst +++ b/REVIEW_GUIDELINES.rst @@ -65,7 +65,7 @@ Consider edge cases very seriously Scale is an *amazingly* abusive partner. If you contribute changes to Swift your code is running - in production - at scale - and your bugs -can not hide. I wish on all of us that our bugs may be exceptionally +cannot hide. I wish on all of us that our bugs may be exceptionally rare - meaning they only happen in extremely unlikely edge cases. For example, bad things that happen only 1 out of every 10K times an op is performed will be discovered in minutes. Bad things that happen only @@ -124,7 +124,7 @@ comment that says: If the author (or another reviewer) agrees - it's possible the change will get updated to include that improvement before it is merged; or it may happen in a -follow up. +follow-up change. However, remember that style is non-material - it is useful to provide (via diff) suggestions to improve maintainability as part of your review - but if @@ -148,7 +148,7 @@ would be used - you may have to ask for more details before you can successfully review it. Commit messages need to have a high consistent quality. While many -things under source control can be fixed and improved in a follow up +things under source control can be fixed and improved in a follow-up change - commit messages are forever. Luckily it's easy to fix minor mistakes using the in-line edit feature in Gerrit! If you can avoid ever having to *ask* someone to change a commit message you will find @@ -179,7 +179,7 @@ If it does not - you should write some! ... 
and offer them to the patch author as a diff indicating to them that "something" like these tests I'm providing as an example will *need* to be included in this change before it is suitable to merge. Bonus points if you -include suggestions for the author as to how they might improve or expanded on +include suggestions for the author as to how they might improve or expand upon the tests stubs you provide. Be *very* careful about asking an author to add a test for a "small change" @@ -206,7 +206,7 @@ change for the change log. Always point out typos or grammar mistakes when you see them in review, but also consider that if you were able to recognize the -intent of the statement - documentation with tpyos may be easier to +intent of the statement - documentation with typos may be easier to iterate and improve on than nothing. If a change does not have adequate documentation it may not be suitable to @@ -218,17 +218,17 @@ Every change could have better documentation. Like with tests, a patch isn't done until it has docs. Any patch that adds a new feature, changes behavior, updates configs, or in any other way is different than previous behavior requires docs. manpages, -sample configs, docstrings, descriptive prose in the source tree +sample configs, docstrings, descriptive prose in the source tree, etc. Reviewers Write Code -------------------- -Reviews have been shown to to provide many benefits - one of which is shared +Reviews have been shown to provide many benefits - one of which is shared ownership. After providing a positive review you should understand how the change works. Doing this will probably require you to "play with" the change. You might functionally test the change in various scenarios. You may need to -write a new unittest to validate the change will degrade gracefully under +write a new unit test to validate the change will degrade gracefully under failure. You might have to write a script to exercise the change under some superficial load. 
You might have to break the change and validate the new tests fail and provide useful errors. You might have to step through some @@ -249,7 +249,7 @@ e.g. it blew up like this: - ``unittest failure`` + ``unit test failure`` It's not uncommon that a review takes more time than writing a change - @@ -276,7 +276,7 @@ as part of your review. Do not say "Does this blow up if it gets called when xyz" - rather try and find a test that specifically covers that condition and mention it -in the comment so others can find it more quickly. Of if you can find +in the comment so others can find it more quickly. Or if you can find no such test, add one to demonstrate the failure, and include a diff in a comment. Hopefully you can say "I *thought* this would blow up, so I wrote this test, but it seems fine." @@ -308,7 +308,7 @@ high praise - you should be sure. A negative score means that to the best of your abilities you have not been able to your satisfaction, to justify the value of a change -against the cost of it's deficiencies and risks. It is a surprisingly +against the cost of its deficiencies and risks. It is a surprisingly difficult chore to be confident about the value of unproven code or a not well understood use-case in an uncertain world, and unfortunately all too easy with a **thorough** review to uncover our defects, and be @@ -324,19 +324,19 @@ file a bug. Every commit must be deployable to production. Beyond that - almost any change might be merge-able depending on -it's merits! Here's some tips you might be able to use to find more +its merits! Here are some tips you might be able to use to find more changes that should merge! #. Fixing bugs is HUGELY valuable - the *only* thing which has a higher cost than the value of fixing a bug - is adding a new - bug - if it's broken and this change makes it fixed (with out + bug - if it's broken and this change makes it fixed (without breaking anything else) you have a winner! #. 
Features are INCREDIBLY difficult to justify their value against the cost of increased complexity, lowered maintainability, risk of regression, or new defects. Try to focus on what is *impossible* without the feature - when you make the impossible - possible things are better. Make things better. + possible, things are better. Make things better. #. Purely test/doc changes, complex refactoring, or mechanical cleanups are quite nuanced because there's less concrete @@ -369,9 +369,9 @@ A note on Swift Core Maintainers ================================ Swift Core maintainers may provide positive reviews scores that *look* -different from your reviews - a "+2" instead of a "+1" +different from your reviews - a "+2" instead of a "+1". -But it's *exactly the same* as your "+1" +But it's *exactly the same* as your "+1". It means the change has been thoroughly and positively reviewed. The only reason it's different is to help identify changes which have From 96c1838a15645639154f1048285d53a891809937 Mon Sep 17 00:00:00 2001 From: Brian Cline Date: Fri, 29 Apr 2016 05:00:36 -0500 Subject: [PATCH 103/141] Update mailmap, resolve identity crisis Change-Id: Ia8e30a8c0f81068c259d5d08adad4f1a98460561 --- .mailmap | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.mailmap b/.mailmap index b640039bfb..f823462873 100644 --- a/.mailmap +++ b/.mailmap @@ -101,3 +101,5 @@ Oshrit Feder Larry Rensing Ben Keller Chaozhe Chen +Brian Cline +Brian Cline From c6ac69c552f87d07cd98b6e3f54258913bdc90db Mon Sep 17 00:00:00 2001 From: ZhiQiang Fan Date: Fri, 29 Apr 2016 20:39:02 +0800 Subject: [PATCH 104/141] [Trivial] Remove unnecessary executable privilege swift/common/storage_policy.py is not required to be executable. 
Change-Id: I733d70a88be25b32d8caf590dee4c53d14757fef --- swift/common/storage_policy.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 swift/common/storage_policy.py diff --git a/swift/common/storage_policy.py b/swift/common/storage_policy.py old mode 100755 new mode 100644 From 33fdd0a356d83f12f63b9a94ff423ca815c96066 Mon Sep 17 00:00:00 2001 From: Bryan Keller Date: Fri, 22 Apr 2016 10:03:23 -0700 Subject: [PATCH 105/141] SwiftLogFormatter will log transaction IDs on INFO level Previously SwiftLogFormatter would make two checks. One to see if the transaction id was already in the message field and another check to make sure the log level wasn't set to info. If either of these was true, then it would not log the transaction ID in the transaction ID field. This commit removes the check for the info log. Now transaction IDs will be recorded in all cases that have them. Change-Id: Ic06538ab55a75d298169ae1745671573ee9c09e8 Closes-Bug: #1504344 --- swift/common/utils.py | 1 - test/unit/common/test_utils.py | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/swift/common/utils.py b/swift/common/utils.py index fb4baa4396..a0d64fed78 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -1625,7 +1625,6 @@ class SwiftLogFormatter(logging.Formatter): msg = msg + record.exc_text if (hasattr(record, 'txn_id') and record.txn_id and - record.levelno != logging.INFO and record.txn_id not in msg): msg = "%s (txn: %s)" % (msg, record.txn_id) if (hasattr(record, 'client_ip') and record.client_ip and diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 626043de3d..8caeaa2eac 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -1667,12 +1667,12 @@ class TestUtils(unittest.TestCase): log_msg = strip_value(sio) self.assertTrue('txn' in log_msg) self.assertTrue('12345' in log_msg) - # test no txn on info message + # test txn in info message 
self.assertEqual(logger.txn_id, '12345') logger.info('test') log_msg = strip_value(sio) - self.assertTrue('txn' not in log_msg) - self.assertTrue('12345' not in log_msg) + self.assertTrue('txn' in log_msg) + self.assertTrue('12345' in log_msg) # test txn already in message self.assertEqual(logger.txn_id, '12345') logger.warning('test 12345 test') From 4c11833a9cbff499725365e535e217f3eae3c442 Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Thu, 28 Apr 2016 11:53:51 -0500 Subject: [PATCH 106/141] Remove ThreadPool class With the removement of threads_per_disk there is no longer a need to use run_in_thread() at all; it was just calling the function itself when running with 0 threads. Similar to force_run_in_thread() - with 0 threads it was basically doing the same like in tpool_reraise(), therefore replacing the call and finally removing the complete ThreadPool class. Note that this might break external consumers that are inheriting BaseDiskFileManager; in this case you need to adopt this change in your codebase then. 
Change-Id: I39489dd660935bdbfbc26b92af86814369369fb5 --- swift/common/exceptions.py | 4 - swift/common/utils.py | 202 +-------------------------------- swift/obj/diskfile.py | 66 ++++------- test/unit/common/test_utils.py | 163 +------------------------- test/unit/obj/test_diskfile.py | 2 +- 5 files changed, 28 insertions(+), 409 deletions(-) diff --git a/swift/common/exceptions.py b/swift/common/exceptions.py index 2f36ab623d..721ac3421a 100644 --- a/swift/common/exceptions.py +++ b/swift/common/exceptions.py @@ -145,10 +145,6 @@ class LockTimeout(MessageTimeout): pass -class ThreadPoolDead(SwiftException): - pass - - class RingBuilderError(SwiftException): pass diff --git a/swift/common/utils.py b/swift/common/utils.py index fb4baa4396..ec7be0bb7d 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -47,8 +47,7 @@ import datetime import eventlet import eventlet.semaphore -from eventlet import GreenPool, sleep, Timeout, tpool, greenthread, \ - greenio, event +from eventlet import GreenPool, sleep, Timeout, tpool from eventlet.green import socket, threading import eventlet.queue import netifaces @@ -3210,205 +3209,6 @@ def tpool_reraise(func, *args, **kwargs): return resp -class ThreadPool(object): - """ - Perform blocking operations in background threads. - - Call its methods from within greenlets to green-wait for results without - blocking the eventlet reactor (hopefully). - """ - - BYTE = 'a'.encode('utf-8') - - def __init__(self, nthreads=2): - self.nthreads = nthreads - self._run_queue = stdlib_queue.Queue() - self._result_queue = stdlib_queue.Queue() - self._threads = [] - self._alive = True - - if nthreads <= 0: - return - - # We spawn a greenthread whose job it is to pull results from the - # worker threads via a real Queue and send them to eventlet Events so - # that the calling greenthreads can be awoken. 
- # - # Since each OS thread has its own collection of greenthreads, it - # doesn't work to have the worker thread send stuff to the event, as - # it then notifies its own thread-local eventlet hub to wake up, which - # doesn't do anything to help out the actual calling greenthread over - # in the main thread. - # - # Thus, each worker sticks its results into a result queue and then - # writes a byte to a pipe, signaling the result-consuming greenlet (in - # the main thread) to wake up and consume results. - # - # This is all stuff that eventlet.tpool does, but that code can't have - # multiple instances instantiated. Since the object server uses one - # pool per disk, we have to reimplement this stuff. - _raw_rpipe, self.wpipe = os.pipe() - self.rpipe = greenio.GreenPipe(_raw_rpipe, 'rb') - - for _junk in range(nthreads): - thr = stdlib_threading.Thread( - target=self._worker, - args=(self._run_queue, self._result_queue)) - thr.daemon = True - thr.start() - self._threads.append(thr) - - # This is the result-consuming greenthread that runs in the main OS - # thread, as described above. - self._consumer_coro = greenthread.spawn_n(self._consume_results, - self._result_queue) - - def _worker(self, work_queue, result_queue): - """ - Pulls an item from the queue and runs it, then puts the result into - the result queue. Repeats forever. - - :param work_queue: queue from which to pull work - :param result_queue: queue into which to place results - """ - while True: - item = work_queue.get() - if item is None: - break - ev, func, args, kwargs = item - try: - result = func(*args, **kwargs) - result_queue.put((ev, True, result)) - except BaseException: - result_queue.put((ev, False, sys.exc_info())) - finally: - work_queue.task_done() - os.write(self.wpipe, self.BYTE) - - def _consume_results(self, queue): - """ - Runs as a greenthread in the same OS thread as callers of - run_in_thread(). 
- - Takes results from the worker OS threads and sends them to the waiting - greenthreads. - """ - while True: - try: - self.rpipe.read(1) - except ValueError: - # can happen at process shutdown when pipe is closed - break - - while True: - try: - ev, success, result = queue.get(block=False) - except stdlib_queue.Empty: - break - - try: - if success: - ev.send(result) - else: - ev.send_exception(*result) - finally: - queue.task_done() - - def run_in_thread(self, func, *args, **kwargs): - """ - Runs ``func(*args, **kwargs)`` in a thread. Blocks the current greenlet - until results are available. - - Exceptions thrown will be reraised in the calling thread. - - If the threadpool was initialized with nthreads=0, it invokes - ``func(*args, **kwargs)`` directly, followed by eventlet.sleep() to - ensure the eventlet hub has a chance to execute. It is more likely the - hub will be invoked when queuing operations to an external thread. - - :returns: result of calling func - :raises: whatever func raises - """ - if not self._alive: - raise swift.common.exceptions.ThreadPoolDead() - - if self.nthreads <= 0: - result = func(*args, **kwargs) - sleep() - return result - - ev = event.Event() - self._run_queue.put((ev, func, args, kwargs), block=False) - - # blocks this greenlet (and only *this* greenlet) until the real - # thread calls ev.send(). - result = ev.wait() - return result - - def _run_in_eventlet_tpool(self, func, *args, **kwargs): - """ - Really run something in an external thread, even if we haven't got any - threads of our own. - """ - def inner(): - try: - return (True, func(*args, **kwargs)) - except (Timeout, BaseException) as err: - return (False, err) - - success, result = tpool.execute(inner) - if success: - return result - else: - raise result - - def force_run_in_thread(self, func, *args, **kwargs): - """ - Runs ``func(*args, **kwargs)`` in a thread. Blocks the current greenlet - until results are available. 
- - Exceptions thrown will be reraised in the calling thread. - - If the threadpool was initialized with nthreads=0, uses eventlet.tpool - to run the function. This is in contrast to run_in_thread(), which - will (in that case) simply execute func in the calling thread. - - :returns: result of calling func - :raises: whatever func raises - """ - if not self._alive: - raise swift.common.exceptions.ThreadPoolDead() - - if self.nthreads <= 0: - return self._run_in_eventlet_tpool(func, *args, **kwargs) - else: - return self.run_in_thread(func, *args, **kwargs) - - def terminate(self): - """ - Releases the threadpool's resources (OS threads, greenthreads, pipes, - etc.) and renders it unusable. - - Don't call run_in_thread() or force_run_in_thread() after calling - terminate(). - """ - self._alive = False - if self.nthreads <= 0: - return - - for _junk in range(self.nthreads): - self._run_queue.put(None) - for thr in self._threads: - thr.join() - self._threads = [] - self.nthreads = 0 - - greenthread.kill(self._consumer_coro) - - self.rpipe.close() - os.close(self.wpipe) - - def ismount(path): """ Test whether a path is a mount point. 
This will catch any diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index a074507487..5295cd5dac 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -55,9 +55,10 @@ from swift.common.constraints import check_mount, check_dir from swift.common.request_helpers import is_sys_meta from swift.common.utils import mkdirs, Timestamp, \ storage_directory, hash_path, renamer, fallocate, fsync, fdatasync, \ - fsync_dir, drop_buffer_cache, ThreadPool, lock_path, write_pickle, \ + fsync_dir, drop_buffer_cache, lock_path, write_pickle, \ config_true_value, listdir, split_path, ismount, remove_file, \ - get_md5_socket, F_SETPIPE_SZ, decode_timestamps, encode_timestamps + get_md5_socket, F_SETPIPE_SZ, decode_timestamps, encode_timestamps, \ + tpool_reraise from swift.common.splice import splice, tee from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \ DiskFileCollision, DiskFileNoSpace, DiskFileDeviceUnavailable, \ @@ -536,7 +537,6 @@ class BaseDiskFileManager(object): conf.get('replication_one_per_device', 'true')) self.replication_lock_timeout = int(conf.get( 'replication_lock_timeout', 15)) - self.threadpools = defaultdict(lambda: ThreadPool(nthreads=0)) self.use_splice = False self.pipe_size = None @@ -1115,8 +1115,7 @@ class BaseDiskFileManager(object): device_path = self.construct_dev_path(device) async_dir = os.path.join(device_path, get_async_dir(policy)) ohash = hash_path(account, container, obj) - self.threadpools[device].run_in_thread( - write_pickle, + write_pickle( data, os.path.join(async_dir, ohash[-3:], ohash + '-' + Timestamp(timestamp).internal), @@ -1139,7 +1138,7 @@ class BaseDiskFileManager(object): dev_path = self.get_dev_path(device) if not dev_path: raise DiskFileDeviceUnavailable() - return self.diskfile_cls(self, dev_path, self.threadpools[device], + return self.diskfile_cls(self, dev_path, partition, account, container, obj, policy=policy, use_splice=self.use_splice, pipe_size=self.pipe_size, **kwargs) @@ 
-1215,7 +1214,7 @@ class BaseDiskFileManager(object): metadata.get('name', ''), 3, 3, True) except ValueError: raise DiskFileNotExist() - return self.diskfile_cls(self, dev_path, self.threadpools[device], + return self.diskfile_cls(self, dev_path, partition, account, container, obj, policy=policy, **kwargs) @@ -1235,7 +1234,7 @@ class BaseDiskFileManager(object): partition) if not os.path.exists(partition_path): mkdirs(partition_path) - _junk, hashes = self.threadpools[device].force_run_in_thread( + _junk, hashes = tpool_reraise( self._get_hashes, partition_path, recalculate=suffixes) return hashes @@ -1368,19 +1367,16 @@ class BaseDiskFileWriter(object): :param fd: open file descriptor of temporary file to receive data :param tmppath: full path name of the opened file descriptor :param bytes_per_sync: number bytes written between sync calls - :param threadpool: internal thread pool to use for disk operations :param diskfile: the diskfile creating this DiskFileWriter instance """ - def __init__(self, name, datadir, fd, tmppath, bytes_per_sync, threadpool, - diskfile): + def __init__(self, name, datadir, fd, tmppath, bytes_per_sync, diskfile): # Parameter tracking self._name = name self._datadir = datadir self._fd = fd self._tmppath = tmppath self._bytes_per_sync = bytes_per_sync - self._threadpool = threadpool self._diskfile = diskfile # Internal attributes @@ -1409,18 +1405,15 @@ class BaseDiskFileWriter(object): :returns: the total number of bytes written to an object """ - def _write_entire_chunk(chunk): - while chunk: - written = os.write(self._fd, chunk) - self._upload_size += written - chunk = chunk[written:] - - self._threadpool.run_in_thread(_write_entire_chunk, chunk) + while chunk: + written = os.write(self._fd, chunk) + self._upload_size += written + chunk = chunk[written:] # For large files sync every 512MB (by default) written diff = self._upload_size - self._last_sync if diff >= self._bytes_per_sync: - self._threadpool.force_run_in_thread(fdatasync, 
self._fd) + tpool_reraise(fdatasync, self._fd) drop_buffer_cache(self._fd, self._last_sync, diff) self._last_sync = self._upload_size @@ -1477,8 +1470,7 @@ class BaseDiskFileWriter(object): metadata['name'] = self._name target_path = join(self._datadir, filename) - self._threadpool.force_run_in_thread( - self._finalize_put, metadata, target_path, cleanup) + tpool_reraise(self._finalize_put, metadata, target_path, cleanup) def put(self, metadata): """ @@ -1521,7 +1513,6 @@ class BaseDiskFileReader(object): :param data_file: on-disk data file name for the object :param obj_size: verified on-disk size of the object :param etag: expected metadata etag value for entire file - :param threadpool: thread pool to use for read operations :param disk_chunk_size: size of reads from disk in bytes :param keep_cache_size: maximum object size that will be kept in cache :param device_path: on-disk device path, used when quarantining an obj @@ -1532,7 +1523,7 @@ class BaseDiskFileReader(object): :param diskfile: the diskfile creating this DiskFileReader instance :param keep_cache: should resulting reads be kept in the buffer cache """ - def __init__(self, fp, data_file, obj_size, etag, threadpool, + def __init__(self, fp, data_file, obj_size, etag, disk_chunk_size, keep_cache_size, device_path, logger, quarantine_hook, use_splice, pipe_size, diskfile, keep_cache=False): @@ -1541,7 +1532,6 @@ class BaseDiskFileReader(object): self._data_file = data_file self._obj_size = obj_size self._etag = etag - self._threadpool = threadpool self._diskfile = diskfile self._disk_chunk_size = disk_chunk_size self._device_path = device_path @@ -1580,8 +1570,7 @@ class BaseDiskFileReader(object): self._started_at_0 = True self._iter_etag = hashlib.md5() while True: - chunk = self._threadpool.run_in_thread( - self._fp.read, self._disk_chunk_size) + chunk = self._fp.read(self._disk_chunk_size) if chunk: if self._iter_etag: self._iter_etag.update(chunk) @@ -1634,8 +1623,8 @@ class 
BaseDiskFileReader(object): try: while True: # Read data from disk to pipe - (bytes_in_pipe, _1, _2) = self._threadpool.run_in_thread( - splice, rfd, None, client_wpipe, None, pipe_size, 0) + (bytes_in_pipe, _1, _2) = splice( + rfd, None, client_wpipe, None, pipe_size, 0) if bytes_in_pipe == 0: self._read_to_eof = True self._drop_cache(rfd, dropped_cache, @@ -1758,9 +1747,8 @@ class BaseDiskFileReader(object): drop_buffer_cache(fd, offset, length) def _quarantine(self, msg): - self._quarantined_dir = self._threadpool.run_in_thread( - self.manager.quarantine_renamer, self._device_path, - self._data_file) + self._quarantined_dir = self.manager.quarantine_renamer( + self._device_path, self._data_file) self._logger.warning("Quarantined object %s: %s" % ( self._data_file, msg)) self._logger.increment('quarantines') @@ -1824,7 +1812,6 @@ class BaseDiskFile(object): :param mgr: associated DiskFileManager instance :param device_path: path to the target device or drive - :param threadpool: thread pool to use for blocking operations :param partition: partition on the device in which the object lives :param account: account name for the object :param container: container name for the object @@ -1837,12 +1824,11 @@ class BaseDiskFile(object): reader_cls = None # must be set by subclasses writer_cls = None # must be set by subclasses - def __init__(self, mgr, device_path, threadpool, partition, + def __init__(self, mgr, device_path, partition, account=None, container=None, obj=None, _datadir=None, policy=None, use_splice=False, pipe_size=None, **kwargs): self._manager = mgr self._device_path = device_path - self._threadpool = threadpool or ThreadPool(nthreads=0) self._logger = mgr.logger self._disk_chunk_size = mgr.disk_chunk_size self._bytes_per_sync = mgr.bytes_per_sync @@ -2043,8 +2029,8 @@ class BaseDiskFile(object): :param msg: reason for quarantining to be included in the exception :returns: DiskFileQuarantined exception object """ - self._quarantined_dir = 
self._threadpool.run_in_thread( - self.manager.quarantine_renamer, self._device_path, data_file) + self._quarantined_dir = self.manager.quarantine_renamer( + self._device_path, data_file) self._logger.warning("Quarantined object %s: %s" % ( data_file, msg)) self._logger.increment('quarantines') @@ -2333,7 +2319,7 @@ class BaseDiskFile(object): """ dr = self.reader_cls( self._fp, self._data_file, int(self._metadata['Content-Length']), - self._metadata['ETag'], self._threadpool, self._disk_chunk_size, + self._metadata['ETag'], self._disk_chunk_size, self._manager.keep_cache_size, self._device_path, self._logger, use_splice=self._use_splice, quarantine_hook=_quarantine_hook, pipe_size=self._pipe_size, diskfile=self, keep_cache=keep_cache) @@ -2378,7 +2364,6 @@ class BaseDiskFile(object): raise dfw = self.writer_cls(self._name, self._datadir, fd, tmppath, bytes_per_sync=self._bytes_per_sync, - threadpool=self._threadpool, diskfile=self) yield dfw finally: @@ -2561,8 +2546,7 @@ class ECDiskFileWriter(BaseDiskFileWriter): """ durable_file_path = os.path.join( self._datadir, timestamp.internal + '.durable') - self._threadpool.force_run_in_thread( - self._finalize_durable, durable_file_path) + tpool_reraise(self._finalize_durable, durable_file_path) def put(self, metadata): """ diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 626043de3d..e46a018a65 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -30,7 +30,6 @@ import mock import random import re import socket -import stat import sys import json import math @@ -43,7 +42,6 @@ from textwrap import dedent import tempfile import time -import traceback import unittest import fcntl import shutil @@ -58,7 +56,7 @@ from six.moves.configparser import NoSectionError, NoOptionError from swift.common.exceptions import Timeout, MessageTimeout, \ ConnectionTimeout, LockTimeout, ReplicationLockTimeout, \ - MimeInvalid, ThreadPoolDead + MimeInvalid from swift.common 
import utils from swift.common.container_sync_realms import ContainerSyncRealms from swift.common.header_key_dict import HeaderKeyDict @@ -4717,165 +4715,6 @@ class TestStatsdLoggingDelegation(unittest.TestCase): self.assertEqual(called, [12345]) -class TestThreadPool(unittest.TestCase): - - def setUp(self): - self.tp = None - - def tearDown(self): - if self.tp: - self.tp.terminate() - - def _pipe_count(self): - # Counts the number of pipes that this process owns. - fd_dir = "/proc/%d/fd" % os.getpid() - - def is_pipe(path): - try: - stat_result = os.stat(path) - return stat.S_ISFIFO(stat_result.st_mode) - except OSError: - return False - - return len([fd for fd in os.listdir(fd_dir) - if is_pipe(os.path.join(fd_dir, fd))]) - - def _thread_id(self): - return threading.current_thread().ident - - def _capture_args(self, *args, **kwargs): - return {'args': args, 'kwargs': kwargs} - - def _raise_valueerror(self): - return int('fishcakes') - - def test_run_in_thread_with_threads(self): - tp = self.tp = utils.ThreadPool(1) - - my_id = self._thread_id() - other_id = tp.run_in_thread(self._thread_id) - self.assertNotEqual(my_id, other_id) - - result = tp.run_in_thread(self._capture_args, 1, 2, bert='ernie') - self.assertEqual(result, {'args': (1, 2), - 'kwargs': {'bert': 'ernie'}}) - - caught = False - try: - tp.run_in_thread(self._raise_valueerror) - except ValueError: - caught = True - self.assertTrue(caught) - - def test_force_run_in_thread_with_threads(self): - # with nthreads > 0, force_run_in_thread looks just like run_in_thread - tp = self.tp = utils.ThreadPool(1) - - my_id = self._thread_id() - other_id = tp.force_run_in_thread(self._thread_id) - self.assertNotEqual(my_id, other_id) - - result = tp.force_run_in_thread(self._capture_args, 1, 2, bert='ernie') - self.assertEqual(result, {'args': (1, 2), - 'kwargs': {'bert': 'ernie'}}) - self.assertRaises(ValueError, tp.force_run_in_thread, - self._raise_valueerror) - - def test_run_in_thread_without_threads(self): - # 
with zero threads, run_in_thread doesn't actually do so - tp = utils.ThreadPool(0) - - my_id = self._thread_id() - other_id = tp.run_in_thread(self._thread_id) - self.assertEqual(my_id, other_id) - - result = tp.run_in_thread(self._capture_args, 1, 2, bert='ernie') - self.assertEqual(result, {'args': (1, 2), - 'kwargs': {'bert': 'ernie'}}) - self.assertRaises(ValueError, tp.run_in_thread, - self._raise_valueerror) - - def test_force_run_in_thread_without_threads(self): - # with zero threads, force_run_in_thread uses eventlet.tpool - tp = utils.ThreadPool(0) - - my_id = self._thread_id() - other_id = tp.force_run_in_thread(self._thread_id) - self.assertNotEqual(my_id, other_id) - - result = tp.force_run_in_thread(self._capture_args, 1, 2, bert='ernie') - self.assertEqual(result, {'args': (1, 2), - 'kwargs': {'bert': 'ernie'}}) - self.assertRaises(ValueError, tp.force_run_in_thread, - self._raise_valueerror) - - def test_preserving_stack_trace_from_thread(self): - def gamma(): - return 1 / 0 # ZeroDivisionError - - def beta(): - return gamma() - - def alpha(): - return beta() - - tp = self.tp = utils.ThreadPool(1) - try: - tp.run_in_thread(alpha) - except ZeroDivisionError: - # NB: format is (filename, line number, function name, text) - tb_func = [elem[2] for elem - in traceback.extract_tb(sys.exc_info()[2])] - else: - self.fail("Expected ZeroDivisionError") - - self.assertEqual(tb_func[-1], "gamma") - self.assertEqual(tb_func[-2], "beta") - self.assertEqual(tb_func[-3], "alpha") - # omit the middle; what's important is that the start and end are - # included, not the exact names of helper methods - self.assertEqual(tb_func[1], "run_in_thread") - self.assertEqual(tb_func[0], "test_preserving_stack_trace_from_thread") - - def test_terminate(self): - initial_thread_count = threading.activeCount() - initial_pipe_count = self._pipe_count() - - tp = utils.ThreadPool(4) - # do some work to ensure any lazy initialization happens - tp.run_in_thread(os.path.join, 'foo', 
'bar') - tp.run_in_thread(os.path.join, 'baz', 'quux') - - # 4 threads in the ThreadPool, plus one pipe for IPC; this also - # serves as a sanity check that we're actually allocating some - # resources to free later - self.assertEqual(initial_thread_count, threading.activeCount() - 4) - self.assertEqual(initial_pipe_count, self._pipe_count() - 2) - - tp.terminate() - self.assertEqual(initial_thread_count, threading.activeCount()) - self.assertEqual(initial_pipe_count, self._pipe_count()) - - def test_cant_run_after_terminate(self): - tp = utils.ThreadPool(0) - tp.terminate() - self.assertRaises(ThreadPoolDead, tp.run_in_thread, lambda: 1) - self.assertRaises(ThreadPoolDead, tp.force_run_in_thread, lambda: 1) - - def test_double_terminate_doesnt_crash(self): - tp = utils.ThreadPool(0) - tp.terminate() - tp.terminate() - - tp = utils.ThreadPool(1) - tp.terminate() - tp.terminate() - - def test_terminate_no_threads_doesnt_crash(self): - tp = utils.ThreadPool(0) - tp.terminate() - - class TestAuditLocationGenerator(unittest.TestCase): def test_drive_tree_access(self): diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index 818ad1563c..a4ca8cff26 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -940,7 +940,7 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_diskfile_from_hash( 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0]) dfclass.assert_called_once_with( - self.df_mgr, '/srv/dev/', self.df_mgr.threadpools['dev'], '9', + self.df_mgr, '/srv/dev/', '9', 'a', 'c', 'o', policy=POLICIES[0]) hclistdir.assert_called_once_with( '/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900', From cf48e75c259350daa667155e3ffd38b17946bf44 Mon Sep 17 00:00:00 2001 From: Shashirekha Gundur Date: Mon, 1 Feb 2016 18:06:54 +0000 Subject: [PATCH 107/141] change default ports for servers Changing the recommended ports for Swift services from ports 6000-6002 to unused ports 6200-6202; so they do not 
conflict with X-Windows or other services. Updated SAIO docs. DocImpact Closes-Bug: #1521339 Change-Id: Ie1c778b159792c8e259e2a54cb86051686ac9d18 --- doc/manpages/account-server.conf.5 | 2 +- doc/manpages/container-server.conf.5 | 2 +- doc/manpages/object-server.conf.5 | 2 +- doc/manpages/swift-get-nodes.1 | 16 +- doc/source/admin_guide.rst | 8 +- doc/source/apache_deployment_guide.rst | 18 +- doc/source/deployment_guide.rst | 24 +- doc/source/ops_runbook/diagnose.rst | 4 +- doc/source/ops_runbook/maintenance.rst | 114 ++++----- doc/source/ops_runbook/procedures.rst | 8 +- doc/source/ops_runbook/troubleshooting.rst | 36 +-- etc/account-server.conf-sample | 2 +- etc/container-server.conf-sample | 2 +- etc/object-server.conf-sample | 2 +- swift/account/reaper.py | 2 +- swift/account/replicator.py | 2 +- swift/cli/recon.py | 2 +- swift/cli/ring_builder_analyzer.py | 32 +-- swift/common/middleware/list_endpoints.py | 6 +- swift/common/middleware/xprofile.py | 6 +- swift/container/replicator.py | 2 +- swift/container/sync.py | 2 +- swift/obj/reconstructor.py | 2 +- swift/obj/replicator.py | 2 +- test/probe/test_container_failures.py | 2 +- test/probe/test_object_failures.py | 2 +- test/probe/test_object_handoff.py | 8 +- test/probe/test_reconstructor_revert.py | 2 +- test/unit/__init__.py | 6 +- test/unit/account/test_reaper.py | 10 +- test/unit/cli/test_ring_builder_analyzer.py | 6 +- test/unit/cli/test_ringbuilder.py | 218 +++++++++--------- .../common/middleware/test_list_endpoints.py | 78 +++---- test/unit/common/middleware/test_recon.py | 44 ++-- test/unit/common/ring/test_builder.py | 177 +++++++------- test/unit/common/ring/test_ring.py | 92 ++++---- test/unit/common/ring/test_utils.py | 92 ++++---- test/unit/common/test_db_replicator.py | 39 ++-- test/unit/common/test_direct_client.py | 6 +- test/unit/container/test_reconciler.py | 2 +- test/unit/obj/test_diskfile.py | 14 +- test/unit/obj/test_reconstructor.py | 64 ++--- test/unit/obj/test_replicator.py | 62 
++--- test/unit/obj/test_server.py | 14 +- test/unit/proxy/controllers/test_base.py | 4 +- 45 files changed, 621 insertions(+), 619 deletions(-) diff --git a/doc/manpages/account-server.conf.5 b/doc/manpages/account-server.conf.5 index 4a7e8c597e..7de4ff22a4 100644 --- a/doc/manpages/account-server.conf.5 +++ b/doc/manpages/account-server.conf.5 @@ -56,7 +56,7 @@ are acceptable within this section. IP address the account server should bind to. The default is 0.0.0.0 which will make it bind to all available addresses. .IP "\fBbind_port\fR" -TCP port the account server should bind to. The default is 6002. +TCP port the account server should bind to. The default is 6202. .IP "\fBbind_timeout\fR" Timeout to bind socket. The default is 30. .IP \fBbacklog\fR diff --git a/doc/manpages/container-server.conf.5 b/doc/manpages/container-server.conf.5 index 970fa18f2c..7cdb6bc53a 100644 --- a/doc/manpages/container-server.conf.5 +++ b/doc/manpages/container-server.conf.5 @@ -56,7 +56,7 @@ are acceptable within this section. IP address the container server should bind to. The default is 0.0.0.0 which will make it bind to all available addresses. .IP "\fBbind_port\fR" -TCP port the container server should bind to. The default is 6001. +TCP port the container server should bind to. The default is 6201. .IP "\fBbind_timeout\fR" Timeout to bind socket. The default is 30. .IP \fBbacklog\fR diff --git a/doc/manpages/object-server.conf.5 b/doc/manpages/object-server.conf.5 index 2e58de32fb..df8c0f09f3 100644 --- a/doc/manpages/object-server.conf.5 +++ b/doc/manpages/object-server.conf.5 @@ -56,7 +56,7 @@ are acceptable within this section. IP address the object server should bind to. The default is 0.0.0.0 which will make it bind to all available addresses. .IP "\fBbind_port\fR" -TCP port the object server should bind to. The default is 6000. +TCP port the object server should bind to. The default is 6200. .IP "\fBbind_timeout\fR" Timeout to bind socket. The default is 30. 
.IP \fBbacklog\fR diff --git a/doc/manpages/swift-get-nodes.1 b/doc/manpages/swift-get-nodes.1 index 608a18488d..a9fbc65ee1 100644 --- a/doc/manpages/swift-get-nodes.1 +++ b/doc/manpages/swift-get-nodes.1 @@ -51,16 +51,16 @@ where the container resides by using the container ring. .IP "Partition 221082" .IP "Hash d7e6ba68cfdce0f0e4ca7890e46cacce" -.IP "Server:Port Device 172.24.24.29:6002 sdd" -.IP "Server:Port Device 172.24.24.27:6002 sdr" -.IP "Server:Port Device 172.24.24.32:6002 sde" -.IP "Server:Port Device 172.24.24.26:6002 sdv [Handoff]" +.IP "Server:Port Device 172.24.24.29:6202 sdd" +.IP "Server:Port Device 172.24.24.27:6202 sdr" +.IP "Server:Port Device 172.24.24.32:6202 sde" +.IP "Server:Port Device 172.24.24.26:6202 sdv [Handoff]" -.IP "curl -I -XHEAD http://172.24.24.29:6002/sdd/221082/MyAccount-12ac01446be2" -.IP "curl -I -XHEAD http://172.24.24.27:6002/sdr/221082/MyAccount-12ac01446be2" -.IP "curl -I -XHEAD http://172.24.24.32:6002/sde/221082/MyAccount-12ac01446be2" -.IP "curl -I -XHEAD http://172.24.24.26:6002/sdv/221082/MyAccount-12ac01446be2 # [Handoff]" +.IP "curl -I -XHEAD http://172.24.24.29:6202/sdd/221082/MyAccount-12ac01446be2" +.IP "curl -I -XHEAD http://172.24.24.27:6202/sdr/221082/MyAccount-12ac01446be2" +.IP "curl -I -XHEAD http://172.24.24.32:6202/sde/221082/MyAccount-12ac01446be2" +.IP "curl -I -XHEAD http://172.24.24.26:6202/sdv/221082/MyAccount-12ac01446be2 # [Handoff]" .IP "ssh 172.24.24.29 ls -lah /srv/node/sdd/accounts/221082/cce/d7e6ba68cfdce0f0e4ca7890e46cacce/ " .IP "ssh 172.24.24.27 ls -lah /srv/node/sdr/accounts/221082/cce/d7e6ba68cfdce0f0e4ca7890e46cacce/" diff --git a/doc/source/admin_guide.rst b/doc/source/admin_guide.rst index 73243f2b7b..4f87939c0c 100644 --- a/doc/source/admin_guide.rst +++ b/doc/source/admin_guide.rst @@ -110,15 +110,15 @@ You can create scripts to create the account and container rings and rebalance. 
cd /etc/swift rm -f account.builder account.ring.gz backups/account.builder backups/account.ring.gz swift-ring-builder account.builder create 18 3 1 - swift-ring-builder account.builder add r1z1-:6002/sdb1 1 - swift-ring-builder account.builder add r1z2-:6002/sdb1 1 + swift-ring-builder account.builder add r1z1-:6202/sdb1 1 + swift-ring-builder account.builder add r1z2-:6202/sdb1 1 swift-ring-builder account.builder rebalance You need to replace the values of , , etc. with the IP addresses of the account servers used in your setup. You can have as many account servers as you need. All account servers are assumed to be listening on port - 6002, and have a storage device called "sdb1" (this is a directory + 6202, and have a storage device called "sdb1" (this is a directory name created under /drives when we setup the account server). The "z1", "z2", etc. designate zones, and you can choose whether you put devices in the same or different zones. The "r1" designates @@ -539,7 +539,7 @@ JSON-formatted response:: {"async_pending": 0} -Note that the default port for the object server is 6000, except on a +Note that the default port for the object server is 6200, except on a Swift All-In-One installation, which uses 6010, 6020, 6030, and 6040. The following metrics and telemetry are currently exposed: diff --git a/doc/source/apache_deployment_guide.rst b/doc/source/apache_deployment_guide.rst index 8151eedf36..1994468df9 100644 --- a/doc/source/apache_deployment_guide.rst +++ b/doc/source/apache_deployment_guide.rst @@ -117,9 +117,9 @@ The Swift default value for max_file_size (when not present) is 5368709122. 
For example an Apache2 serving as a web front end of a storage node:: #Object Service - NameVirtualHost *:6000 - Listen 6000 - + NameVirtualHost *:6200 + Listen 6200 + ServerName object-server WSGIDaemonProcess object-server processes=5 threads=1 WSGIProcessGroup object-server @@ -131,9 +131,9 @@ For example an Apache2 serving as a web front end of a storage node:: #Container Service - NameVirtualHost *:6001 - Listen 6001 - + NameVirtualHost *:6201 + Listen 6201 + ServerName container-server WSGIDaemonProcess container-server processes=5 threads=1 WSGIProcessGroup container-server @@ -145,9 +145,9 @@ For example an Apache2 serving as a web front end of a storage node:: #Account Service - NameVirtualHost *:6002 - Listen 6002 - + NameVirtualHost *:6202 + Listen 6202 + ServerName account-server WSGIDaemonProcess account-server processes=5 threads=1 WSGIProcessGroup account-server diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 083a298578..4ccf87e424 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -169,18 +169,18 @@ Here's an example (abbreviated) old-style ring (2 node cluster with 2 disks each):: Devices: id region zone ip address port replication ip replication port name - 0 1 1 1.1.0.1 6000 1.1.0.1 6000 d1 - 1 1 1 1.1.0.1 6000 1.1.0.1 6000 d2 - 2 1 2 1.1.0.2 6000 1.1.0.2 6000 d3 - 3 1 2 1.1.0.2 6000 1.1.0.2 6000 d4 + 0 1 1 1.1.0.1 6200 1.1.0.1 6200 d1 + 1 1 1 1.1.0.1 6200 1.1.0.1 6200 d2 + 2 1 2 1.1.0.2 6200 1.1.0.2 6200 d3 + 3 1 2 1.1.0.2 6200 1.1.0.2 6200 d4 And here's the same ring set up for `servers_per_port`:: Devices: id region zone ip address port replication ip replication port name - 0 1 1 1.1.0.1 6000 1.1.0.1 6000 d1 - 1 1 1 1.1.0.1 6001 1.1.0.1 6001 d2 - 2 1 2 1.1.0.2 6000 1.1.0.2 6000 d3 - 3 1 2 1.1.0.2 6001 1.1.0.2 6001 d4 + 0 1 1 1.1.0.1 6200 1.1.0.1 6200 d1 + 1 1 1 1.1.0.1 6201 1.1.0.1 6201 d2 + 2 1 2 1.1.0.2 6200 1.1.0.2 6200 d3 + 3 1 2 1.1.0.2 6201 1.1.0.2 6201 d4 When 
migrating from normal to `servers_per_port`, perform these steps in order: @@ -195,7 +195,7 @@ When migrating from normal to `servers_per_port`, perform these steps in order: #. Push out new rings that actually have different ports per disk on each server. One of the ports in the new ring should be the same as the port - used in the old ring ("6000" in the example above). This will cover + used in the old ring ("6200" in the example above). This will cover existing proxy-server processes who haven't loaded the new ring yet. They can still talk to any storage node regardless of whether or not that storage node has loaded the ring and started object-server processes on the @@ -422,7 +422,7 @@ mount_check true Whether or not check if the devices mounted to prevent accidentally writing to the root device bind_ip 0.0.0.0 IP Address for server to bind to -bind_port 6000 Port for server to bind to +bind_port 6200 Port for server to bind to bind_timeout 30 Seconds to attempt bind before giving up backlog 4096 Maximum number of allowed pending connections @@ -765,7 +765,7 @@ mount_check true Whether or not check if the devices mounted to prevent accidentally writing to the root device bind_ip 0.0.0.0 IP Address for server to bind to -bind_port 6001 Port for server to bind to +bind_port 6201 Port for server to bind to bind_timeout 30 Seconds to attempt bind before giving up backlog 4096 Maximum number of allowed pending connections @@ -976,7 +976,7 @@ mount_check true Whether or not check if the devices mounted to prevent accidentally writing to the root device bind_ip 0.0.0.0 IP Address for server to bind to -bind_port 6002 Port for server to bind to +bind_port 6202 Port for server to bind to bind_timeout 30 Seconds to attempt bind before giving up backlog 4096 Maximum number of allowed pending connections diff --git a/doc/source/ops_runbook/diagnose.rst b/doc/source/ops_runbook/diagnose.rst index c1a2a2e99a..2de3681288 100644 --- a/doc/source/ops_runbook/diagnose.rst +++ 
b/doc/source/ops_runbook/diagnose.rst @@ -862,8 +862,8 @@ making progress. Another useful way to check this is with the =============================================================================== [2013-07-17 12:56:19] Checking on replication [replication_time] low: 2, high: 80, avg: 28.8, total: 11037, Failed: 0.0%, no_result: 0, reported: 383 - Oldest completion was 2013-06-12 22:46:50 (12 days ago) by 192.168.245.3:6000. - Most recent completion was 2013-07-17 12:56:19 (5 seconds ago) by 192.168.245.5:6000. + Oldest completion was 2013-06-12 22:46:50 (12 days ago) by 192.168.245.3:6200. + Most recent completion was 2013-07-17 12:56:19 (5 seconds ago) by 192.168.245.5:6200. =============================================================================== The ``Oldest completion`` line in this example indicates that the diff --git a/doc/source/ops_runbook/maintenance.rst b/doc/source/ops_runbook/maintenance.rst index f0eaeb2806..0e588b524e 100644 --- a/doc/source/ops_runbook/maintenance.rst +++ b/doc/source/ops_runbook/maintenance.rst @@ -54,8 +54,8 @@ system. Rules-of-thumb for 'good' recon output are: .. code:: - -> [http://.29:6000/recon/load:] - -> [http://.31:6000/recon/load:] + -> [http://.29:6200/recon/load:] + -> [http://.31:6200/recon/load:] - That could be okay or could require investigation. @@ -86,51 +86,51 @@ two entire racks of Swift are down: .. code:: [2012-03-10 16:56:33] Checking async pendings on 384 hosts... - -> http://.22:6000/recon/async: - -> http://.18:6000/recon/async: - -> http://.16:6000/recon/async: - -> http://.13:6000/recon/async: - -> http://.30:6000/recon/async: - -> http://.6:6000/recon/async: + -> http://.22:6200/recon/async: + -> http://.18:6200/recon/async: + -> http://.16:6200/recon/async: + -> http://.13:6200/recon/async: + -> http://.30:6200/recon/async: + -> http://.6:6200/recon/async: ......... 
- -> http://.5:6000/recon/async: - -> http://.15:6000/recon/async: - -> http://.9:6000/recon/async: - -> http://.27:6000/recon/async: - -> http://.4:6000/recon/async: - -> http://.8:6000/recon/async: + -> http://.5:6200/recon/async: + -> http://.15:6200/recon/async: + -> http://.9:6200/recon/async: + -> http://.27:6200/recon/async: + -> http://.4:6200/recon/async: + -> http://.8:6200/recon/async: Async stats: low: 243, high: 659, avg: 413, total: 132275 =============================================================================== [2012-03-10 16:57:48] Checking replication times on 384 hosts... - -> http://.22:6000/recon/replication: - -> http://.18:6000/recon/replication: - -> http://.16:6000/recon/replication: - -> http://.13:6000/recon/replication: - -> http://.30:6000/recon/replication: - -> http://.6:6000/recon/replication: + -> http://.22:6200/recon/replication: + -> http://.18:6200/recon/replication: + -> http://.16:6200/recon/replication: + -> http://.13:6200/recon/replication: + -> http://.30:6200/recon/replication: + -> http://.6:6200/recon/replication: ............ - -> http://.5:6000/recon/replication: - -> http://.15:6000/recon/replication: - -> http://.9:6000/recon/replication: - -> http://.27:6000/recon/replication: - -> http://.4:6000/recon/replication: - -> http://.8:6000/recon/replication: + -> http://.5:6200/recon/replication: + -> http://.15:6200/recon/replication: + -> http://.9:6200/recon/replication: + -> http://.27:6200/recon/replication: + -> http://.4:6200/recon/replication: + -> http://.8:6200/recon/replication: [Replication Times] shortest: 1.38144306739, longest: 112.620954418, avg: 10.285 9475361 =============================================================================== [2012-03-10 16:59:03] Checking load avg's on 384 hosts... 
- -> http://.22:6000/recon/load: - -> http://.18:6000/recon/load: - -> http://.16:6000/recon/load: - -> http://.13:6000/recon/load: - -> http://.30:6000/recon/load: - -> http://.6:6000/recon/load: + -> http://.22:6200/recon/load: + -> http://.18:6200/recon/load: + -> http://.16:6200/recon/load: + -> http://.13:6200/recon/load: + -> http://.30:6200/recon/load: + -> http://.6:6200/recon/load: ............ - -> http://.15:6000/recon/load: - -> http://.9:6000/recon/load: - -> http://.27:6000/recon/load: - -> http://.4:6000/recon/load: - -> http://.8:6000/recon/load: + -> http://.15:6200/recon/load: + -> http://.9:6200/recon/load: + -> http://.27:6200/recon/load: + -> http://.4:6200/recon/load: + -> http://.8:6200/recon/load: [5m load average] lowest: 1.71, highest: 4.91, avg: 2.486375 [15m load average] lowest: 1.79, highest: 5.04, avg: 2.506125 [1m load average] lowest: 1.46, highest: 4.55, avg: 2.4929375 @@ -176,33 +176,33 @@ files on .72.61 we see: souzab@:~$ sudo tail -f /var/log/swift/background.log | - grep -i ERROR Mar 14 17:28:06 container-replicator ERROR Remote drive not mounted - {'zone': 5, 'weight': 1952.0, 'ip': '.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6001} + {'zone': 5, 'weight': 1952.0, 'ip': '.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6201} Mar 14 17:28:06 container-replicator ERROR Remote drive not mounted - {'zone': 5, 'weight': 1952.0, 'ip': '.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6001} + {'zone': 5, 'weight': 1952.0, 'ip': '.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6201} Mar 14 17:28:09 container-replicator ERROR Remote drive not mounted - {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001} + {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201} Mar 14 17:28:11 container-replicator ERROR Remote drive not mounted - {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 
'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001} + {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201} Mar 14 17:28:13 container-replicator ERROR Remote drive not mounted - {'zone': 5, 'weight': 1952.0, 'ip': '.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6001} + {'zone': 5, 'weight': 1952.0, 'ip': '.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6201} Mar 14 17:28:13 container-replicator ERROR Remote drive not mounted - {'zone': 5, 'weight': 1952.0, 'ip': '.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6001} + {'zone': 5, 'weight': 1952.0, 'ip': '.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6201} Mar 14 17:28:15 container-replicator ERROR Remote drive not mounted - {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001} + {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201} Mar 14 17:28:15 container-replicator ERROR Remote drive not mounted - {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001} + {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201} Mar 14 17:28:19 container-replicator ERROR Remote drive not mounted - {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001} + {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201} Mar 14 17:28:19 container-replicator ERROR Remote drive not mounted - {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001} + {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201} Mar 14 17:28:20 container-replicator ERROR Remote drive not mounted - {'zone': 5, 'weight': 1952.0, 'ip': '.204.119', 'id': 5481, 'meta': '', 'device': 
'disk6', 'port': 6001} + {'zone': 5, 'weight': 1952.0, 'ip': '.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6201} Mar 14 17:28:21 container-replicator ERROR Remote drive not mounted - {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001} + {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201} Mar 14 17:28:21 container-replicator ERROR Remote drive not mounted - {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001} + {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201} Mar 14 17:28:22 container-replicator ERROR Remote drive not mounted - {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001} + {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201} That is why this node has a lot of async pendings: a bunch of disks that are not mounted on and . There may be other issues, @@ -242,11 +242,11 @@ Procedure 2097152 partitions, 3 replicas, 5 zones, 1320 devices, 0.02 balance The minimum number of hours before a partition can be reassigned is 24 Devices: id zone ip address port name weight partitions balance meta - 0 1 .4 6000 disk0 1708.00 4259 -0.00 - 1 1 .4 6000 disk1 1708.00 4260 0.02 - 2 1 .4 6000 disk2 1952.00 4868 0.01 - 3 1 .4 6000 disk3 1952.00 4868 0.01 - 4 1 .4 6000 disk4 1952.00 4867 -0.01 + 0 1 .4 6200 disk0 1708.00 4259 -0.00 + 1 1 .4 6200 disk1 1708.00 4260 0.02 + 2 1 .4 6200 disk2 1952.00 4868 0.01 + 3 1 .4 6200 disk3 1952.00 4868 0.01 + 4 1 .4 6200 disk4 1952.00 4867 -0.01 #. Here, node .4 is in zone 1. If two or more of the three nodes under consideration are in the same Swift zone, they do not @@ -327,4 +327,4 @@ Swift startup/shutdown - Use reload - not stop/start/restart. 
- Try to roll sets of servers (especially proxy) in groups of less - than 20% of your servers. \ No newline at end of file + than 20% of your servers. diff --git a/doc/source/ops_runbook/procedures.rst b/doc/source/ops_runbook/procedures.rst index 8a28dc5281..067c7b32e4 100644 --- a/doc/source/ops_runbook/procedures.rst +++ b/doc/source/ops_runbook/procedures.rst @@ -292,9 +292,9 @@ re-create the account as follows: $ sudo swift-get-nodes /etc/swift/account.ring.gz AUTH_4ebe3039674d4864a11fe0864ae4d905 ... - curl -I -XHEAD "http://192.168.245.5:6002/disk1/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905" - curl -I -XHEAD "http://192.168.245.3:6002/disk0/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905" - curl -I -XHEAD "http://192.168.245.4:6002/disk1/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905" + curl -I -XHEAD "http://192.168.245.5:6202/disk1/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905" + curl -I -XHEAD "http://192.168.245.3:6202/disk0/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905" + curl -I -XHEAD "http://192.168.245.4:6202/disk1/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905" ... Use your own device location of servers: such as "export DEVICE=/srv/node" @@ -310,7 +310,7 @@ re-create the account as follows: .. 
code:: - $ curl -I -XHEAD "http://192.168.245.5:6002/disk1/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905" + $ curl -I -XHEAD "http://192.168.245.5:6202/disk1/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905" HTTP/1.1 404 Not Found Content-Length: 0 Content-Type: text/html; charset=utf-8 diff --git a/doc/source/ops_runbook/troubleshooting.rst b/doc/source/ops_runbook/troubleshooting.rst index 01c55899b7..0966267ccd 100644 --- a/doc/source/ops_runbook/troubleshooting.rst +++ b/doc/source/ops_runbook/troubleshooting.rst @@ -89,25 +89,25 @@ user's account data is stored: Partition 198875 Hash 1846d99185f8a0edaf65cfbf37439696 - Server:Port Device .31:6002 disk6 - Server:Port Device .204.70:6002 disk6 - Server:Port Device .72.16:6002 disk9 - Server:Port Device .204.64:6002 disk11 [Handoff] - Server:Port Device .26:6002 disk11 [Handoff] - Server:Port Device .72.27:6002 disk11 [Handoff] + Server:Port Device .31:6202 disk6 + Server:Port Device .204.70:6202 disk6 + Server:Port Device .72.16:6202 disk9 + Server:Port Device .204.64:6202 disk11 [Handoff] + Server:Port Device .26:6202 disk11 [Handoff] + Server:Port Device .72.27:6202 disk11 [Handoff] - curl -I -XHEAD "`http://.31:6002/disk6/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" - `_ - curl -I -XHEAD "`http://.204.70:6002/disk6/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" - `_ - curl -I -XHEAD "`http://.72.16:6002/disk9/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" - `_ - curl -I -XHEAD "`http://.204.64:6002/disk11/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" - `_ # [Handoff] - curl -I -XHEAD "`http://.26:6002/disk11/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" - `_ # [Handoff] - curl -I -XHEAD "`http://.72.27:6002/disk11/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" - `_ # [Handoff] + curl -I -XHEAD "`http://.31:6202/disk6/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" + `_ + curl -I -XHEAD "`http://.204.70:6202/disk6/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" + `_ + curl -I -XHEAD 
"`http://.72.16:6202/disk9/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" + `_ + curl -I -XHEAD "`http://.204.64:6202/disk11/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" + `_ # [Handoff] + curl -I -XHEAD "`http://.26:6202/disk11/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" + `_ # [Handoff] + curl -I -XHEAD "`http://.72.27:6202/disk11/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" + `_ # [Handoff] ssh .31 "ls -lah /srv/node/disk6/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" ssh .204.70 "ls -lah /srv/node/disk6/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" diff --git a/etc/account-server.conf-sample b/etc/account-server.conf-sample index 9fa98c6f20..a29856cc39 100644 --- a/etc/account-server.conf-sample +++ b/etc/account-server.conf-sample @@ -1,6 +1,6 @@ [DEFAULT] # bind_ip = 0.0.0.0 -bind_port = 6002 +bind_port = 6202 # bind_timeout = 30 # backlog = 4096 # user = swift diff --git a/etc/container-server.conf-sample b/etc/container-server.conf-sample index 5927f5e230..30276fb278 100644 --- a/etc/container-server.conf-sample +++ b/etc/container-server.conf-sample @@ -1,6 +1,6 @@ [DEFAULT] # bind_ip = 0.0.0.0 -bind_port = 6001 +bind_port = 6201 # bind_timeout = 30 # backlog = 4096 # user = swift diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample index e01193bff2..ba42734756 100644 --- a/etc/object-server.conf-sample +++ b/etc/object-server.conf-sample @@ -1,6 +1,6 @@ [DEFAULT] # bind_ip = 0.0.0.0 -bind_port = 6000 +bind_port = 6200 # bind_timeout = 30 # backlog = 4096 # user = swift diff --git a/swift/account/reaper.py b/swift/account/reaper.py index ea7307183b..93e1608ae6 100644 --- a/swift/account/reaper.py +++ b/swift/account/reaper.py @@ -73,7 +73,7 @@ class AccountReaper(Daemon): self.node_timeout = float(conf.get('node_timeout', 10)) self.conn_timeout = float(conf.get('conn_timeout', 0.5)) self.myips = whataremyips(conf.get('bind_ip', '0.0.0.0')) - self.bind_port = int(conf.get('bind_port', 6002)) + 
self.bind_port = int(conf.get('bind_port', 6202)) self.concurrency = int(conf.get('concurrency', 25)) self.container_concurrency = self.object_concurrency = \ sqrt(self.concurrency) diff --git a/swift/account/replicator.py b/swift/account/replicator.py index ad9ad9fd5b..773a3fb206 100644 --- a/swift/account/replicator.py +++ b/swift/account/replicator.py @@ -21,4 +21,4 @@ class AccountReplicator(db_replicator.Replicator): server_type = 'account' brokerclass = AccountBroker datadir = DATADIR - default_port = 6002 + default_port = 6202 diff --git a/swift/cli/recon.py b/swift/cli/recon.py index df85f7121e..2936c30fe0 100644 --- a/swift/cli/recon.py +++ b/swift/cli/recon.py @@ -72,7 +72,7 @@ class Scout(object): Perform the actual HTTP request to obtain swift recon telemtry. :param base_url: the base url of the host you wish to check. str of the - format 'http://127.0.0.1:6000/recon/' + format 'http://127.0.0.1:6200/recon/' :param recon_type: the swift recon check to request. :returns: tuple of (recon url used, response body, and status) """ diff --git a/swift/cli/ring_builder_analyzer.py b/swift/cli/ring_builder_analyzer.py index 85526d8f90..599af0e4d7 100644 --- a/swift/cli/ring_builder_analyzer.py +++ b/swift/cli/ring_builder_analyzer.py @@ -37,26 +37,26 @@ addition:: "rounds": [ [ - ["add", "r1z2-10.20.30.40:6000/sda", 8000], - ["add", "r1z2-10.20.30.40:6000/sdb", 8000], - ["add", "r1z2-10.20.30.40:6000/sdc", 8000], - ["add", "r1z2-10.20.30.40:6000/sdd", 8000], + ["add", "r1z2-10.20.30.40:6200/sda", 8000], + ["add", "r1z2-10.20.30.40:6200/sdb", 8000], + ["add", "r1z2-10.20.30.40:6200/sdc", 8000], + ["add", "r1z2-10.20.30.40:6200/sdd", 8000], - ["add", "r1z2-10.20.30.41:6000/sda", 8000], - ["add", "r1z2-10.20.30.41:6000/sdb", 8000], - ["add", "r1z2-10.20.30.41:6000/sdc", 8000], - ["add", "r1z2-10.20.30.41:6000/sdd", 8000], + ["add", "r1z2-10.20.30.41:6200/sda", 8000], + ["add", "r1z2-10.20.30.41:6200/sdb", 8000], + ["add", "r1z2-10.20.30.41:6200/sdc", 8000], + 
["add", "r1z2-10.20.30.41:6200/sdd", 8000], - ["add", "r1z2-10.20.30.43:6000/sda", 8000], - ["add", "r1z2-10.20.30.43:6000/sdb", 8000], - ["add", "r1z2-10.20.30.43:6000/sdc", 8000], - ["add", "r1z2-10.20.30.43:6000/sdd", 8000], + ["add", "r1z2-10.20.30.43:6200/sda", 8000], + ["add", "r1z2-10.20.30.43:6200/sdb", 8000], + ["add", "r1z2-10.20.30.43:6200/sdc", 8000], + ["add", "r1z2-10.20.30.43:6200/sdd", 8000], - ["add", "r1z2-10.20.30.44:6000/sda", 8000], - ["add", "r1z2-10.20.30.44:6000/sdb", 8000], - ["add", "r1z2-10.20.30.44:6000/sdc", 8000] + ["add", "r1z2-10.20.30.44:6200/sda", 8000], + ["add", "r1z2-10.20.30.44:6200/sdb", 8000], + ["add", "r1z2-10.20.30.44:6200/sdc", 8000] ], [ - ["add", "r1z2-10.20.30.44:6000/sdd", 1000] + ["add", "r1z2-10.20.30.44:6200/sdd", 1000] ], [ ["set_weight", 15, 2000] ], [ diff --git a/swift/common/middleware/list_endpoints.py b/swift/common/middleware/list_endpoints.py index 2e55d45fac..5342f43fa3 100644 --- a/swift/common/middleware/list_endpoints.py +++ b/swift/common/middleware/list_endpoints.py @@ -37,9 +37,9 @@ with a JSON-encoded list of endpoints of the form:: correspondingly, e.g.:: - http://10.1.1.1:6000/sda1/2/a/c2/o1 - http://10.1.1.1:6000/sda1/2/a/c2 - http://10.1.1.1:6000/sda1/2/a + http://10.1.1.1:6200/sda1/2/a/c2/o1 + http://10.1.1.1:6200/sda1/2/a/c2 + http://10.1.1.1:6200/sda1/2/a Using the v2 API, answers requests of the form:: diff --git a/swift/common/middleware/xprofile.py b/swift/common/middleware/xprofile.py index 6db40000d5..deca857f2b 100644 --- a/swift/common/middleware/xprofile.py +++ b/swift/common/middleware/xprofile.py @@ -54,9 +54,9 @@ Retrieve metrics from specific function in json format:: A list of URL examples: http://localhost:8080/__profile__ (proxy server) - http://localhost:6000/__profile__/all (object server) - http://localhost:6001/__profile__/current (container server) - http://localhost:6002/__profile__/12345?format=json (account server) + http://localhost:6200/__profile__/all (object 
server) + http://localhost:6201/__profile__/current (container server) + http://localhost:6202/__profile__/12345?format=json (account server) The profiling middleware can be configured in paste file for WSGI servers such as proxy, account, container and object servers. Please refer to the sample diff --git a/swift/container/replicator.py b/swift/container/replicator.py index b428086bdd..716b19e5a6 100644 --- a/swift/container/replicator.py +++ b/swift/container/replicator.py @@ -38,7 +38,7 @@ class ContainerReplicator(db_replicator.Replicator): server_type = 'container' brokerclass = ContainerBroker datadir = DATADIR - default_port = 6001 + default_port = 6201 def report_up_to_date(self, full_info): reported_key_map = { diff --git a/swift/container/sync.py b/swift/container/sync.py index 2ff4bff5c4..8fbfe9dba3 100644 --- a/swift/container/sync.py +++ b/swift/container/sync.py @@ -215,7 +215,7 @@ class ContainerSync(Daemon): ring_name='container') bind_ip = conf.get('bind_ip', '0.0.0.0') self._myips = whataremyips(bind_ip) - self._myport = int(conf.get('bind_port', 6001)) + self._myport = int(conf.get('bind_port', 6201)) swift.common.db.DB_PREALLOCATION = \ config_true_value(conf.get('db_preallocation', 'f')) self.conn_timeout = float(conf.get('conn_timeout', 5)) diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py index e41c478c0a..2c9e2b4c82 100644 --- a/swift/obj/reconstructor.py +++ b/swift/obj/reconstructor.py @@ -127,7 +127,7 @@ class ObjectReconstructor(Daemon): self.bind_ip = conf.get('bind_ip', '0.0.0.0') self.servers_per_port = int(conf.get('servers_per_port', '0') or 0) self.port = None if self.servers_per_port else \ - int(conf.get('bind_port', 6000)) + int(conf.get('bind_port', 6200)) self.concurrency = int(conf.get('concurrency', 1)) self.stats_interval = int(conf.get('stats_interval', '300')) self.ring_check_interval = int(conf.get('ring_check_interval', 15)) diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py index 
d06e2d4822..7a8613dec7 100644 --- a/swift/obj/replicator.py +++ b/swift/obj/replicator.py @@ -68,7 +68,7 @@ class ObjectReplicator(Daemon): self.bind_ip = conf.get('bind_ip', '0.0.0.0') self.servers_per_port = int(conf.get('servers_per_port', '0') or 0) self.port = None if self.servers_per_port else \ - int(conf.get('bind_port', 6000)) + int(conf.get('bind_port', 6200)) self.concurrency = int(conf.get('concurrency', 1)) self.stats_interval = int(conf.get('stats_interval', '300')) self.ring_check_interval = int(conf.get('ring_check_interval', 15)) diff --git a/test/probe/test_container_failures.py b/test/probe/test_container_failures.py index d8c132c53d..3076ba7922 100755 --- a/test/probe/test_container_failures.py +++ b/test/probe/test_container_failures.py @@ -133,7 +133,7 @@ class TestContainerFailures(ReplProbeTest): onode = onodes[0] db_files = [] for onode in onodes: - node_id = (onode['port'] - 6000) / 10 + node_id = (onode['port'] - 6200) / 10 device = onode['device'] hash_str = hash_path(self.account, container) server_conf = readconf(self.configs['container-server'][node_id]) diff --git a/test/probe/test_object_failures.py b/test/probe/test_object_failures.py index ba53177743..b290a4ec05 100755 --- a/test/probe/test_object_failures.py +++ b/test/probe/test_object_failures.py @@ -61,7 +61,7 @@ class TestObjectFailures(ReplProbeTest): opart, onodes = self.object_ring.get_nodes( self.account, container, obj) onode = onodes[0] - node_id = (onode['port'] - 6000) / 10 + node_id = (onode['port'] - 6200) / 10 device = onode['device'] hash_str = hash_path(self.account, container, obj) obj_server_conf = readconf(self.configs['object-server'][node_id]) diff --git a/test/probe/test_object_handoff.py b/test/probe/test_object_handoff.py index a360021b7c..1b9bd29cba 100755 --- a/test/probe/test_object_handoff.py +++ b/test/probe/test_object_handoff.py @@ -134,13 +134,13 @@ class TestObjectHandoff(ReplProbeTest): port_num = node['replication_port'] except KeyError: 
port_num = node['port'] - node_id = (port_num - 6000) / 10 + node_id = (port_num - 6200) / 10 Manager(['object-replicator']).once(number=node_id) try: another_port_num = another_onode['replication_port'] except KeyError: another_port_num = another_onode['port'] - another_num = (another_port_num - 6000) / 10 + another_num = (another_port_num - 6200) / 10 Manager(['object-replicator']).once(number=another_num) # Assert the first container/obj primary server now has container/obj @@ -230,9 +230,9 @@ class TestObjectHandoff(ReplProbeTest): port_num = node['replication_port'] except KeyError: port_num = node['port'] - node_id = (port_num - 6000) / 10 + node_id = (port_num - 6200) / 10 Manager(['object-replicator']).once(number=node_id) - another_node_id = (another_port_num - 6000) / 10 + another_node_id = (another_port_num - 6200) / 10 Manager(['object-replicator']).once(number=another_node_id) # Assert primary node no longer has container/obj diff --git a/test/probe/test_reconstructor_revert.py b/test/probe/test_reconstructor_revert.py index 095843624c..845ff1a292 100755 --- a/test/probe/test_reconstructor_revert.py +++ b/test/probe/test_reconstructor_revert.py @@ -131,7 +131,7 @@ class TestReconstructorRevert(ECProbeTest): # fire up reconstructor on handoff nodes only for hnode in hnodes: - hnode_id = (hnode['port'] - 6000) / 10 + hnode_id = (hnode['port'] - 6200) / 10 self.reconstructor.once(number=hnode_id) # first three primaries have data again diff --git a/test/unit/__init__.py b/test/unit/__init__.py index 377ee2ecc2..c4c833a79c 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -256,9 +256,9 @@ def write_fake_ring(path, *devs): Pretty much just a two node, two replica, 2 part power ring... 
""" dev1 = {'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1', - 'port': 6000} + 'port': 6200} dev2 = {'id': 0, 'zone': 0, 'device': 'sdb1', 'ip': '127.0.0.1', - 'port': 6000} + 'port': 6200} dev1_updates, dev2_updates = devs or ({}, {}) @@ -278,7 +278,7 @@ class FabricatedRing(Ring): your tests needs. """ - def __init__(self, replicas=6, devices=8, nodes=4, port=6000, + def __init__(self, replicas=6, devices=8, nodes=4, port=6200, part_power=4): self.devices = devices self.nodes = nodes diff --git a/test/unit/account/test_reaper.py b/test/unit/account/test_reaper.py index 998b099b93..3c90673948 100644 --- a/test/unit/account/test_reaper.py +++ b/test/unit/account/test_reaper.py @@ -100,23 +100,23 @@ class FakeRing(object): def __init__(self): self.nodes = [{'id': '1', 'ip': '10.10.10.1', - 'port': 6002, + 'port': 6202, 'device': 'sda1'}, {'id': '2', 'ip': '10.10.10.2', - 'port': 6002, + 'port': 6202, 'device': 'sda1'}, {'id': '3', 'ip': '10.10.10.3', - 'port': 6002, + 'port': 6202, 'device': None}, {'id': '4', 'ip': '10.10.10.1', - 'port': 6002, + 'port': 6202, 'device': 'sda2'}, {'id': '5', 'ip': '10.10.10.1', - 'port': 6002, + 'port': 6202, 'device': 'sda3'}, ] diff --git a/test/unit/cli/test_ring_builder_analyzer.py b/test/unit/cli/test_ring_builder_analyzer.py index db23fec30a..2de2b16a9c 100644 --- a/test/unit/cli/test_ring_builder_analyzer.py +++ b/test/unit/cli/test_ring_builder_analyzer.py @@ -179,11 +179,11 @@ class TestParseScenario(unittest.TestCase): self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) # no weight - busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6000/d7']]]) + busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6200/d7']]]) self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) # too many fields - busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6000/d7', 1, 2]]]) + busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6200/d7', 1, 2]]]) self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) # 
can't parse @@ -198,7 +198,7 @@ class TestParseScenario(unittest.TestCase): self.assertEqual(str(err), expected) # negative weight - busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6000/d7', -1]]]) + busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6200/d7', -1]]]) self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) def test_bad_remove(self): diff --git a/test/unit/cli/test_ringbuilder.py b/test/unit/cli/test_ringbuilder.py index 55370b6184..35b1f6461c 100644 --- a/test/unit/cli/test_ringbuilder.py +++ b/test/unit/cli/test_ringbuilder.py @@ -75,8 +75,8 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # These should all match the first device in the sample ring # (see below) but not the second device self.search_values = ["d0", "/sda1", "r0", "z0", "z0-127.0.0.1", - "127.0.0.1", "z0:6000", ":6000", "R127.0.0.1", - "127.0.0.1R127.0.0.1", "R:6000", + "127.0.0.1", "z0:6200", ":6200", "R127.0.0.1", + "127.0.0.1R127.0.0.1", "R:6200", "_some meta data"] def setUp(self): @@ -110,7 +110,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, + 'port': 6200, 'device': 'sda1', 'meta': 'some meta data', }) @@ -118,21 +118,21 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): 'region': 1, 'zone': 1, 'ip': '127.0.0.2', - 'port': 6001, + 'port': 6201, 'device': 'sda2' }) ring.add_dev({'weight': 100.0, 'region': 2, 'zone': 2, 'ip': '127.0.0.3', - 'port': 6002, + 'port': 6202, 'device': 'sdc3' }) ring.add_dev({'weight': 100.0, 'region': 3, 'zone': 3, 'ip': '127.0.0.4', - 'port': 6003, + 'port': 6203, 'device': 'sdd4' }) ring.save(self.tmpfile) @@ -144,15 +144,15 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): def test_parse_search_values_old_format(self): # Test old format - argv = ["d0r0z0-127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data"] + argv = ["d0r0z0-127.0.0.1:6200R127.0.0.1:6200/sda1_some meta data"] search_values = 
ringbuilder._parse_search_values(argv) self.assertEqual(search_values['id'], 0) self.assertEqual(search_values['region'], 0) self.assertEqual(search_values['zone'], 0) self.assertEqual(search_values['ip'], '127.0.0.1') - self.assertEqual(search_values['port'], 6000) + self.assertEqual(search_values['port'], 6200) self.assertEqual(search_values['replication_ip'], '127.0.0.1') - self.assertEqual(search_values['replication_port'], 6000) + self.assertEqual(search_values['replication_port'], 6200) self.assertEqual(search_values['device'], 'sda1') self.assertEqual(search_values['meta'], 'some meta data') @@ -160,9 +160,9 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # Test new format argv = ["--id", "0", "--region", "0", "--zone", "0", "--ip", "127.0.0.1", - "--port", "6000", + "--port", "6200", "--replication-ip", "127.0.0.1", - "--replication-port", "6000", + "--replication-port", "6200", "--device", "sda1", "--meta", "some meta data", "--weight", "100"] search_values = ringbuilder._parse_search_values(argv) @@ -170,9 +170,9 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.assertEqual(search_values['region'], 0) self.assertEqual(search_values['zone'], 0) self.assertEqual(search_values['ip'], '127.0.0.1') - self.assertEqual(search_values['port'], 6000) + self.assertEqual(search_values['port'], 6200) self.assertEqual(search_values['replication_ip'], '127.0.0.1') - self.assertEqual(search_values['replication_port'], 6000) + self.assertEqual(search_values['replication_port'], 6200) self.assertEqual(search_values['device'], 'sda1') self.assertEqual(search_values['meta'], 'some meta data') self.assertEqual(search_values['weight'], 100) @@ -280,7 +280,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.create_sample_ring() # Test ipv4(old format) argv = ["", self.tmpfile, "add", - "r2z3-127.0.0.1:6000/sda3_some meta data", "3.14159265359"] + "r2z3-127.0.0.1:6200/sda3_some meta data", "3.14159265359"] 
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) # Check that device was created with given data @@ -289,19 +289,19 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.assertEqual(dev['region'], 2) self.assertEqual(dev['zone'], 3) self.assertEqual(dev['ip'], '127.0.0.1') - self.assertEqual(dev['port'], 6000) + self.assertEqual(dev['port'], 6200) self.assertEqual(dev['device'], 'sda3') self.assertEqual(dev['weight'], 3.14159265359) self.assertEqual(dev['replication_ip'], '127.0.0.1') - self.assertEqual(dev['replication_port'], 6000) + self.assertEqual(dev['replication_port'], 6200) self.assertEqual(dev['meta'], 'some meta data') def test_add_duplicate_devices(self): self.create_sample_ring() # Test adding duplicate devices argv = ["", self.tmpfile, "add", - "r1z1-127.0.0.1:6000/sda9", "3.14159265359", - "r1z1-127.0.0.1:6000/sda9", "2"] + "r1z1-127.0.0.1:6200/sda9", "3.14159265359", + "r1z1-127.0.0.1:6200/sda9", "2"] self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_add_device_ipv6_old_format(self): @@ -309,7 +309,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # Test ipv6(old format) argv = \ ["", self.tmpfile, "add", - "r2z3-2001:0000:1234:0000:0000:C1C0:ABCD:0876:6000" + "r2z3-2001:0000:1234:0000:0000:C1C0:ABCD:0876:6200" "R2::10:7000/sda3_some meta data", "3.14159265359"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) @@ -320,7 +320,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.assertEqual(dev['region'], 2) self.assertEqual(dev['zone'], 3) self.assertEqual(dev['ip'], '2001:0:1234::c1c0:abcd:876') - self.assertEqual(dev['port'], 6000) + self.assertEqual(dev['port'], 6200) self.assertEqual(dev['device'], 'sda3') self.assertEqual(dev['weight'], 3.14159265359) self.assertEqual(dev['replication_ip'], '2::10') @@ -337,9 +337,9 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", 
"127.0.0.2", - "--port", "6000", + "--port", "6200", "--replication-ip", "127.0.0.2", - "--replication-port", "6000", + "--replication-port", "6200", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) @@ -350,11 +350,11 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.assertEqual(dev['region'], 2) self.assertEqual(dev['zone'], 3) self.assertEqual(dev['ip'], '127.0.0.2') - self.assertEqual(dev['port'], 6000) + self.assertEqual(dev['port'], 6200) self.assertEqual(dev['device'], 'sda3') self.assertEqual(dev['weight'], 3.14159265359) self.assertEqual(dev['replication_ip'], '127.0.0.2') - self.assertEqual(dev['replication_port'], 6000) + self.assertEqual(dev['replication_port'], 6200) self.assertEqual(dev['meta'], 'some meta data') # Final check, rebalance and check ring is ok ring.rebalance() @@ -367,7 +367,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", "[3001:0000:1234:0000:0000:C1C0:ABCD:0876]", - "--port", "6000", + "--port", "6200", "--replication-ip", "[3::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -380,7 +380,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.assertEqual(dev['region'], 2) self.assertEqual(dev['zone'], 3) self.assertEqual(dev['ip'], '3001:0:1234::c1c0:abcd:876') - self.assertEqual(dev['port'], 6000) + self.assertEqual(dev['port'], 6200) self.assertEqual(dev['device'], 'sda3') self.assertEqual(dev['weight'], 3.14159265359) self.assertEqual(dev['replication_ip'], '3::10') @@ -397,7 +397,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", "test.test.com", - "--port", "6000", + "--port", "6200", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", 
@@ -410,7 +410,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.assertEqual(dev['region'], 2) self.assertEqual(dev['zone'], 3) self.assertEqual(dev['ip'], 'test.test.com') - self.assertEqual(dev['port'], 6000) + self.assertEqual(dev['port'], 6200) self.assertEqual(dev['device'], 'sda3') self.assertEqual(dev['weight'], 3.14159265359) self.assertEqual(dev['replication_ip'], 'r.test.com') @@ -428,14 +428,14 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): def test_add_device_already_exists(self): # Test Add a device that already exists argv = ["", self.tmpfile, "add", - "r0z0-127.0.0.1:6000/sda1_some meta data", "100"] + "r0z0-127.0.0.1:6200/sda1_some meta data", "100"] self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) def test_add_device_old_missing_region(self): self.create_sample_ring() # Test add device without specifying a region argv = ["", self.tmpfile, "add", - "z3-127.0.0.1:6000/sde3_some meta data", "3.14159265359"] + "z3-127.0.0.1:6200/sde3_some meta data", "3.14159265359"] exp_results = {'valid_exit_codes': [2]} self.run_srb(*argv, exp_results=exp_results) # Check that ring was created with sane value for region @@ -458,11 +458,11 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.assertEqual(dev['region'], 0) self.assertEqual(dev['zone'], 0) self.assertEqual(dev['ip'], '127.0.0.1') - self.assertEqual(dev['port'], 6000) + self.assertEqual(dev['port'], 6200) self.assertEqual(dev['device'], 'sda1') self.assertEqual(dev['weight'], 0) self.assertEqual(dev['replication_ip'], '127.0.0.1') - self.assertEqual(dev['replication_port'], 6000) + self.assertEqual(dev['replication_port'], 6200) self.assertEqual(dev['meta'], 'some meta data') # Check that second device in ring is not affected @@ -478,7 +478,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.create_sample_ring() # Test ipv4(old format) argv = ["", self.tmpfile, "remove", - 
"d0r0z0-127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data"] + "d0r0z0-127.0.0.1:6200R127.0.0.1:6200/sda1_some meta data"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) @@ -490,11 +490,11 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.assertEqual(dev['region'], 0) self.assertEqual(dev['zone'], 0) self.assertEqual(dev['ip'], '127.0.0.1') - self.assertEqual(dev['port'], 6000) + self.assertEqual(dev['port'], 6200) self.assertEqual(dev['device'], 'sda1') self.assertEqual(dev['weight'], 0) self.assertEqual(dev['replication_ip'], '127.0.0.1') - self.assertEqual(dev['replication_port'], 6000) + self.assertEqual(dev['replication_port'], 6200) self.assertEqual(dev['meta'], 'some meta data') # Check that second device in ring is not affected @@ -513,7 +513,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]", - "--port", "6000", + "--port", "6200", "--replication-ip", "[2::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -522,7 +522,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # Test ipv6(old format) argv = ["", self.tmpfile, "remove", - "d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6000" + "d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6200" "R[2::10]:7000/sda3_some meta data"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) @@ -545,7 +545,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.assertEqual(dev['region'], 2) self.assertEqual(dev['zone'], 3) self.assertEqual(dev['ip'], '2001:0:1234::c1c0:abcd:876') - self.assertEqual(dev['port'], 6000) + self.assertEqual(dev['port'], 6200) self.assertEqual(dev['device'], 'sda3') self.assertEqual(dev['weight'], 0) self.assertEqual(dev['replication_ip'], '2::10') @@ -563,9 +563,9 @@ class 
TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "remove", "--id", "0", "--region", "0", "--zone", "0", "--ip", "127.0.0.1", - "--port", "6000", + "--port", "6200", "--replication-ip", "127.0.0.1", - "--replication-port", "6000", + "--replication-port", "6200", "--device", "sda1", "--meta", "some meta data"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) @@ -578,11 +578,11 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.assertEqual(dev['region'], 0) self.assertEqual(dev['zone'], 0) self.assertEqual(dev['ip'], '127.0.0.1') - self.assertEqual(dev['port'], 6000) + self.assertEqual(dev['port'], 6200) self.assertEqual(dev['device'], 'sda1') self.assertEqual(dev['weight'], 0) self.assertEqual(dev['replication_ip'], '127.0.0.1') - self.assertEqual(dev['replication_port'], 6000) + self.assertEqual(dev['replication_port'], 6200) self.assertEqual(dev['meta'], 'some meta data') # Check that second device in ring is not affected @@ -655,7 +655,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", "test.test.com", - "--port", "6000", + "--port", "6200", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -667,7 +667,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "remove", "--id", "4", "--region", "2", "--zone", "3", "--ip", "test.test.com", - "--port", "6000", + "--port", "6200", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data"] @@ -692,7 +692,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.assertEqual(dev['region'], 2) self.assertEqual(dev['zone'], 3) self.assertEqual(dev['ip'], 'test.test.com') - self.assertEqual(dev['port'], 6000) + self.assertEqual(dev['port'], 6200) self.assertEqual(dev['device'], 
'sda3') self.assertEqual(dev['weight'], 0) self.assertEqual(dev['replication_ip'], 'r.test.com') @@ -741,7 +741,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.create_sample_ring() # Test ipv4(old format) argv = ["", self.tmpfile, "set_weight", - "d0r0z0-127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data", + "d0r0z0-127.0.0.1:6200R127.0.0.1:6200/sda1_some meta data", "3.14159265359"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) @@ -765,7 +765,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]", - "--port", "6000", + "--port", "6200", "--replication-ip", "[2::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -774,7 +774,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # Test ipv6(old format) argv = ["", self.tmpfile, "set_weight", - "d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6000" + "d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6200" "R[2::10]:7000/sda3_some meta data", "3.14159265359"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) @@ -802,9 +802,9 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "set_weight", "--id", "0", "--region", "0", "--zone", "0", "--ip", "127.0.0.1", - "--port", "6000", + "--port", "6200", "--replication-ip", "127.0.0.1", - "--replication-port", "6000", + "--replication-port", "6200", "--device", "sda1", "--meta", "some meta data", "3.14159265359"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) @@ -828,7 +828,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]", - "--port", "6000", + "--port", "6200", 
"--replication-ip", "[2::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -840,7 +840,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "set_weight", "--id", "4", "--region", "2", "--zone", "3", "--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]", - "--port", "6000", + "--port", "6200", "--replication-ip", "[2::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "3.14159265359"] @@ -870,7 +870,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", "test.test.com", - "--port", "6000", + "--port", "6200", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -882,7 +882,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "set_weight", "--id", "4", "--region", "2", "--zone", "3", "--ip", "test.test.com", - "--port", "6000", + "--port", "6200", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "3.14159265359"] @@ -937,7 +937,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # Check that second device in ring is not affected dev = ring.devs[1] self.assertEqual(dev['ip'], '127.0.0.2') - self.assertEqual(dev['port'], 6001) + self.assertEqual(dev['port'], 6201) self.assertEqual(dev['device'], 'sda2') self.assertEqual(dev['meta'], '') @@ -949,7 +949,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.create_sample_ring() # Test ipv4(old format) argv = ["", self.tmpfile, "set_info", - "d0r0z0-127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data", + "d0r0z0-127.0.0.1:6200R127.0.0.1:6200/sda1_some meta data", "127.0.1.1:8000R127.0.1.1:8000/sda10_other meta data"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) @@ -966,7 +966,7 @@ class TestCommands(unittest.TestCase, 
RunSwiftRingBuilderMixin): # Check that second device in ring is not affected dev = ring.devs[1] self.assertEqual(dev['ip'], '127.0.0.2') - self.assertEqual(dev['port'], 6001) + self.assertEqual(dev['port'], 6201) self.assertEqual(dev['device'], 'sda2') self.assertEqual(dev['meta'], '') @@ -981,7 +981,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]", - "--port", "6000", + "--port", "6200", "--replication-ip", "[2::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -990,7 +990,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # Test ipv6(old format) argv = ["", self.tmpfile, "set_info", - "d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6000" + "d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6200" "R[2::10]:7000/sda3_some meta data", "[3001:0000:1234:0000:0000:C1C0:ABCD:0876]:8000" "R[3::10]:8000/sda30_other meta data"] @@ -1000,16 +1000,16 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # Check that second device in ring is not affected dev = ring.devs[0] self.assertEqual(dev['ip'], '127.0.0.1') - self.assertEqual(dev['port'], 6000) + self.assertEqual(dev['port'], 6200) self.assertEqual(dev['replication_ip'], '127.0.0.1') - self.assertEqual(dev['replication_port'], 6000) + self.assertEqual(dev['replication_port'], 6200) self.assertEqual(dev['device'], 'sda1') self.assertEqual(dev['meta'], 'some meta data') # Check that second device in ring is not affected dev = ring.devs[1] self.assertEqual(dev['ip'], '127.0.0.2') - self.assertEqual(dev['port'], 6001) + self.assertEqual(dev['port'], 6201) self.assertEqual(dev['device'], 'sda2') self.assertEqual(dev['meta'], '') @@ -1033,9 +1033,9 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "set_info", "--id", "0", "--region", "0", "--zone", "0", "--ip", "127.0.0.1", - "--port", 
"6000", + "--port", "6200", "--replication-ip", "127.0.0.1", - "--replication-port", "6000", + "--replication-port", "6200", "--device", "sda1", "--meta", "some meta data", "--change-ip", "127.0.2.1", "--change-port", "9000", @@ -1057,7 +1057,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # Check that second device in ring is not affected dev = ring.devs[1] self.assertEqual(dev['ip'], '127.0.0.2') - self.assertEqual(dev['port'], 6001) + self.assertEqual(dev['port'], 6201) self.assertEqual(dev['device'], 'sda2') self.assertEqual(dev['meta'], '') @@ -1072,7 +1072,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]", - "--port", "6000", + "--port", "6200", "--replication-ip", "[2::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -1084,7 +1084,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "set_info", "--id", "4", "--region", "2", "--zone", "3", "--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]", - "--port", "6000", + "--port", "6200", "--replication-ip", "[2::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -1099,16 +1099,16 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # Check that second device in ring is not affected dev = ring.devs[0] self.assertEqual(dev['ip'], '127.0.0.1') - self.assertEqual(dev['port'], 6000) + self.assertEqual(dev['port'], 6200) self.assertEqual(dev['replication_ip'], '127.0.0.1') - self.assertEqual(dev['replication_port'], 6000) + self.assertEqual(dev['replication_port'], 6200) self.assertEqual(dev['device'], 'sda1') self.assertEqual(dev['meta'], 'some meta data') # Check that second device in ring is not affected dev = ring.devs[1] self.assertEqual(dev['ip'], '127.0.0.2') - self.assertEqual(dev['port'], 6001) + self.assertEqual(dev['port'], 6201) 
self.assertEqual(dev['device'], 'sda2') self.assertEqual(dev['meta'], '') @@ -1133,7 +1133,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", "test.test.com", - "--port", "6000", + "--port", "6200", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -1145,7 +1145,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "set_info", "--id", "4", "--region", "2", "--zone", "3", "--ip", "test.test.com", - "--port", "6000", + "--port", "6200", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -1160,16 +1160,16 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # Check that second device in ring is not affected dev = ring.devs[0] self.assertEqual(dev['ip'], '127.0.0.1') - self.assertEqual(dev['port'], 6000) + self.assertEqual(dev['port'], 6200) self.assertEqual(dev['replication_ip'], '127.0.0.1') - self.assertEqual(dev['replication_port'], 6000) + self.assertEqual(dev['replication_port'], 6200) self.assertEqual(dev['device'], 'sda1') self.assertEqual(dev['meta'], 'some meta data') # Check that second device in ring is not affected dev = ring.devs[1] self.assertEqual(dev['ip'], '127.0.0.2') - self.assertEqual(dev['port'], 6001) + self.assertEqual(dev['port'], 6201) self.assertEqual(dev['device'], 'sda2') self.assertEqual(dev['meta'], '') @@ -1206,14 +1206,14 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "set_info", "--id", "0", "--region", "0", "--zone", "0", "--ip", "127.0.0.1", - "--port", "6000", + "--port", "6200", "--replication-ip", "127.0.0.1", - "--replication-port", "6000", + "--replication-port", "6200", "--device", "sda1", "--meta", "some meta data", "--change-ip", "127.0.0.2", - "--change-port", "6001", + "--change-port", "6201", "--change-replication-ip", 
"127.0.0.2", - "--change-replication-port", "6001", + "--change-replication-port", "6201", "--change-device", "sda2", "--change-meta", ""] self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) @@ -1365,7 +1365,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.create_sample_ring() # Test ipv4(old format) argv = ["", self.tmpfile, "search", - "d0r0z0-127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data"] + "d0r0z0-127.0.0.1:6200R127.0.0.1:6200/sda1_some meta data"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_search_device_ipv6_old_format(self): @@ -1375,7 +1375,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]", - "--port", "6000", + "--port", "6200", "--replication-ip", "[2::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -1389,7 +1389,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # Test ipv6(old format) argv = ["", self.tmpfile, "search", - "d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6000" + "d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6200" "R[2::10]:7000/sda3_some meta data"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) @@ -1400,9 +1400,9 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "search", "--id", "0", "--region", "0", "--zone", "0", "--ip", "127.0.0.1", - "--port", "6000", + "--port", "6200", "--replication-ip", "127.0.0.1", - "--replication-port", "6000", + "--replication-port", "6200", "--device", "sda1", "--meta", "some meta data"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) @@ -1413,7 +1413,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]", - "--port", "6000", + "--port", "6200", "--replication-ip", 
"[2::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -1430,7 +1430,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "search", "--id", "4", "--region", "2", "--zone", "3", "--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]", - "--port", "6000", + "--port", "6200", "--replication-ip", "[2::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data"] @@ -1443,7 +1443,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", "test.test.com", - "--port", "6000", + "--port", "6200", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -1459,7 +1459,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "search", "--id", "4", "--region", "2", "--zone", "3", "--ip", "test.test.com", - "--port", "6000", + "--port", "6200", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data"] @@ -1485,7 +1485,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ring.save(self.tmpfile) # Test ipv4(old format) argv = ["", self.tmpfile, "list_parts", - "d0r0z0-127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data"] + "d0r0z0-127.0.0.1:6200R127.0.0.1:6200/sda1_some meta data"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) def test_list_parts_ipv6_old_format(self): @@ -1495,7 +1495,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]", - "--port", "6000", + "--port", "6200", "--replication-ip", "[2::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -1509,7 +1509,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # Test ipv6(old format) argv = ["", self.tmpfile, 
"list_parts", - "d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6000" + "d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6200" "R[2::10]:7000/sda3_some meta data"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) @@ -1523,9 +1523,9 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "list_parts", "--id", "0", "--region", "0", "--zone", "0", "--ip", "127.0.0.1", - "--port", "6000", + "--port", "6200", "--replication-ip", "127.0.0.1", - "--replication-port", "6000", + "--replication-port", "6200", "--device", "sda1", "--meta", "some meta data"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) @@ -1536,7 +1536,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]", - "--port", "6000", + "--port", "6200", "--replication-ip", "[2::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -1553,7 +1553,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "list_parts", "--id", "4", "--region", "2", "--zone", "3", "--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]", - "--port", "6000", + "--port", "6200", "--replication-ip", "[2::10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data"] @@ -1566,7 +1566,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "add", "--region", "2", "--zone", "3", "--ip", "test.test.com", - "--port", "6000", + "--port", "6200", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", @@ -1583,7 +1583,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ["", self.tmpfile, "list_parts", "--id", "4", "--region", "2", "--zone", "3", "--ip", "test.test.com", - "--port", "6000", + "--port", "6200", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", 
"sda3", "--meta", "some meta data"] @@ -1643,17 +1643,17 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "Devices: id region zone ip address port " \ "replication ip replication port name weight " \ "partitions balance flags meta\n" \ - " 0 0 0 127.0.0.1 6000 " \ - "127.0.0.1 6000 sda1 100.00" \ + " 0 0 0 127.0.0.1 6200 " \ + "127.0.0.1 6200 sda1 100.00" \ " 0 -100.00 some meta data\n" \ - " 1 1 1 127.0.0.2 6001 " \ - "127.0.0.2 6001 sda2 0.00" \ + " 1 1 1 127.0.0.2 6201 " \ + "127.0.0.2 6201 sda2 0.00" \ " 0 0.00 DEL \n" \ - " 2 2 2 127.0.0.3 6002 " \ - "127.0.0.3 6002 sdc3 100.00" \ + " 2 2 2 127.0.0.3 6202 " \ + "127.0.0.3 6202 sdc3 100.00" \ " 0 -100.00 \n" \ - " 3 3 3 127.0.0.4 6003 " \ - "127.0.0.4 6003 sdd4 0.00" \ + " 3 3 3 127.0.0.4 6203 " \ + "127.0.0.4 6203 sdd4 0.00" \ " 0 0.00 \n" % (self.tmpfile, self.tmpfile) self.assertEqual(expected, mock_stdout.getvalue()) @@ -1841,9 +1841,9 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # and lost the builder file self.create_sample_ring() - argv = ["", self.tmpfile, "add", "r1z1-127.0.0.1:6000/sdb", "1.0"] + argv = ["", self.tmpfile, "add", "r1z1-127.0.0.1:6200/sdb", "1.0"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) - argv = ["", self.tmpfile, "add", "r1z1-127.0.0.1:6000/sdc", "1.0"] + argv = ["", self.tmpfile, "add", "r1z1-127.0.0.1:6200/sdc", "1.0"] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) argv = ["", self.tmpfile, "rebalance"] self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv) @@ -1899,7 +1899,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): argv = ["", self.tmpfile, "add", - "r1z1-127.0.0.1:6000/%s" % device_name, + "r1z1-127.0.0.1:6200/%s" % device_name, "1"] self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) @@ -1909,7 +1909,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "--region", "1", "--zone", "1", "--ip", "127.0.0.1", - "--port", "6000", + "--port", "6200", 
"--device", device_name, "--weight", "100"] self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) diff --git a/test/unit/common/middleware/test_list_endpoints.py b/test/unit/common/middleware/test_list_endpoints.py index cc78e8f479..bd366fb159 100644 --- a/test/unit/common/middleware/test_list_endpoints.py +++ b/test/unit/common/middleware/test_list_endpoints.py @@ -73,17 +73,17 @@ class TestListEndpoints(unittest.TestCase): array.array('H', [1, 0, 1, 0]), array.array('H', [4, 3, 4, 3])] intended_devs = [{'id': 0, 'zone': 0, 'weight': 1.0, - 'ip': '10.1.1.1', 'port': 6000, + 'ip': '10.1.1.1', 'port': 6200, 'device': 'sda1'}, {'id': 1, 'zone': 0, 'weight': 1.0, - 'ip': '10.1.1.1', 'port': 6000, + 'ip': '10.1.1.1', 'port': 6200, 'device': 'sdb1'}, None, {'id': 3, 'zone': 2, 'weight': 1.0, - 'ip': '10.1.2.1', 'port': 6000, + 'ip': '10.1.2.1', 'port': 6200, 'device': 'sdc1'}, {'id': 4, 'zone': 2, 'weight': 1.0, - 'ip': '10.1.2.2', 'port': 6000, + 'ip': '10.1.2.2', 'port': 6200, 'device': 'sdd1'}] intended_part_shift = 30 ring.RingData(intended_replica2part2dev_id_a, @@ -241,16 +241,16 @@ class TestListEndpoints(unittest.TestCase): self.assertEqual(resp.status_int, 200) self.assertEqual(resp.content_type, 'application/json') self.assertEqual(json.loads(resp.body), [ - "http://10.1.1.1:6000/sdb1/1/a/c/o1", - "http://10.1.2.2:6000/sdd1/1/a/c/o1" + "http://10.1.1.1:6200/sdb1/1/a/c/o1", + "http://10.1.2.2:6200/sdd1/1/a/c/o1" ]) # test policies with no version endpoint name expected = [[ - "http://10.1.1.1:6000/sdb1/1/a/c/o1", - "http://10.1.2.2:6000/sdd1/1/a/c/o1"], [ - "http://10.1.1.1:6000/sda1/1/a/c/o1", - "http://10.1.2.1:6000/sdc1/1/a/c/o1" + "http://10.1.1.1:6200/sdb1/1/a/c/o1", + "http://10.1.2.2:6200/sdd1/1/a/c/o1"], [ + "http://10.1.1.1:6200/sda1/1/a/c/o1", + "http://10.1.2.1:6200/sdc1/1/a/c/o1" ]] PATCHGI = 'swift.common.middleware.list_endpoints.get_container_info' for pol in POLICIES: @@ -267,25 +267,25 @@ class TestListEndpoints(unittest.TestCase): 
self.list_endpoints) self.assertEqual(resp.status_int, 200) self.assertEqual(json.loads(resp.body), [ - "http://10.1.1.1:6000/sdb1/3/a/c/o1/", - "http://10.1.2.2:6000/sdd1/3/a/c/o1/" + "http://10.1.1.1:6200/sdb1/3/a/c/o1/", + "http://10.1.2.2:6200/sdd1/3/a/c/o1/" ]) resp = Request.blank('/endpoints/a/c2').get_response( self.list_endpoints) self.assertEqual(resp.status_int, 200) self.assertEqual(json.loads(resp.body), [ - "http://10.1.1.1:6000/sda1/2/a/c2", - "http://10.1.2.1:6000/sdc1/2/a/c2" + "http://10.1.1.1:6200/sda1/2/a/c2", + "http://10.1.2.1:6200/sdc1/2/a/c2" ]) resp = Request.blank('/endpoints/a1').get_response( self.list_endpoints) self.assertEqual(resp.status_int, 200) self.assertEqual(json.loads(resp.body), [ - "http://10.1.2.1:6000/sdc1/0/a1", - "http://10.1.1.1:6000/sda1/0/a1", - "http://10.1.1.1:6000/sdb1/0/a1" + "http://10.1.2.1:6200/sdc1/0/a1", + "http://10.1.1.1:6200/sda1/0/a1", + "http://10.1.1.1:6200/sdb1/0/a1" ]) resp = Request.blank('/endpoints/').get_response( @@ -296,24 +296,24 @@ class TestListEndpoints(unittest.TestCase): self.list_endpoints) self.assertEqual(resp.status_int, 200) self.assertEqual(json.loads(resp.body), [ - "http://10.1.1.1:6000/sdb1/3/a/c%202", - "http://10.1.2.2:6000/sdd1/3/a/c%202" + "http://10.1.1.1:6200/sdb1/3/a/c%202", + "http://10.1.2.2:6200/sdd1/3/a/c%202" ]) resp = Request.blank('/endpoints/a/c%202').get_response( self.list_endpoints) self.assertEqual(resp.status_int, 200) self.assertEqual(json.loads(resp.body), [ - "http://10.1.1.1:6000/sdb1/3/a/c%202", - "http://10.1.2.2:6000/sdd1/3/a/c%202" + "http://10.1.1.1:6200/sdb1/3/a/c%202", + "http://10.1.2.2:6200/sdd1/3/a/c%202" ]) resp = Request.blank('/endpoints/ac%20count/con%20tainer/ob%20ject') \ .get_response(self.list_endpoints) self.assertEqual(resp.status_int, 200) self.assertEqual(json.loads(resp.body), [ - "http://10.1.1.1:6000/sdb1/3/ac%20count/con%20tainer/ob%20ject", - "http://10.1.2.2:6000/sdd1/3/ac%20count/con%20tainer/ob%20ject" + 
"http://10.1.1.1:6200/sdb1/3/ac%20count/con%20tainer/ob%20ject", + "http://10.1.2.2:6200/sdd1/3/ac%20count/con%20tainer/ob%20ject" ]) resp = Request.blank('/endpoints/a/c/o1', {'REQUEST_METHOD': 'POST'}) \ @@ -359,16 +359,16 @@ class TestListEndpoints(unittest.TestCase): def test_v1_response(self): req = Request.blank('/endpoints/v1/a/c/o1') resp = req.get_response(self.list_endpoints) - expected = ["http://10.1.1.1:6000/sdb1/1/a/c/o1", - "http://10.1.2.2:6000/sdd1/1/a/c/o1"] + expected = ["http://10.1.1.1:6200/sdb1/1/a/c/o1", + "http://10.1.2.2:6200/sdd1/1/a/c/o1"] self.assertEqual(resp.body, json.dumps(expected)) def test_v2_obj_response(self): req = Request.blank('/endpoints/v2/a/c/o1') resp = req.get_response(self.list_endpoints) expected = { - 'endpoints': ["http://10.1.1.1:6000/sdb1/1/a/c/o1", - "http://10.1.2.2:6000/sdd1/1/a/c/o1"], + 'endpoints': ["http://10.1.1.1:6200/sdb1/1/a/c/o1", + "http://10.1.2.2:6200/sdd1/1/a/c/o1"], 'headers': {'X-Backend-Storage-Policy-Index': "0"}, } self.assertEqual(resp.body, json.dumps(expected)) @@ -394,9 +394,9 @@ class TestListEndpoints(unittest.TestCase): req = Request.blank('/endpoints/v2/a') resp = req.get_response(self.list_endpoints) expected = { - 'endpoints': ["http://10.1.2.1:6000/sdc1/0/a", - "http://10.1.1.1:6000/sda1/0/a", - "http://10.1.1.1:6000/sdb1/0/a"], + 'endpoints': ["http://10.1.2.1:6200/sdc1/0/a", + "http://10.1.1.1:6200/sda1/0/a", + "http://10.1.1.1:6200/sdb1/0/a"], 'headers': {}, } # container @@ -404,9 +404,9 @@ class TestListEndpoints(unittest.TestCase): req = Request.blank('/endpoints/v2/a/c') resp = req.get_response(self.list_endpoints) expected = { - 'endpoints': ["http://10.1.2.2:6000/sdd1/0/a/c", - "http://10.1.1.1:6000/sda1/0/a/c", - "http://10.1.2.1:6000/sdc1/0/a/c"], + 'endpoints': ["http://10.1.2.2:6200/sdd1/0/a/c", + "http://10.1.1.1:6200/sda1/0/a/c", + "http://10.1.2.1:6200/sdc1/0/a/c"], 'headers': {}, } self.assertEqual(resp.body, json.dumps(expected)) @@ -414,9 +414,9 @@ class 
TestListEndpoints(unittest.TestCase): def test_version_account_response(self): req = Request.blank('/endpoints/a') resp = req.get_response(self.list_endpoints) - expected = ["http://10.1.2.1:6000/sdc1/0/a", - "http://10.1.1.1:6000/sda1/0/a", - "http://10.1.1.1:6000/sdb1/0/a"] + expected = ["http://10.1.2.1:6200/sdc1/0/a", + "http://10.1.1.1:6200/sda1/0/a", + "http://10.1.1.1:6200/sdb1/0/a"] self.assertEqual(resp.body, json.dumps(expected)) req = Request.blank('/endpoints/v1.0/a') resp = req.get_response(self.list_endpoints) @@ -425,9 +425,9 @@ class TestListEndpoints(unittest.TestCase): req = Request.blank('/endpoints/v2/a') resp = req.get_response(self.list_endpoints) expected = { - 'endpoints': ["http://10.1.2.1:6000/sdc1/0/a", - "http://10.1.1.1:6000/sda1/0/a", - "http://10.1.1.1:6000/sdb1/0/a"], + 'endpoints': ["http://10.1.2.1:6200/sdc1/0/a", + "http://10.1.1.1:6200/sda1/0/a", + "http://10.1.1.1:6200/sdb1/0/a"], 'headers': {}, } self.assertEqual(resp.body, json.dumps(expected)) diff --git a/test/unit/common/middleware/test_recon.py b/test/unit/common/middleware/test_recon.py index 1dbf4c6344..a194378083 100644 --- a/test/unit/common/middleware/test_recon.py +++ b/test/unit/common/middleware/test_recon.py @@ -234,17 +234,17 @@ class TestReconSuccess(TestCase): self.ring_part_shift = 5 self.ring_devs = [{'id': 0, 'zone': 0, 'weight': 1.0, - 'ip': '10.1.1.1', 'port': 6000, + 'ip': '10.1.1.1', 'port': 6200, 'device': 'sda1'}, {'id': 1, 'zone': 0, 'weight': 1.0, - 'ip': '10.1.1.1', 'port': 6000, + 'ip': '10.1.1.1', 'port': 6200, 'device': 'sdb1'}, None, {'id': 3, 'zone': 2, 'weight': 1.0, - 'ip': '10.1.2.1', 'port': 6000, + 'ip': '10.1.2.1', 'port': 6200, 'device': 'sdc1'}, {'id': 4, 'zone': 2, 'weight': 1.0, - 'ip': '10.1.2.2', 'port': 6000, + 'ip': '10.1.2.2', 'port': 6200, 'device': 'sdd1'}] self._create_rings() @@ -314,15 +314,15 @@ class TestReconSuccess(TestCase): # We should only see configured and present rings, so to handle the # "normal" case just patch 
the policies to match the existing rings. expt_out = {'%s/account.ring.gz' % self.tempdir: - 'd288bdf39610e90d4f0b67fa00eeec4f', + '11e0c98abb209474d40d6a9a8a523803', '%s/container.ring.gz' % self.tempdir: - '9a5a05a8a4fbbc61123de792dbe4592d', + '6685496a4045ce0be123068e0165a64d', '%s/object.ring.gz' % self.tempdir: - 'da02bfbd0bf1e7d56faea15b6fe5ab1e', + '782728be98644fb725e165d4bf5728d4', '%s/object-1.ring.gz' % self.tempdir: - '3f1899b27abf5f2efcc67d6fae1e1c64', + '7c3a4bc9f724d4eb69c9b797cdc28b8c', '%s/object-2.ring.gz' % self.tempdir: - '8f0e57079b3c245d9b3d5a428e9312ee'} + '324b9c4da20cf7ef097edbd219d296e0'} # We need to instantiate app after overriding the configured policies. # object-{1,2}.ring.gz should both appear as they are present on disk @@ -372,7 +372,7 @@ class TestReconSuccess(TestCase): expt_out = {'%s/account.ring.gz' % self.tempdir: None, '%s/container.ring.gz' % self.tempdir: None, '%s/object.ring.gz' % self.tempdir: - 'da02bfbd0bf1e7d56faea15b6fe5ab1e'} + '782728be98644fb725e165d4bf5728d4'} ringmd5 = self.app.get_ring_md5(openr=fake_open_objonly) self.assertEqual(sorted(ringmd5.items()), sorted(expt_out.items())) @@ -387,13 +387,13 @@ class TestReconSuccess(TestCase): # later moved into place, we shouldn't need to restart object-server # for it to appear in recon. expt_out = {'%s/account.ring.gz' % self.tempdir: - 'd288bdf39610e90d4f0b67fa00eeec4f', + '11e0c98abb209474d40d6a9a8a523803', '%s/container.ring.gz' % self.tempdir: - '9a5a05a8a4fbbc61123de792dbe4592d', + '6685496a4045ce0be123068e0165a64d', '%s/object.ring.gz' % self.tempdir: - 'da02bfbd0bf1e7d56faea15b6fe5ab1e', + '782728be98644fb725e165d4bf5728d4', '%s/object-2.ring.gz' % self.tempdir: - '8f0e57079b3c245d9b3d5a428e9312ee'} + '324b9c4da20cf7ef097edbd219d296e0'} # We need to instantiate app after overriding the configured policies. # object-1.ring.gz should not appear as it's present but unconfigured. 
@@ -412,7 +412,7 @@ class TestReconSuccess(TestCase): array.array('H', [1, 1, 0, 3])] self._create_ring(os.path.join(self.tempdir, ringfn), ringmap, self.ring_devs, self.ring_part_shift) - expt_out[ringpath] = 'acfa4b85396d2a33f361ebc07d23031d' + expt_out[ringpath] = 'a7e591642beea6933f64aebd56f357d9' # We should now see it in the ringmd5 response, without a restart # (using the same app instance) @@ -428,13 +428,13 @@ class TestReconSuccess(TestCase): # Object rings that are configured but missing aren't meant to appear # in the ringmd5 response. expt_out = {'%s/account.ring.gz' % self.tempdir: - 'd288bdf39610e90d4f0b67fa00eeec4f', + '11e0c98abb209474d40d6a9a8a523803', '%s/container.ring.gz' % self.tempdir: - '9a5a05a8a4fbbc61123de792dbe4592d', + '6685496a4045ce0be123068e0165a64d', '%s/object.ring.gz' % self.tempdir: - 'da02bfbd0bf1e7d56faea15b6fe5ab1e', + '782728be98644fb725e165d4bf5728d4', '%s/object-2.ring.gz' % self.tempdir: - '8f0e57079b3c245d9b3d5a428e9312ee'} + '324b9c4da20cf7ef097edbd219d296e0'} # We need to instantiate app after overriding the configured policies. # object-1.ring.gz should not appear as it's present but unconfigured. @@ -451,11 +451,11 @@ class TestReconSuccess(TestCase): # Object rings that are present but not configured in swift.conf # aren't meant to appear in the ringmd5 response. expt_out = {'%s/account.ring.gz' % self.tempdir: - 'd288bdf39610e90d4f0b67fa00eeec4f', + '11e0c98abb209474d40d6a9a8a523803', '%s/container.ring.gz' % self.tempdir: - '9a5a05a8a4fbbc61123de792dbe4592d', + '6685496a4045ce0be123068e0165a64d', '%s/object.ring.gz' % self.tempdir: - 'da02bfbd0bf1e7d56faea15b6fe5ab1e'} + '782728be98644fb725e165d4bf5728d4'} # We need to instantiate app after overriding the configured policies. 
# object-{1,2}.ring.gz should not appear as they are present on disk diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py index c858c51977..bdcd96f765 100644 --- a/test/unit/common/ring/test_builder.py +++ b/test/unit/common/ring/test_builder.py @@ -217,12 +217,12 @@ class TestRingBuilder(unittest.TestCase): rb = ring.RingBuilder(8, 3, 1) # test add new dev with no id dev_id = rb.add_dev({'zone': 0, 'region': 1, 'weight': 1, - 'ip': '127.0.0.1', 'port': 6000}) + 'ip': '127.0.0.1', 'port': 6200}) self.assertEqual(rb.devs[0]['id'], 0) self.assertEqual(dev_id, 0) # test add another dev with no id dev_id = rb.add_dev({'zone': 3, 'region': 2, 'weight': 1, - 'ip': '127.0.0.1', 'port': 6000}) + 'ip': '127.0.0.1', 'port': 6200}) self.assertEqual(rb.devs[1]['id'], 1) self.assertEqual(dev_id, 1) @@ -284,17 +284,17 @@ class TestRingBuilder(unittest.TestCase): def test_remove_a_lot(self): rb = ring.RingBuilder(3, 3, 1) rb.add_dev({'id': 0, 'device': 'd0', 'ip': '10.0.0.1', - 'port': 6002, 'weight': 1000.0, 'region': 0, 'zone': 1}) + 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 1}) rb.add_dev({'id': 1, 'device': 'd1', 'ip': '10.0.0.2', - 'port': 6002, 'weight': 1000.0, 'region': 0, 'zone': 2}) + 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 2}) rb.add_dev({'id': 2, 'device': 'd2', 'ip': '10.0.0.3', - 'port': 6002, 'weight': 1000.0, 'region': 0, 'zone': 3}) + 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 3}) rb.add_dev({'id': 3, 'device': 'd3', 'ip': '10.0.0.1', - 'port': 6002, 'weight': 1000.0, 'region': 0, 'zone': 1}) + 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 1}) rb.add_dev({'id': 4, 'device': 'd4', 'ip': '10.0.0.2', - 'port': 6002, 'weight': 1000.0, 'region': 0, 'zone': 2}) + 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 2}) rb.add_dev({'id': 5, 'device': 'd5', 'ip': '10.0.0.3', - 'port': 6002, 'weight': 1000.0, 'region': 0, 'zone': 3}) + 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 3}) 
rb.rebalance() rb.validate() @@ -314,13 +314,13 @@ class TestRingBuilder(unittest.TestCase): def test_remove_zero_weighted(self): rb = ring.RingBuilder(8, 3, 0) rb.add_dev({'id': 0, 'device': 'd0', 'ip': '10.0.0.1', - 'port': 6002, 'weight': 1000.0, 'region': 0, 'zone': 1}) + 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 1}) rb.add_dev({'id': 1, 'device': 'd1', 'ip': '10.0.0.2', - 'port': 6002, 'weight': 0.0, 'region': 0, 'zone': 2}) + 'port': 6202, 'weight': 0.0, 'region': 0, 'zone': 2}) rb.add_dev({'id': 2, 'device': 'd2', 'ip': '10.0.0.3', - 'port': 6002, 'weight': 1000.0, 'region': 0, 'zone': 3}) + 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 3}) rb.add_dev({'id': 3, 'device': 'd3', 'ip': '10.0.0.1', - 'port': 6002, 'weight': 1000.0, 'region': 0, 'zone': 1}) + 'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 1}) rb.rebalance() rb.remove_dev(1) @@ -1117,14 +1117,14 @@ class TestRingBuilder(unittest.TestCase): def test_multiple_duplicate_device_assignment(self): rb = ring.RingBuilder(4, 4, 1) devs = [ - 'r1z1-127.0.0.1:33440/d1', - 'r1z1-127.0.0.1:33441/d2', - 'r1z1-127.0.0.1:33442/d3', + 'r1z1-127.0.0.1:6200/d1', + 'r1z1-127.0.0.1:6201/d2', + 'r1z1-127.0.0.1:6202/d3', 'r1z1-127.0.0.1:33443/d4', - 'r1z1-127.0.0.2:33440/d5', - 'r1z1-127.0.0.2:33441/d6', - 'r1z1-127.0.0.2:33442/d7', - 'r1z1-127.0.0.2:33442/d8', + 'r1z1-127.0.0.2:6200/d5', + 'r1z1-127.0.0.2:6201/d6', + 'r1z1-127.0.0.2:6202/d7', + 'r1z1-127.0.0.2:6202/d8', ] for add_value in devs: dev = utils.parse_add_value(add_value) @@ -2357,11 +2357,11 @@ class TestRingBuilder(unittest.TestCase): def test_more_devices_than_replicas_validation_when_removed_dev(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'weight': 1.0, 'device': 'sda'}) + 'port': 6200, 'weight': 1.0, 'device': 'sda'}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'weight': 1.0, 'device': 'sdb'}) + 'port': 6200, 'weight': 
1.0, 'device': 'sdb'}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'weight': 1.0, 'device': 'sdc'}) + 'port': 6200, 'weight': 1.0, 'device': 'sdc'}) rb.rebalance() rb.remove_dev(2) with self.assertRaises(ValueError) as e: @@ -2383,7 +2383,7 @@ class TestRingBuilder(unittest.TestCase): else: dev_name = 'sda' rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'weight': 1.0, 'device': dev_name}) + 'port': 6200, 'weight': 1.0, 'device': dev_name}) rb.rebalance() if (n > 0): rb.pretend_min_part_hours_passed() @@ -2399,7 +2399,8 @@ class TestRingBuilder(unittest.TestCase): add_dev_count = 6 rb = self._add_dev_delete_first_n(add_dev_count, add_dev_count - 3) new_dev_id = rb.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'weight': 1.0, 'device': 'sda'}) + 'port': 6200, 'weight': 1.0, + 'device': 'sda'}) self.assertTrue(new_dev_id < add_dev_count) # try with non-contiguous holes @@ -2407,7 +2408,7 @@ class TestRingBuilder(unittest.TestCase): rb2 = ring.RingBuilder(8, 3, 1) for i in range(6): rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'weight': 1.0, 'device': 'sda'}) + 'port': 6200, 'weight': 1.0, 'device': 'sda'}) rb2.rebalance() rb2.pretend_min_part_hours_passed() rb2.remove_dev(2) @@ -2415,12 +2416,12 @@ class TestRingBuilder(unittest.TestCase): rb2.pretend_min_part_hours_passed() rb2.rebalance() first = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'weight': 1.0, 'device': 'sda'}) + 'port': 6200, 'weight': 1.0, 'device': 'sda'}) second = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'weight': 1.0, 'device': 'sda'}) + 'port': 6200, 'weight': 1.0, 'device': 'sda'}) # add a new one (without reusing a hole) third = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'weight': 1.0, 'device': 'sda'}) + 'port': 6200, 'weight': 1.0, 'device': 'sda'}) self.assertEqual(first, 2) 
self.assertEqual(second, 5) self.assertEqual(third, 6) @@ -2434,7 +2435,7 @@ class TestRingBuilder(unittest.TestCase): try: new_dev_id = rb.add_dev({'id': exp_new_dev_id, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'weight': 1.0, + 'port': 6200, 'weight': 1.0, 'device': 'sda'}) self.assertEqual(new_dev_id, exp_new_dev_id) except exceptions.DuplicateDeviceError: @@ -3073,23 +3074,23 @@ class TestGetRequiredOverload(unittest.TestCase): # z0 rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'device': 'sda', 'weight': 1000}) + 'port': 6200, 'device': 'sda', 'weight': 1000}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'device': 'sdb', 'weight': 1000}) + 'port': 6200, 'device': 'sdb', 'weight': 1000}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'device': 'sdc', 'weight': 1000}) + 'port': 6200, 'device': 'sdc', 'weight': 1000}) # z1 rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.0.2', - 'port': 6000, 'device': 'sda', 'weight': 1000}) + 'port': 6200, 'device': 'sda', 'weight': 1000}) rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.0.2', - 'port': 6000, 'device': 'sdb', 'weight': 1000}) + 'port': 6200, 'device': 'sdb', 'weight': 1000}) rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'ip': '127.0.0.2', - 'port': 6000, 'device': 'sdc', 'weight': 1000}) + 'port': 6200, 'device': 'sdc', 'weight': 1000}) # z1 - extra small server rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'ip': '127.0.0.3', - 'port': 6000, 'device': 'sda', 'weight': 50}) + 'port': 6200, 'device': 'sda', 'weight': 50}) expected = { (0, 0): 2.479338842975207, @@ -3122,16 +3123,16 @@ class TestGetRequiredOverload(unittest.TestCase): rb = ring.RingBuilder(8, 5, 0) # z0 rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'device': 'sda', 'weight': 100}) + 'port': 6200, 'device': 'sda', 'weight': 100}) # z1 rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 
'ip': '127.0.1.1', - 'port': 6000, 'device': 'sda', 'weight': 100}) + 'port': 6200, 'device': 'sda', 'weight': 100}) rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.1.1', - 'port': 6000, 'device': 'sdb', 'weight': 100}) + 'port': 6200, 'device': 'sdb', 'weight': 100}) rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.2', - 'port': 6000, 'device': 'sdc', 'weight': 100}) + 'port': 6200, 'device': 'sdc', 'weight': 100}) rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.1.2', - 'port': 6000, 'device': 'sdd', 'weight': 100}) + 'port': 6200, 'device': 'sdd', 'weight': 100}) # first things first, make sure we do this right rb.rebalance() @@ -3187,19 +3188,19 @@ class TestGetRequiredOverload(unittest.TestCase): # z0 rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'device': 'sda', 'weight': 100}) + 'port': 6200, 'device': 'sda', 'weight': 100}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.2', - 'port': 6000, 'device': 'sda', 'weight': 100}) + 'port': 6200, 'device': 'sda', 'weight': 100}) # z1 rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.1.1', - 'port': 6000, 'device': 'sda', 'weight': 100}) + 'port': 6200, 'device': 'sda', 'weight': 100}) rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.2', - 'port': 6000, 'device': 'sda', 'weight': 100}) + 'port': 6200, 'device': 'sda', 'weight': 100}) # z2 rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'ip': '127.0.2.1', - 'port': 6000, 'device': 'sda', 'weight': 100}) + 'port': 6200, 'device': 'sda', 'weight': 100}) rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.2.2', - 'port': 6000, 'device': 'sda', 'weight': 10000}) + 'port': 6200, 'device': 'sda', 'weight': 10000}) # obviously d5 gets one whole replica; the other two replicas # are split evenly among the five other devices @@ -3306,19 +3307,19 @@ class TestGetRequiredOverload(unittest.TestCase): # z0 rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 
6000, 'device': 'sda', 'weight': 10000}) + 'port': 6200, 'device': 'sda', 'weight': 10000}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.2', - 'port': 6000, 'device': 'sda', 'weight': 10000}) + 'port': 6200, 'device': 'sda', 'weight': 10000}) # z1 rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.1.1', - 'port': 6000, 'device': 'sda', 'weight': 10000}) + 'port': 6200, 'device': 'sda', 'weight': 10000}) rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.2', - 'port': 6000, 'device': 'sda', 'weight': 10000}) + 'port': 6200, 'device': 'sda', 'weight': 10000}) # z2 rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'ip': '127.0.2.1', - 'port': 6000, 'device': 'sda', 'weight': 10000}) + 'port': 6200, 'device': 'sda', 'weight': 10000}) rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.2.2', - 'port': 6000, 'device': 'sda', 'weight': 100}) + 'port': 6200, 'device': 'sda', 'weight': 100}) # it's almost like 3.0 / 5 ~= 0.6, but that one little guy get's # his fair share @@ -3415,21 +3416,21 @@ class TestGetRequiredOverload(unittest.TestCase): rb = ring.RingBuilder(8, 3, 0) # z0 rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'device': 'sda', 'weight': 60}) + 'port': 6200, 'device': 'sda', 'weight': 60}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.2', - 'port': 6000, 'device': 'sda', 'weight': 60}) + 'port': 6200, 'device': 'sda', 'weight': 60}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.3', - 'port': 6000, 'device': 'sda', 'weight': 60}) + 'port': 6200, 'device': 'sda', 'weight': 60}) # z1 rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.1', - 'port': 6000, 'device': 'sda', 'weight': 80}) + 'port': 6200, 'device': 'sda', 'weight': 80}) rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.1.2', - 'port': 6000, 'device': 'sda', 'weight': 128}) + 'port': 6200, 'device': 'sda', 'weight': 128}) # z2 rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.2.1', - 
'port': 6000, 'device': 'sda', 'weight': 80}) + 'port': 6200, 'device': 'sda', 'weight': 80}) rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'ip': '127.0.2.2', - 'port': 6000, 'device': 'sda', 'weight': 240}) + 'port': 6200, 'device': 'sda', 'weight': 240}) rb.set_overload(0.1) rb.rebalance() @@ -3445,19 +3446,19 @@ class TestGetRequiredOverload(unittest.TestCase): def test_multi_zone_with_failed_device(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'device': 'sda', 'weight': 2000}) + 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'device': 'sdb', 'weight': 2000}) + 'port': 6200, 'device': 'sdb', 'weight': 2000}) rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.0.2', - 'port': 6000, 'device': 'sda', 'weight': 2000}) + 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.0.2', - 'port': 6000, 'device': 'sdb', 'weight': 2000}) + 'port': 6200, 'device': 'sdb', 'weight': 2000}) rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'ip': '127.0.0.3', - 'port': 6000, 'device': 'sda', 'weight': 2000}) + 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.0.3', - 'port': 6000, 'device': 'sdb', 'weight': 2000}) + 'port': 6200, 'device': 'sdb', 'weight': 2000}) # sanity, balanced and dispersed expected = { @@ -3561,19 +3562,19 @@ class TestGetRequiredOverload(unittest.TestCase): rb = ring.RingBuilder(8, 3, 1) # zone 0 server 127.0.0.1 rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'device': 'sda', 'weight': 3000}) + 'port': 6200, 'device': 'sda', 'weight': 3000}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'device': 'sdb', 'weight': 3000}) + 'port': 6200, 'device': 'sdb', 'weight': 3000}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 
'port': 6000, 'device': 'sda', 'weight': 3000}) + 'port': 6200, 'device': 'sda', 'weight': 3000}) # zone 1 server 127.0.0.2 rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.0.2', - 'port': 6000, 'device': 'sda', 'weight': 4000}) + 'port': 6200, 'device': 'sda', 'weight': 4000}) rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'ip': '127.0.0.2', - 'port': 6000, 'device': 'sdb', 'weight': 4000}) + 'port': 6200, 'device': 'sdb', 'weight': 4000}) # zone 1 (again) server 127.0.0.3 rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'ip': '127.0.0.3', - 'port': 6000, 'device': 'sda', 'weight': 1000}) + 'port': 6200, 'device': 'sda', 'weight': 1000}) weighted_replicas = rb._build_weighted_replicas_by_tier() @@ -3632,19 +3633,19 @@ class TestGetRequiredOverload(unittest.TestCase): rb = ring.RingBuilder(3, 3, 1) # zone 0 server 127.0.0.1 rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'device': 'sda', 'weight': 2000}) + 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'device': 'sdb', 'weight': 2000}) + 'port': 6200, 'device': 'sdb', 'weight': 2000}) # zone 0 server 127.0.0.2 rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.2', - 'port': 6000, 'device': 'sda', 'weight': 2000}) + 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'ip': '127.0.0.2', - 'port': 6000, 'device': 'sdb', 'weight': 2000}) + 'port': 6200, 'device': 'sdb', 'weight': 2000}) # zone 0 server 127.0.0.3 rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'ip': '127.0.0.3', - 'port': 6000, 'device': 'sda', 'weight': 2000}) + 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'ip': '127.0.0.3', - 'port': 6000, 'device': 'sdb', 'weight': 2000}) + 'port': 6200, 'device': 'sdb', 'weight': 2000}) # sanity, balanced and dispersed expected = { @@ -3669,19 +3670,19 @@ class TestGetRequiredOverload(unittest.TestCase): # 
zone 1 server 127.0.1.1 rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'ip': '127.0.1.1', - 'port': 6000, 'device': 'sda', 'weight': 100}) + 'port': 6200, 'device': 'sda', 'weight': 100}) rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'ip': '127.0.1.1', - 'port': 6000, 'device': 'sdb', 'weight': 100}) + 'port': 6200, 'device': 'sdb', 'weight': 100}) # zone 1 server 127.0.1.2 rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'ip': '127.0.1.2', - 'port': 6000, 'device': 'sda', 'weight': 100}) + 'port': 6200, 'device': 'sda', 'weight': 100}) rb.add_dev({'id': 9, 'region': 0, 'zone': 1, 'ip': '127.0.1.2', - 'port': 6000, 'device': 'sdb', 'weight': 100}) + 'port': 6200, 'device': 'sdb', 'weight': 100}) # zone 1 server 127.0.1.3 rb.add_dev({'id': 10, 'region': 0, 'zone': 1, 'ip': '127.0.1.3', - 'port': 6000, 'device': 'sda', 'weight': 100}) + 'port': 6200, 'device': 'sda', 'weight': 100}) rb.add_dev({'id': 11, 'region': 0, 'zone': 1, 'ip': '127.0.1.3', - 'port': 6000, 'device': 'sdb', 'weight': 100}) + 'port': 6200, 'device': 'sdb', 'weight': 100}) # this messes things up pretty royally expected = { @@ -3739,13 +3740,13 @@ class TestGetRequiredOverload(unittest.TestCase): def test_gradual_replica_count(self): rb = ring.RingBuilder(3, 2.5, 1) rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'device': 'sda', 'weight': 2000}) + 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', - 'port': 6000, 'device': 'sdb', 'weight': 2000}) + 'port': 6200, 'device': 'sdb', 'weight': 2000}) rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.2', - 'port': 6000, 'device': 'sda', 'weight': 2000}) + 'port': 6200, 'device': 'sda', 'weight': 2000}) rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'ip': '127.0.0.2', - 'port': 6000, 'device': 'sdb', 'weight': 2000}) + 'port': 6200, 'device': 'sdb', 'weight': 2000}) expected = { 0: 0.625, diff --git a/test/unit/common/ring/test_ring.py 
b/test/unit/common/ring/test_ring.py index 0ce33e853e..7adceae17e 100644 --- a/test/unit/common/ring/test_ring.py +++ b/test/unit/common/ring/test_ring.py @@ -147,20 +147,20 @@ class TestRing(TestRingBase): array.array('H', [0, 1, 0, 1]), array.array('H', [3, 4, 3, 4])] self.intended_devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1.0, - 'ip': '10.1.1.1', 'port': 6000, + 'ip': '10.1.1.1', 'port': 6200, 'replication_ip': '10.1.0.1', 'replication_port': 6066}, {'id': 1, 'region': 0, 'zone': 0, 'weight': 1.0, - 'ip': '10.1.1.1', 'port': 6000, + 'ip': '10.1.1.1', 'port': 6200, 'replication_ip': '10.1.0.2', 'replication_port': 6066}, None, {'id': 3, 'region': 0, 'zone': 2, 'weight': 1.0, - 'ip': '10.1.2.1', 'port': 6000, + 'ip': '10.1.2.1', 'port': 6200, 'replication_ip': '10.2.0.1', 'replication_port': 6066}, {'id': 4, 'region': 0, 'zone': 2, 'weight': 1.0, - 'ip': '10.1.2.2', 'port': 6000, + 'ip': '10.1.2.2', 'port': 6200, 'replication_ip': '10.2.0.1', 'replication_port': 6066}] self.intended_part_shift = 30 @@ -243,7 +243,7 @@ class TestRing(TestRingBase): self.assertEqual(len(self.ring.devs), 7) self.intended_devs.append( {'id': 6, 'region': 0, 'zone': 5, 'weight': 1.0, - 'ip': '10.6.6.6', 'port': 6000}) + 'ip': '10.6.6.6', 'port': 6200}) ring.RingData( self.intended_replica2part2dev_id, self.intended_devs, self.intended_part_shift).save(self.testgz) @@ -259,7 +259,7 @@ class TestRing(TestRingBase): self.assertEqual(len(self.ring.devs), 8) self.intended_devs.append( {'id': 5, 'region': 0, 'zone': 4, 'weight': 1.0, - 'ip': '10.5.5.5', 'port': 6000}) + 'ip': '10.5.5.5', 'port': 6200}) ring.RingData( self.intended_replica2part2dev_id, self.intended_devs, self.intended_part_shift).save(self.testgz) @@ -270,34 +270,34 @@ class TestRing(TestRingBase): def test_reload_without_replication(self): replication_less_devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1.0, 'ip': '10.1.1.1', - 'port': 6000}, + 'port': 6200}, {'id': 1, 'region': 0, 'zone': 0, 'weight': 1.0, 
'ip': '10.1.1.1', - 'port': 6000}, + 'port': 6200}, None, {'id': 3, 'region': 0, 'zone': 2, 'weight': 1.0, 'ip': '10.1.2.1', - 'port': 6000}, + 'port': 6200}, {'id': 4, 'region': 0, 'zone': 2, 'weight': 1.0, 'ip': '10.1.2.2', - 'port': 6000}] + 'port': 6200}] intended_devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1.0, - 'ip': '10.1.1.1', 'port': 6000, + 'ip': '10.1.1.1', 'port': 6200, 'replication_ip': '10.1.1.1', - 'replication_port': 6000}, + 'replication_port': 6200}, {'id': 1, 'region': 0, 'zone': 0, 'weight': 1.0, - 'ip': '10.1.1.1', 'port': 6000, + 'ip': '10.1.1.1', 'port': 6200, 'replication_ip': '10.1.1.1', - 'replication_port': 6000}, + 'replication_port': 6200}, None, {'id': 3, 'region': 0, 'zone': 2, 'weight': 1.0, - 'ip': '10.1.2.1', 'port': 6000, + 'ip': '10.1.2.1', 'port': 6200, 'replication_ip': '10.1.2.1', - 'replication_port': 6000}, + 'replication_port': 6200}, {'id': 4, 'region': 0, 'zone': 2, 'weight': 1.0, - 'ip': '10.1.2.2', 'port': 6000, + 'ip': '10.1.2.2', 'port': 6200, 'replication_ip': '10.1.2.2', - 'replication_port': 6000}] + 'replication_port': 6200}] testgz = os.path.join(self.testdir, 'without_replication.ring.gz') ring.RingData( self.intended_replica2part2dev_id, @@ -311,34 +311,34 @@ class TestRing(TestRingBase): def test_reload_old_style_pickled_ring(self): devs = [{'id': 0, 'zone': 0, 'weight': 1.0, 'ip': '10.1.1.1', - 'port': 6000}, + 'port': 6200}, {'id': 1, 'zone': 0, 'weight': 1.0, 'ip': '10.1.1.1', - 'port': 6000}, + 'port': 6200}, None, {'id': 3, 'zone': 2, 'weight': 1.0, 'ip': '10.1.2.1', - 'port': 6000}, + 'port': 6200}, {'id': 4, 'zone': 2, 'weight': 1.0, 'ip': '10.1.2.2', - 'port': 6000}] + 'port': 6200}] intended_devs = [{'id': 0, 'region': 1, 'zone': 0, 'weight': 1.0, - 'ip': '10.1.1.1', 'port': 6000, + 'ip': '10.1.1.1', 'port': 6200, 'replication_ip': '10.1.1.1', - 'replication_port': 6000}, + 'replication_port': 6200}, {'id': 1, 'region': 1, 'zone': 0, 'weight': 1.0, - 'ip': '10.1.1.1', 'port': 6000, + 'ip': 
'10.1.1.1', 'port': 6200, 'replication_ip': '10.1.1.1', - 'replication_port': 6000}, + 'replication_port': 6200}, None, {'id': 3, 'region': 1, 'zone': 2, 'weight': 1.0, - 'ip': '10.1.2.1', 'port': 6000, + 'ip': '10.1.2.1', 'port': 6200, 'replication_ip': '10.1.2.1', - 'replication_port': 6000}, + 'replication_port': 6200}, {'id': 4, 'region': 1, 'zone': 2, 'weight': 1.0, - 'ip': '10.1.2.2', 'port': 6000, + 'ip': '10.1.2.2', 'port': 6200, 'replication_ip': '10.1.2.2', - 'replication_port': 6000}] + 'replication_port': 6200}] # simulate an old-style pickled ring testgz = os.path.join(self.testdir, @@ -823,22 +823,22 @@ class TestRing(TestRingBase): rb = ring.RingBuilder(8, 3, 1) devs = [ ring_utils.parse_add_value(v) for v in [ - 'r1z1-127.0.0.1:6000/d1', - 'r1z1-127.0.0.1:6001/d2', - 'r1z1-127.0.0.1:6002/d3', - 'r1z1-127.0.0.1:6003/d4', - 'r1z2-127.0.0.2:6000/d1', - 'r1z2-127.0.0.2:6001/d2', - 'r1z2-127.0.0.2:6002/d3', - 'r1z2-127.0.0.2:6003/d4', - 'r2z1-127.0.1.1:6000/d1', - 'r2z1-127.0.1.1:6001/d2', - 'r2z1-127.0.1.1:6002/d3', - 'r2z1-127.0.1.1:6003/d4', - 'r2z2-127.0.1.2:6000/d1', - 'r2z2-127.0.1.2:6001/d2', - 'r2z2-127.0.1.2:6002/d3', - 'r2z2-127.0.1.2:6003/d4', + 'r1z1-127.0.0.1:6200/d1', + 'r1z1-127.0.0.1:6201/d2', + 'r1z1-127.0.0.1:6202/d3', + 'r1z1-127.0.0.1:6203/d4', + 'r1z2-127.0.0.2:6200/d1', + 'r1z2-127.0.0.2:6201/d2', + 'r1z2-127.0.0.2:6202/d3', + 'r1z2-127.0.0.2:6203/d4', + 'r2z1-127.0.1.1:6200/d1', + 'r2z1-127.0.1.1:6201/d2', + 'r2z1-127.0.1.1:6202/d3', + 'r2z1-127.0.1.1:6203/d4', + 'r2z2-127.0.1.2:6200/d1', + 'r2z2-127.0.1.2:6201/d2', + 'r2z2-127.0.1.2:6202/d3', + 'r2z2-127.0.1.2:6203/d4', ] ] for dev in devs: diff --git a/test/unit/common/ring/test_utils.py b/test/unit/common/ring/test_utils.py index 705d619b9b..fda4cdfdcd 100644 --- a/test/unit/common/ring/test_utils.py +++ b/test/unit/common/ring/test_utils.py @@ -34,33 +34,33 @@ class TestUtils(unittest.TestCase): def setUp(self): self.test_dev = {'region': 1, 'zone': 1, 'ip': '192.168.1.1', - 
'port': '6000', 'id': 0} + 'port': '6200', 'id': 0} def get_test_devs(): dev0 = {'region': 1, 'zone': 1, 'ip': '192.168.1.1', - 'port': '6000', 'id': 0} + 'port': '6200', 'id': 0} dev1 = {'region': 1, 'zone': 1, 'ip': '192.168.1.1', - 'port': '6000', 'id': 1} + 'port': '6200', 'id': 1} dev2 = {'region': 1, 'zone': 1, 'ip': '192.168.1.1', - 'port': '6000', 'id': 2} + 'port': '6200', 'id': 2} dev3 = {'region': 1, 'zone': 1, 'ip': '192.168.1.2', - 'port': '6000', 'id': 3} + 'port': '6200', 'id': 3} dev4 = {'region': 1, 'zone': 1, 'ip': '192.168.1.2', - 'port': '6000', 'id': 4} + 'port': '6200', 'id': 4} dev5 = {'region': 1, 'zone': 1, 'ip': '192.168.1.2', - 'port': '6000', 'id': 5} + 'port': '6200', 'id': 5} dev6 = {'region': 1, 'zone': 2, 'ip': '192.168.2.1', - 'port': '6000', 'id': 6} + 'port': '6200', 'id': 6} dev7 = {'region': 1, 'zone': 2, 'ip': '192.168.2.1', - 'port': '6000', 'id': 7} + 'port': '6200', 'id': 7} dev8 = {'region': 1, 'zone': 2, 'ip': '192.168.2.1', - 'port': '6000', 'id': 8} + 'port': '6200', 'id': 8} dev9 = {'region': 1, 'zone': 2, 'ip': '192.168.2.2', - 'port': '6000', 'id': 9} + 'port': '6200', 'id': 9} dev10 = {'region': 1, 'zone': 2, 'ip': '192.168.2.2', - 'port': '6000', 'id': 10} + 'port': '6200', 'id': 10} dev11 = {'region': 1, 'zone': 2, 'ip': '192.168.2.2', - 'port': '6000', 'id': 11} + 'port': '6200', 'id': 11} return [dev0, dev1, dev2, dev3, dev4, dev5, dev6, dev7, dev8, dev9, dev10, dev11] @@ -188,7 +188,7 @@ class TestUtils(unittest.TestCase): def test_is_local_device(self): # localhost shows up in whataremyips() output as "::1" for IPv6 my_ips = ["127.0.0.1", "::1"] - my_port = 6000 + my_port = 6200 self.assertTrue(is_local_device(my_ips, my_port, "127.0.0.1", my_port)) self.assertTrue(is_local_device(my_ips, my_port, @@ -281,14 +281,14 @@ class TestUtils(unittest.TestCase): argv = \ ["--id", "1", "--region", "2", "--zone", "3", "--ip", "test.test.com", - "--port", "6000", + "--port", "6200", "--replication-ip", "r.test.com", 
"--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359", "--change-ip", "change.test.test.com", - "--change-port", "6001", + "--change-port", "6201", "--change-replication-ip", "change.r.test.com", "--change-replication-port", "7001", "--change-device", "sdb3", @@ -298,7 +298,7 @@ class TestUtils(unittest.TestCase): 'region': 2, 'zone': 3, 'ip': "test.test.com", - 'port': 6000, + 'port': 6200, 'replication_ip': "r.test.com", 'replication_port': 7000, 'device': "sda3", @@ -312,14 +312,14 @@ class TestUtils(unittest.TestCase): argv = \ ["--id", "1", "--region", "2", "--zone", "3", "--ip", "127.0.0.1", - "--port", "6000", + "--port", "6200", "--replication-ip", "127.0.0.10", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359", "--change-ip", "127.0.0.2", - "--change-port", "6001", + "--change-port", "6201", "--change-replication-ip", "127.0.0.20", "--change-replication-port", "7001", "--change-device", "sdb3", @@ -329,7 +329,7 @@ class TestUtils(unittest.TestCase): 'region': 2, 'zone': 3, 'ip': "127.0.0.1", - 'port': 6000, + 'port': 6200, 'replication_ip': "127.0.0.10", 'replication_port': 7000, 'device': "sda3", @@ -343,14 +343,14 @@ class TestUtils(unittest.TestCase): argv = \ ["--id", "1", "--region", "2", "--zone", "3", "--ip", "[127.0.0.1]", - "--port", "6000", + "--port", "6200", "--replication-ip", "[127.0.0.10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359", "--change-ip", "[127.0.0.2]", - "--change-port", "6001", + "--change-port", "6201", "--change-replication-ip", "[127.0.0.20]", "--change-replication-port", "7001", "--change-device", "sdb3", @@ -363,21 +363,21 @@ class TestUtils(unittest.TestCase): argv = \ ["--id", "1", "--region", "2", "--zone", "3", "--ip", "test.test.com", - "--port", "6000", + "--port", "6200", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", 
"sda3", "--meta", "some meta data", "--weight", "3.14159265359", "--change-ip", "change.test.test.com", - "--change-port", "6001", + "--change-port", "6201", "--change-replication-ip", "change.r.test.com", "--change-replication-port", "7001", "--change-device", "sdb3", "--change-meta", "some meta data for change"] expected = { 'ip': "change.test.test.com", - 'port': 6001, + 'port': 6201, 'replication_ip': "change.r.test.com", 'replication_port': 7001, 'device': "sdb3", @@ -390,21 +390,21 @@ class TestUtils(unittest.TestCase): argv = \ ["--id", "1", "--region", "2", "--zone", "3", "--ip", "127.0.0.1", - "--port", "6000", + "--port", "6200", "--replication-ip", "127.0.0.10", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359", "--change-ip", "127.0.0.2", - "--change-port", "6001", + "--change-port", "6201", "--change-replication-ip", "127.0.0.20", "--change-replication-port", "7001", "--change-device", "sdb3", "--change-meta", "some meta data for change"] expected = { 'ip': "127.0.0.2", - 'port': 6001, + 'port': 6201, 'replication_ip': "127.0.0.20", 'replication_port': 7001, 'device': "sdb3", @@ -417,14 +417,14 @@ class TestUtils(unittest.TestCase): argv = \ ["--id", "1", "--region", "2", "--zone", "3", "--ip", "[127.0.0.1]", - "--port", "6000", + "--port", "6200", "--replication-ip", "[127.0.0.10]", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359", "--change-ip", "[127.0.0.2]", - "--change-port", "6001", + "--change-port", "6201", "--change-replication-ip", "[127.0.0.20]", "--change-replication-port", "7001", "--change-device", "sdb3", @@ -437,14 +437,14 @@ class TestUtils(unittest.TestCase): argv = \ ["--id", "1", "--region", "2", "--zone", "3", "--ip", "test.test.com", - "--port", "6000", + "--port", "6200", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359", 
"--change-ip", "change.test.test.com", - "--change-port", "6001", + "--change-port", "6201", "--change-replication-ip", "change.r.test.com", "--change-replication-port", "7001", "--change-device", "sdb3", @@ -455,14 +455,14 @@ class TestUtils(unittest.TestCase): self.assertEqual(opts.region, 2) self.assertEqual(opts.zone, 3) self.assertEqual(opts.ip, "test.test.com") - self.assertEqual(opts.port, 6000) + self.assertEqual(opts.port, 6200) self.assertEqual(opts.replication_ip, "r.test.com") self.assertEqual(opts.replication_port, 7000) self.assertEqual(opts.device, "sda3") self.assertEqual(opts.meta, "some meta data") self.assertEqual(opts.weight, 3.14159265359) self.assertEqual(opts.change_ip, "change.test.test.com") - self.assertEqual(opts.change_port, 6001) + self.assertEqual(opts.change_port, 6201) self.assertEqual(opts.change_replication_ip, "change.r.test.com") self.assertEqual(opts.change_replication_port, 7001) self.assertEqual(opts.change_device, "sdb3") @@ -497,7 +497,7 @@ class TestUtils(unittest.TestCase): "--meta", "", "--weight", None, "--change-ip", "change.test.test.com", - "--change-port", "6001", + "--change-port", "6201", "--change-replication-ip", "change.r.test.com", "--change-replication-port", "7001", "--change-device", "sdb3", @@ -526,14 +526,14 @@ class TestUtils(unittest.TestCase): argv = \ ["--id", "1", "--region", "2", "--zone", "3", "--ip", "test.test.com", - "--port", "6000", + "--port", "6200", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", "--meta", "some meta data", "--weight", "3.14159265359", "--change-ip", "change.test.test.com", - "--change-port", "6001", + "--change-port", "6201", "--change-replication-ip", "change.r.test.com", "--change-replication-port", "7001", "--change-device", "sdb3", @@ -544,14 +544,14 @@ class TestUtils(unittest.TestCase): self.assertEqual(opts.region, 2) self.assertEqual(opts.zone, 3) self.assertEqual(opts.ip, "test.test.com") - self.assertEqual(opts.port, 6000) + 
self.assertEqual(opts.port, 6200) self.assertEqual(opts.replication_ip, "r.test.com") self.assertEqual(opts.replication_port, 7000) self.assertEqual(opts.device, "sda3") self.assertEqual(opts.meta, "some meta data") self.assertEqual(opts.weight, 3.14159265359) self.assertEqual(opts.change_ip, "change.test.test.com") - self.assertEqual(opts.change_port, 6001) + self.assertEqual(opts.change_port, 6201) self.assertEqual(opts.change_replication_ip, "change.r.test.com") self.assertEqual(opts.change_replication_port, 7001) self.assertEqual(opts.change_device, "sdb3") @@ -581,7 +581,7 @@ class TestUtils(unittest.TestCase): argv = \ ["--region", "2", "--zone", "3", "--ip", "test.test.com", - "--port", "6000", + "--port", "6200", "--replication-ip", "r.test.com", "--replication-port", "7000", "--device", "sda3", @@ -591,7 +591,7 @@ class TestUtils(unittest.TestCase): 'region': 2, 'zone': 3, 'ip': "test.test.com", - 'port': 6000, + 'port': 6200, 'replication_ip': "r.test.com", 'replication_port': 7000, 'device': "sda3", @@ -605,7 +605,7 @@ class TestUtils(unittest.TestCase): argv = \ ["--region", "2", "--zone", "3", "--ip", "[test.test.com]", - "--port", "6000", + "--port", "6200", "--replication-ip", "[r.test.com]", "--replication-port", "7000", "--device", "sda3", @@ -617,7 +617,7 @@ class TestUtils(unittest.TestCase): argv = \ ["--region", "2", "--zone", "3", "--ip", "[test.test.com]", - "--port", "6000", + "--port", "6200", "--replication-ip", "[r.test.com]", "--replication-port", "7000", "--meta", "some meta data", @@ -750,11 +750,11 @@ class TestUtils(unittest.TestCase): def test_parse_address_old_format(self): # Test old format - argv = "127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data" + argv = "127.0.0.1:6200R127.0.0.1:6200/sda1_some meta data" ip, port, rest = parse_address(argv) self.assertEqual(ip, '127.0.0.1') - self.assertEqual(port, 6000) - self.assertEqual(rest, 'R127.0.0.1:6000/sda1_some meta data') + self.assertEqual(port, 6200) + self.assertEqual(rest, 
'R127.0.0.1:6200/sda1_some meta data') if __name__ == '__main__': diff --git a/test/unit/common/test_db_replicator.py b/test/unit/common/test_db_replicator.py index 5c5912e19b..be17d42bf8 100644 --- a/test/unit/common/test_db_replicator.py +++ b/test/unit/common/test_db_replicator.py @@ -72,8 +72,8 @@ class FakeRing(object): class FakeRingWithSingleNode(object): class Ring(object): devs = [dict( - id=1, weight=10.0, zone=1, ip='1.1.1.1', port=6000, device='sdb', - meta='', replication_ip='1.1.1.1', replication_port=6000 + id=1, weight=10.0, zone=1, ip='1.1.1.1', port=6200, device='sdb', + meta='', replication_ip='1.1.1.1', replication_port=6200 )] def __init__(self, path, reload_time=15, ring_name=None): @@ -92,23 +92,23 @@ class FakeRingWithSingleNode(object): class FakeRingWithNodes(object): class Ring(object): devs = [dict( - id=1, weight=10.0, zone=1, ip='1.1.1.1', port=6000, device='sdb', - meta='', replication_ip='1.1.1.1', replication_port=6000, region=1 + id=1, weight=10.0, zone=1, ip='1.1.1.1', port=6200, device='sdb', + meta='', replication_ip='1.1.1.1', replication_port=6200, region=1 ), dict( - id=2, weight=10.0, zone=2, ip='1.1.1.2', port=6000, device='sdb', - meta='', replication_ip='1.1.1.2', replication_port=6000, region=2 + id=2, weight=10.0, zone=2, ip='1.1.1.2', port=6200, device='sdb', + meta='', replication_ip='1.1.1.2', replication_port=6200, region=2 ), dict( - id=3, weight=10.0, zone=3, ip='1.1.1.3', port=6000, device='sdb', - meta='', replication_ip='1.1.1.3', replication_port=6000, region=1 + id=3, weight=10.0, zone=3, ip='1.1.1.3', port=6200, device='sdb', + meta='', replication_ip='1.1.1.3', replication_port=6200, region=1 ), dict( - id=4, weight=10.0, zone=4, ip='1.1.1.4', port=6000, device='sdb', - meta='', replication_ip='1.1.1.4', replication_port=6000, region=2 + id=4, weight=10.0, zone=4, ip='1.1.1.4', port=6200, device='sdb', + meta='', replication_ip='1.1.1.4', replication_port=6200, region=2 ), dict( - id=5, weight=10.0, zone=5, 
ip='1.1.1.5', port=6000, device='sdb', - meta='', replication_ip='1.1.1.5', replication_port=6000, region=1 + id=5, weight=10.0, zone=5, ip='1.1.1.5', port=6200, device='sdb', + meta='', replication_ip='1.1.1.5', replication_port=6200, region=1 ), dict( - id=6, weight=10.0, zone=6, ip='1.1.1.6', port=6000, device='sdb', - meta='', replication_ip='1.1.1.6', replication_port=6000, region=2 + id=6, weight=10.0, zone=6, ip='1.1.1.6', port=6200, device='sdb', + meta='', replication_ip='1.1.1.6', replication_port=6200, region=2 )] def __init__(self, path, reload_time=15, ring_name=None): @@ -486,7 +486,7 @@ class TestDBReplicator(unittest.TestCase): base = 'swift.common.db_replicator.' with patch(base + 'whataremyips', return_value=['1.1.1.1']), \ patch(base + 'ring', FakeRingWithNodes()): - replicator = TestReplicator({'bind_port': 6000, + replicator = TestReplicator({'bind_port': 6200, 'recon_cache_path': self.recon_cache}, logger=logger) replicator.run_once() @@ -507,10 +507,11 @@ class TestDBReplicator(unittest.TestCase): db_replicator.ring = FakeRingWithSingleNode() # If a bind_ip is specified, it's plumbed into whataremyips() and # returned by itself. 
- conf = {'mount_check': 'true', 'bind_ip': '1.1.1.1', 'bind_port': 6000} + conf = {'mount_check': 'true', 'bind_ip': '1.1.1.1', + 'bind_port': 6200} replicator = TestReplicator(conf, logger=unit.FakeLogger()) self.assertEqual(replicator.mount_check, True) - self.assertEqual(replicator.port, 6000) + self.assertEqual(replicator.port, 6200) def mock_ismount(path): self.assertEqual(path, @@ -528,10 +529,10 @@ class TestDBReplicator(unittest.TestCase): def test_run_once_node_is_mounted(self): db_replicator.ring = FakeRingWithSingleNode() - conf = {'mount_check': 'true', 'bind_port': 6000} + conf = {'mount_check': 'true', 'bind_port': 6200} replicator = TestReplicator(conf, logger=unit.FakeLogger()) self.assertEqual(replicator.mount_check, True) - self.assertEqual(replicator.port, 6000) + self.assertEqual(replicator.port, 6200) def mock_unlink_older_than(path, mtime): self.assertEqual(path, diff --git a/test/unit/common/test_direct_client.py b/test/unit/common/test_direct_client.py index 503a941186..aae8129340 100644 --- a/test/unit/common/test_direct_client.py +++ b/test/unit/common/test_direct_client.py @@ -99,7 +99,7 @@ def mocked_http_conn(*args, **kwargs): class TestDirectClient(unittest.TestCase): def setUp(self): - self.node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda', + self.node = {'ip': '1.2.3.4', 'port': '6200', 'device': 'sda', 'replication_ip': '1.2.3.5', 'replication_port': '7000'} self.part = '0' @@ -244,7 +244,7 @@ class TestDirectClient(unittest.TestCase): self.assertTrue('GET' in str(err)) def test_direct_delete_account(self): - node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'} + node = {'ip': '1.2.3.4', 'port': '6200', 'device': 'sda'} part = '0' account = 'a' @@ -261,7 +261,7 @@ class TestDirectClient(unittest.TestCase): self.assertTrue('X-Timestamp' in headers) def test_direct_delete_account_failure(self): - node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'} + node = {'ip': '1.2.3.4', 'port': '6200', 'device': 'sda'} part = 
'0' account = 'a' diff --git a/test/unit/container/test_reconciler.py b/test/unit/container/test_reconciler.py index 0e1346273d..56ce3965ad 100644 --- a/test/unit/container/test_reconciler.py +++ b/test/unit/container/test_reconciler.py @@ -587,7 +587,7 @@ class TestReconcilerUtils(unittest.TestCase): socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)), ClientException( 'Container Server blew up', - '10.0.0.12', 6001, 'sdj', 404, 'Not Found' + '10.0.0.12', 6201, 'sdj', 404, 'Not Found' ), ] mock_direct_delete = mock.MagicMock() diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index 818ad1563c..e57b82d062 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -91,20 +91,20 @@ def _create_test_ring(path, policy): [2, 3, 0, 1, 6, 4, 5]] intended_devs = [ {'id': 0, 'device': 'sda1', 'zone': 0, 'ip': '127.0.0.0', - 'port': 6000}, + 'port': 6200}, {'id': 1, 'device': 'sda1', 'zone': 1, 'ip': '127.0.0.1', - 'port': 6000}, + 'port': 6200}, {'id': 2, 'device': 'sda1', 'zone': 2, 'ip': '127.0.0.2', - 'port': 6000}, + 'port': 6200}, {'id': 3, 'device': 'sda1', 'zone': 4, 'ip': '127.0.0.3', - 'port': 6000}, + 'port': 6200}, {'id': 4, 'device': 'sda1', 'zone': 5, 'ip': '127.0.0.4', - 'port': 6000}, + 'port': 6200}, {'id': 5, 'device': 'sda1', 'zone': 6, - 'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6000}, + 'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6200}, {'id': 6, 'device': 'sda1', 'zone': 7, 'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334', - 'port': 6000}] + 'port': 6200}] intended_part_shift = 30 intended_reload_time = 15 with closing(GzipFile(testgz, 'wb')) as f: diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py index 19fc1f8bba..13d29562a2 100755 --- a/test/unit/obj/test_reconstructor.py +++ b/test/unit/obj/test_reconstructor.py @@ -90,13 +90,13 @@ def _create_test_rings(path): intended_devs = [ {'id': 0, 'device': 'sda1', 'zone': 0, 'ip': '127.0.0.0', - 'port': 6000}, 
+ 'port': 6200}, {'id': 1, 'device': 'sda1', 'zone': 1, 'ip': '127.0.0.1', - 'port': 6000}, + 'port': 6200}, {'id': 2, 'device': 'sda1', 'zone': 2, 'ip': '127.0.0.2', - 'port': 6000}, + 'port': 6200}, {'id': 3, 'device': 'sda1', 'zone': 4, 'ip': '127.0.0.3', - 'port': 6000} + 'port': 6200} ] intended_part_shift = 30 with closing(GzipFile(testgz, 'wb')) as f: @@ -313,11 +313,11 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): [{ 'sync_to': [{ 'index': 2, - 'replication_port': 6000, + 'replication_port': 6200, 'zone': 2, 'ip': '127.0.0.2', 'region': 1, - 'port': 6000, + 'port': 6200, 'replication_ip': '127.0.0.2', 'device': 'sda1', 'id': 2, @@ -328,13 +328,13 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): 'frag_index': 2, 'device': 'sda1', 'local_dev': { - 'replication_port': 6000, + 'replication_port': 6200, 'zone': 1, 'ip': '127.0.0.1', 'region': 1, 'id': 1, 'replication_ip': '127.0.0.1', - 'device': 'sda1', 'port': 6000, + 'device': 'sda1', 'port': 6200, }, 'hashes': { '061': { @@ -350,20 +350,20 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): }, { 'sync_to': [{ 'index': 0, - 'replication_port': 6000, + 'replication_port': 6200, 'zone': 0, 'ip': '127.0.0.0', 'region': 1, - 'port': 6000, + 'port': 6200, 'replication_ip': '127.0.0.0', 'device': 'sda1', 'id': 0, }, { 'index': 2, - 'replication_port': 6000, + 'replication_port': 6200, 'zone': 2, 'ip': '127.0.0.2', 'region': 1, - 'port': 6000, + 'port': 6200, 'replication_ip': '127.0.0.2', 'device': 'sda1', 'id': 2, @@ -375,14 +375,14 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): 'frag_index': 1, 'device': 'sda1', 'local_dev': { - 'replication_port': 6000, + 'replication_port': 6200, 'zone': 1, 'ip': '127.0.0.1', 'region': 1, 'id': 1, 'replication_ip': '127.0.0.1', 'device': 'sda1', - 'port': 6000, + 'port': 6200, }, 'hashes': { @@ -403,11 +403,11 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): [{ 'sync_to': [{ 'index': 1, - 
'replication_port': 6000, + 'replication_port': 6200, 'zone': 2, 'ip': '127.0.0.2', 'region': 1, - 'port': 6000, + 'port': 6200, 'replication_ip': '127.0.0.2', 'device': 'sda1', 'id': 2, @@ -418,14 +418,14 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): 'frag_index': 1, 'device': 'sda1', 'local_dev': { - 'replication_port': 6000, + 'replication_port': 6200, 'zone': 1, 'ip': '127.0.0.1', 'region': 1, 'id': 1, 'replication_ip': '127.0.0.1', 'device': 'sda1', - 'port': 6000, + 'port': 6200, }, 'hashes': { @@ -442,20 +442,20 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): }, { 'sync_to': [{ 'index': 2, - 'replication_port': 6000, + 'replication_port': 6200, 'zone': 4, 'ip': '127.0.0.3', 'region': 1, - 'port': 6000, + 'port': 6200, 'replication_ip': '127.0.0.3', 'device': 'sda1', 'id': 3, }, { 'index': 1, - 'replication_port': 6000, + 'replication_port': 6200, 'zone': 2, 'ip': '127.0.0.2', 'region': 1, - 'port': 6000, + 'port': 6200, 'replication_ip': '127.0.0.2', 'device': 'sda1', 'id': 2, @@ -467,14 +467,14 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): 'frag_index': 0, 'device': 'sda1', 'local_dev': { - 'replication_port': 6000, + 'replication_port': 6200, 'zone': 1, 'ip': '127.0.0.1', 'region': 1, 'id': 1, 'replication_ip': '127.0.0.1', 'device': 'sda1', - 'port': 6000, + 'port': 6200, }, 'hashes': { '061': { @@ -494,11 +494,11 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): [{ 'sync_to': [{ 'index': 0, - 'replication_port': 6000, + 'replication_port': 6200, 'zone': 2, 'ip': '127.0.0.2', 'region': 1, - 'port': 6000, + 'port': 6200, 'replication_ip': '127.0.0.2', 'device': 'sda1', 'id': 2, }], @@ -508,14 +508,14 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): 'frag_index': 0, 'device': 'sda1', 'local_dev': { - 'replication_port': 6000, + 'replication_port': 6200, 'zone': 1, 'ip': '127.0.0.1', 'region': 1, 'id': 1, 'replication_ip': '127.0.0.1', 'device': 'sda1', - 'port': 6000, + 'port': 
6200, }, 'hashes': { '061': { @@ -530,11 +530,11 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): }, { 'sync_to': [{ 'index': 2, - 'replication_port': 6000, + 'replication_port': 6200, 'zone': 0, 'ip': '127.0.0.0', 'region': 1, - 'port': 6000, + 'port': 6200, 'replication_ip': '127.0.0.0', 'device': 'sda1', 'id': 0, @@ -545,14 +545,14 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): 'frag_index': 2, 'device': 'sda1', 'local_dev': { - 'replication_port': 6000, + 'replication_port': 6200, 'zone': 1, 'ip': '127.0.0.1', 'region': 1, 'id': 1, 'replication_ip': '127.0.0.1', 'device': 'sda1', - 'port': 6000 + 'port': 6200 }, 'hashes': { '061': { diff --git a/test/unit/obj/test_replicator.py b/test/unit/obj/test_replicator.py index a3220887ed..38a5f0a2d4 100644 --- a/test/unit/obj/test_replicator.py +++ b/test/unit/obj/test_replicator.py @@ -140,19 +140,19 @@ def _create_test_rings(path, devs=None): ] intended_devs = devs or [ {'id': 0, 'device': 'sda', 'zone': 0, - 'region': 1, 'ip': '127.0.0.0', 'port': 6000}, + 'region': 1, 'ip': '127.0.0.0', 'port': 6200}, {'id': 1, 'device': 'sda', 'zone': 1, - 'region': 2, 'ip': '127.0.0.1', 'port': 6000}, + 'region': 2, 'ip': '127.0.0.1', 'port': 6200}, {'id': 2, 'device': 'sda', 'zone': 2, - 'region': 3, 'ip': '127.0.0.2', 'port': 6000}, + 'region': 3, 'ip': '127.0.0.2', 'port': 6200}, {'id': 3, 'device': 'sda', 'zone': 4, - 'region': 2, 'ip': '127.0.0.3', 'port': 6000}, + 'region': 2, 'ip': '127.0.0.3', 'port': 6200}, {'id': 4, 'device': 'sda', 'zone': 5, - 'region': 1, 'ip': '127.0.0.4', 'port': 6000}, + 'region': 1, 'ip': '127.0.0.4', 'port': 6200}, {'id': 5, 'device': 'sda', 'zone': 6, - 'region': 3, 'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6000}, + 'region': 3, 'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6200}, {'id': 6, 'device': 'sda', 'zone': 7, 'region': 1, - 'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'port': 6000}, + 'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'port': 6200}, ] 
intended_part_shift = 30 with closing(GzipFile(testgz, 'wb')) as f: @@ -195,7 +195,7 @@ class TestObjectReplicator(unittest.TestCase): _create_test_rings(self.testdir) self.logger = debug_logger('test-replicator') self.conf = dict( - bind_ip=_ips()[0], bind_port=6000, + bind_ip=_ips()[0], bind_port=6200, swift_dir=self.testdir, devices=self.devices, mount_check='false', timeout='300', stats_interval='1', sync_method='rsync') self._create_replicator() @@ -267,9 +267,9 @@ class TestObjectReplicator(unittest.TestCase): logger=self.logger) replicator.run_once() expected = [ - "Can't find itself 1.1.1.1 with port 6000 " + "Can't find itself 1.1.1.1 with port 6200 " "in ring file, not replicating", - "Can't find itself 1.1.1.1 with port 6000 " + "Can't find itself 1.1.1.1 with port 6200 " "in ring file, not replicating", ] self.assertEqual(expected, self.logger.get_lines_for_level('error')) @@ -423,17 +423,17 @@ class TestObjectReplicator(unittest.TestCase): # Two disks on same IP/port {'id': 0, 'device': 'sda', 'zone': 0, 'region': 1, 'ip': '1.1.1.1', 'port': 1111, - 'replication_ip': '127.0.0.0', 'replication_port': 6000}, + 'replication_ip': '127.0.0.0', 'replication_port': 6200}, {'id': 1, 'device': 'sdb', 'zone': 1, 'region': 1, 'ip': '1.1.1.1', 'port': 1111, - 'replication_ip': '127.0.0.0', 'replication_port': 6000}, + 'replication_ip': '127.0.0.0', 'replication_port': 6200}, # Two disks on same server, different ports {'id': 2, 'device': 'sdc', 'zone': 2, 'region': 2, 'ip': '1.1.1.2', 'port': 1112, - 'replication_ip': '127.0.0.1', 'replication_port': 6000}, + 'replication_ip': '127.0.0.1', 'replication_port': 6200}, {'id': 3, 'device': 'sdd', 'zone': 4, 'region': 2, 'ip': '1.1.1.2', 'port': 1112, - 'replication_ip': '127.0.0.1', 'replication_port': 6001}, + 'replication_ip': '127.0.0.1', 'replication_port': 6201}, ] objects_sdb, objects_1_sdb, _, _ = self._write_disk_data('sdb') objects_sdc, objects_1_sdc, _, _ = self._write_disk_data('sdc') @@ -522,22 +522,22 @@ 
class TestObjectReplicator(unittest.TestCase): def test_collect_jobs_multi_disk_diff_ports_normal(self, mock_shuffle): # Normally (servers_per_port=0), replication_ip AND replication_port # are used to determine local ring device entries. Here we show that - # with bind_ip='127.0.0.1', bind_port=6000, only "sdc" is local. + # with bind_ip='127.0.0.1', bind_port=6200, only "sdc" is local. devs = [ # Two disks on same IP/port {'id': 0, 'device': 'sda', 'zone': 0, 'region': 1, 'ip': '1.1.1.1', 'port': 1111, - 'replication_ip': '127.0.0.0', 'replication_port': 6000}, + 'replication_ip': '127.0.0.0', 'replication_port': 6200}, {'id': 1, 'device': 'sdb', 'zone': 1, 'region': 1, 'ip': '1.1.1.1', 'port': 1111, - 'replication_ip': '127.0.0.0', 'replication_port': 6000}, + 'replication_ip': '127.0.0.0', 'replication_port': 6200}, # Two disks on same server, different ports {'id': 2, 'device': 'sdc', 'zone': 2, 'region': 2, 'ip': '1.1.1.2', 'port': 1112, - 'replication_ip': '127.0.0.1', 'replication_port': 6000}, + 'replication_ip': '127.0.0.1', 'replication_port': 6200}, {'id': 3, 'device': 'sdd', 'zone': 4, 'region': 2, 'ip': '1.1.1.2', 'port': 1112, - 'replication_ip': '127.0.0.1', 'replication_port': 6001}, + 'replication_ip': '127.0.0.1', 'replication_port': 6201}, ] objects_sdb, objects_1_sdb, _, _ = self._write_disk_data('sdb') objects_sdc, objects_1_sdc, _, _ = self._write_disk_data('sdc') @@ -601,23 +601,23 @@ class TestObjectReplicator(unittest.TestCase): def test_collect_jobs_multi_disk_servers_per_port(self, mock_shuffle): # Normally (servers_per_port=0), replication_ip AND replication_port # are used to determine local ring device entries. Here we show that - # with servers_per_port > 0 and bind_ip='127.0.0.1', bind_port=6000, + # with servers_per_port > 0 and bind_ip='127.0.0.1', bind_port=6200, # then both "sdc" and "sdd" are local. 
devs = [ # Two disks on same IP/port {'id': 0, 'device': 'sda', 'zone': 0, 'region': 1, 'ip': '1.1.1.1', 'port': 1111, - 'replication_ip': '127.0.0.0', 'replication_port': 6000}, + 'replication_ip': '127.0.0.0', 'replication_port': 6200}, {'id': 1, 'device': 'sdb', 'zone': 1, 'region': 1, 'ip': '1.1.1.1', 'port': 1111, - 'replication_ip': '127.0.0.0', 'replication_port': 6000}, + 'replication_ip': '127.0.0.0', 'replication_port': 6200}, # Two disks on same server, different ports {'id': 2, 'device': 'sdc', 'zone': 2, 'region': 2, 'ip': '1.1.1.2', 'port': 1112, - 'replication_ip': '127.0.0.1', 'replication_port': 6000}, + 'replication_ip': '127.0.0.1', 'replication_port': 6200}, {'id': 3, 'device': 'sdd', 'zone': 4, 'region': 2, 'ip': '1.1.1.2', 'port': 1112, - 'replication_ip': '127.0.0.1', 'replication_port': 6001}, + 'replication_ip': '127.0.0.1', 'replication_port': 6201}, ] objects_sdb, objects_1_sdb, _, _ = self._write_disk_data('sdb') objects_sdc, objects_1_sdc, _, _ = self._write_disk_data('sdc') @@ -1055,19 +1055,19 @@ class TestObjectReplicator(unittest.TestCase): def test_delete_partition_ssync_single_region(self): devs = [ {'id': 0, 'device': 'sda', 'zone': 0, - 'region': 1, 'ip': '127.0.0.0', 'port': 6000}, + 'region': 1, 'ip': '127.0.0.0', 'port': 6200}, {'id': 1, 'device': 'sda', 'zone': 1, - 'region': 1, 'ip': '127.0.0.1', 'port': 6000}, + 'region': 1, 'ip': '127.0.0.1', 'port': 6200}, {'id': 2, 'device': 'sda', 'zone': 2, - 'region': 1, 'ip': '127.0.0.2', 'port': 6000}, + 'region': 1, 'ip': '127.0.0.2', 'port': 6200}, {'id': 3, 'device': 'sda', 'zone': 4, - 'region': 1, 'ip': '127.0.0.3', 'port': 6000}, + 'region': 1, 'ip': '127.0.0.3', 'port': 6200}, {'id': 4, 'device': 'sda', 'zone': 5, - 'region': 1, 'ip': '127.0.0.4', 'port': 6000}, + 'region': 1, 'ip': '127.0.0.4', 'port': 6200}, {'id': 5, 'device': 'sda', 'zone': 6, - 'region': 1, 'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6000}, + 'region': 1, 'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6200}, 
{'id': 6, 'device': 'sda', 'zone': 7, 'region': 1, - 'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'port': 6000}, + 'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'port': 6200}, ] _create_test_rings(self.testdir, devs=devs) self.conf['sync_method'] = 'ssync' diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index 075aeac6d1..c35ea76490 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -3818,7 +3818,7 @@ class TestObjectController(unittest.TestCase): 'X-Container-Device': 'sdb1', 'X-Delete-At': 9999999999, 'X-Delete-At-Container': '9999999960', - 'X-Delete-At-Host': "10.1.1.1:6001,10.2.2.2:6002", + 'X-Delete-At-Host': "10.1.1.1:6201,10.2.2.2:6202", 'X-Delete-At-Partition': '6237', 'X-Delete-At-Device': 'sdp,sdq'}) @@ -3853,7 +3853,7 @@ class TestObjectController(unittest.TestCase): self.assertEqual( http_connect_args[1], {'ipaddr': '10.1.1.1', - 'port': '6001', + 'port': '6201', 'path': '/exp/9999999960/9999999999-a/c/o', 'device': 'sdp', 'partition': '6237', @@ -3872,7 +3872,7 @@ class TestObjectController(unittest.TestCase): self.assertEqual( http_connect_args[2], {'ipaddr': '10.2.2.2', - 'port': '6002', + 'port': '6202', 'path': '/exp/9999999960/9999999999-a/c/o', 'device': 'sdq', 'partition': '6237', @@ -4000,13 +4000,13 @@ class TestObjectController(unittest.TestCase): headers = { 'Content-Type': 'text/plain', 'X-Timestamp': put_timestamp, - 'X-Container-Host': '10.0.0.1:6001', + 'X-Container-Host': '10.0.0.1:6201', 'X-Container-Device': 'sda1', 'X-Container-Partition': 'p', 'X-Delete-At': delete_at_timestamp, 'X-Delete-At-Container': delete_at_container, 'X-Delete-At-Partition': 'p', - 'X-Delete-At-Host': '10.0.0.2:6002', + 'X-Delete-At-Host': '10.0.0.2:6202', 'X-Delete-At-Device': 'sda1', 'X-Backend-Storage-Policy-Index': int(policy)} if policy.policy_type == EC_POLICY: @@ -4024,7 +4024,7 @@ class TestObjectController(unittest.TestCase): # delete_at_update ip, port, method, path, headers = 
delete_at_update self.assertEqual(ip, '10.0.0.2') - self.assertEqual(port, '6002') + self.assertEqual(port, '6202') self.assertEqual(method, 'PUT') self.assertEqual(path, '/sda1/p/.expiring_objects/%s/%s-a/c/o' % (delete_at_container, delete_at_timestamp)) @@ -4038,7 +4038,7 @@ class TestObjectController(unittest.TestCase): # container_update ip, port, method, path, headers = container_update self.assertEqual(ip, '10.0.0.1') - self.assertEqual(port, '6001') + self.assertEqual(port, '6201') self.assertEqual(method, 'PUT') self.assertEqual(path, '/sda1/p/a/c/o') expected = { diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index 330250e2c9..13c8b89f37 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -832,7 +832,7 @@ class TestFuncs(unittest.TestCase): def getheaders(self): return [('content-length', self.getheader('content-length'))] - node = {'ip': '1.2.3.4', 'port': 6000, 'device': 'sda'} + node = {'ip': '1.2.3.4', 'port': 6200, 'device': 'sda'} source1 = TestSource(['abcd', '1234', 'abc', None]) source2 = TestSource(['efgh5678']) @@ -872,7 +872,7 @@ class TestFuncs(unittest.TestCase): def getheaders(self): return self.headers - node = {'ip': '1.2.3.4', 'port': 6000, 'device': 'sda'} + node = {'ip': '1.2.3.4', 'port': 6200, 'device': 'sda'} source1 = TestChunkedSource(['abcd', '1234', 'abc', None]) source2 = TestChunkedSource(['efgh5678']) From 99305b93000f28530731c05b34c69e0c7ce0463f Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Mon, 2 May 2016 17:29:32 -0700 Subject: [PATCH 108/141] Fix probe tests from commit cf48e75 Commit cf48e75 changed the default account/container/object ports in a lot of places, including the probetests. However, it didn't change them in doc/saio/bin/remakerings, and since the probe tests must match the rings, they started failing. 
This commit just backs out the changes to the test/probe directory so that remakerings and the probe tests match again. Change-Id: I316a09e6ee1a911f37ce9df3d641644739f88eeb --- test/probe/test_container_failures.py | 2 +- test/probe/test_object_failures.py | 2 +- test/probe/test_object_handoff.py | 8 ++++---- test/probe/test_reconstructor_revert.py | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/test/probe/test_container_failures.py b/test/probe/test_container_failures.py index 3076ba7922..d8c132c53d 100755 --- a/test/probe/test_container_failures.py +++ b/test/probe/test_container_failures.py @@ -133,7 +133,7 @@ class TestContainerFailures(ReplProbeTest): onode = onodes[0] db_files = [] for onode in onodes: - node_id = (onode['port'] - 6200) / 10 + node_id = (onode['port'] - 6000) / 10 device = onode['device'] hash_str = hash_path(self.account, container) server_conf = readconf(self.configs['container-server'][node_id]) diff --git a/test/probe/test_object_failures.py b/test/probe/test_object_failures.py index b290a4ec05..ba53177743 100755 --- a/test/probe/test_object_failures.py +++ b/test/probe/test_object_failures.py @@ -61,7 +61,7 @@ class TestObjectFailures(ReplProbeTest): opart, onodes = self.object_ring.get_nodes( self.account, container, obj) onode = onodes[0] - node_id = (onode['port'] - 6200) / 10 + node_id = (onode['port'] - 6000) / 10 device = onode['device'] hash_str = hash_path(self.account, container, obj) obj_server_conf = readconf(self.configs['object-server'][node_id]) diff --git a/test/probe/test_object_handoff.py b/test/probe/test_object_handoff.py index 1b9bd29cba..a360021b7c 100755 --- a/test/probe/test_object_handoff.py +++ b/test/probe/test_object_handoff.py @@ -134,13 +134,13 @@ class TestObjectHandoff(ReplProbeTest): port_num = node['replication_port'] except KeyError: port_num = node['port'] - node_id = (port_num - 6200) / 10 + node_id = (port_num - 6000) / 10 Manager(['object-replicator']).once(number=node_id) try: 
another_port_num = another_onode['replication_port'] except KeyError: another_port_num = another_onode['port'] - another_num = (another_port_num - 6200) / 10 + another_num = (another_port_num - 6000) / 10 Manager(['object-replicator']).once(number=another_num) # Assert the first container/obj primary server now has container/obj @@ -230,9 +230,9 @@ class TestObjectHandoff(ReplProbeTest): port_num = node['replication_port'] except KeyError: port_num = node['port'] - node_id = (port_num - 6200) / 10 + node_id = (port_num - 6000) / 10 Manager(['object-replicator']).once(number=node_id) - another_node_id = (another_port_num - 6200) / 10 + another_node_id = (another_port_num - 6000) / 10 Manager(['object-replicator']).once(number=another_node_id) # Assert primary node no longer has container/obj diff --git a/test/probe/test_reconstructor_revert.py b/test/probe/test_reconstructor_revert.py index 845ff1a292..095843624c 100755 --- a/test/probe/test_reconstructor_revert.py +++ b/test/probe/test_reconstructor_revert.py @@ -131,7 +131,7 @@ class TestReconstructorRevert(ECProbeTest): # fire up reconstructor on handoff nodes only for hnode in hnodes: - hnode_id = (hnode['port'] - 6200) / 10 + hnode_id = (hnode['port'] - 6000) / 10 self.reconstructor.once(number=hnode_id) # first three primaries have data again From b3dd6a5df1ac4a5265b330787abcd52c7e116208 Mon Sep 17 00:00:00 2001 From: Christopher Bartz Date: Tue, 3 May 2016 14:33:05 +0200 Subject: [PATCH 109/141] Improves log message in swift.obj.replicator Before this commit, when a local device has not found been found in a object-replication run, the policy was not mentioned in the error log. But it is of interest to know the policy, for example for error searching, when no local device has been found. 
Change-Id: Icb9f9f1d4aec5c4a70dd8abdf5483d4816720418 --- swift/obj/replicator.py | 7 ++++--- test/unit/obj/test_replicator.py | 8 ++++---- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py index 7a8613dec7..052213aade 100644 --- a/swift/obj/replicator.py +++ b/swift/obj/replicator.py @@ -642,9 +642,10 @@ class ObjectReplicator(Daemon): if failure_dev]) continue if not found_local: - self.logger.error("Can't find itself %s with port %s in ring " - "file, not replicating", - ", ".join(ips), self.port) + self.logger.error("Can't find itself in policy with index %d with" + " ips %s and with port %s in ring file, not" + " replicating", + int(policy), ", ".join(ips), self.port) return jobs def collect_jobs(self, override_devices=None, override_partitions=None, diff --git a/test/unit/obj/test_replicator.py b/test/unit/obj/test_replicator.py index 38a5f0a2d4..2609e2a60a 100644 --- a/test/unit/obj/test_replicator.py +++ b/test/unit/obj/test_replicator.py @@ -267,10 +267,10 @@ class TestObjectReplicator(unittest.TestCase): logger=self.logger) replicator.run_once() expected = [ - "Can't find itself 1.1.1.1 with port 6200 " - "in ring file, not replicating", - "Can't find itself 1.1.1.1 with port 6200 " - "in ring file, not replicating", + "Can't find itself in policy with index 0 with ips 1.1.1.1 and" + " with port 6200 in ring file, not replicating", + "Can't find itself in policy with index 1 with ips 1.1.1.1 and" + " with port 6200 in ring file, not replicating", ] self.assertEqual(expected, self.logger.get_lines_for_level('error')) From deaef2f9d62bb8ae7f4e8538e9fd9ddffbb8ebb2 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Wed, 4 May 2016 16:48:28 -0700 Subject: [PATCH 110/141] Remove unneeded setting of SO_REUSEADDR. This has been in eventlet.listen() since version 0.15. 
Change-Id: Ib77b28231a2180f1ea082f356c4687c39681a6f7 --- swift/common/wsgi.py | 1 - test/unit/common/test_wsgi.py | 1 - 2 files changed, 2 deletions(-) diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index 2c169eb2a6..a44d9d6c93 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -199,7 +199,6 @@ def get_socket(conf): raise Exception(_('Could not bind to %s:%s ' 'after trying for %s seconds') % ( bind_addr[0], bind_addr[1], bind_timeout)) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # in my experience, sockets can hang around forever without keepalive sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py index f39f215499..d4c1d68f39 100644 --- a/test/unit/common/test_wsgi.py +++ b/test/unit/common/test_wsgi.py @@ -276,7 +276,6 @@ class TestWSGI(unittest.TestCase): self.assertTrue(isinstance(sock, MockSocket)) expected_socket_opts = { socket.SOL_SOCKET: { - socket.SO_REUSEADDR: 1, socket.SO_KEEPALIVE: 1, }, socket.IPPROTO_TCP: { From 6827affe62771949a041d958db17ba6cd29232ab Mon Sep 17 00:00:00 2001 From: John Dickinson Date: Tue, 5 Apr 2016 11:35:22 -0700 Subject: [PATCH 111/141] Rework the contributor docs This started as a new "new_contributor" doc. But we've already got at least 3 different docs like that. 
Change-Id: Ia2303ab55eeea01cc71acbccaeab55dad0ef5ff9 --- CONTRIBUTING.rst | 123 ++++++++++++++++++++++++++++++++++++----------- README.rst | 59 ++++++++++++++++++++--- 2 files changed, 148 insertions(+), 34 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 0bec893829..e4958f8772 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -1,19 +1,92 @@ -If you would like to contribute to the development of OpenStack, you -must follow the steps in this page: -http://docs.openstack.org/infra/manual/developers.html +Contributing to OpenStack Swift +=============================== -Once those steps have been completed, changes to OpenStack should be -submitted for review via the Gerrit tool, following the workflow -documented at +Who is a Contributor? +--------------------- + +Put simply, if you improve Swift, you're a contributor. The easiest way to +improve the project is to tell us where there's a bug. In other words, filing +a bug is a valuable and helpful way to contribute to the project. + +Once a bug has been filed, someone will work on writing a patch to fix the +bug. Perhaps you'd like to fix a bug. Writing code to fix a bug or add new +functionality is tremendously important. + +Once code has been written, it is submitted upstream for review. All code, +even that written by the most senior members of the community, must pass code +review and all tests before it can be included in the project. Reviewing +proposed patches is a very helpful way to be a contributor. + +Swift is nothing without the community behind it. We'd love to welcome you to +our community. Come find us in #openstack-swift on freenode IRC or on the +OpenStack dev mailing list. + +Filing a Bug +~~~~~~~~~~~~ + +Filing a bug is the easiest way to contribute. We use Launchpad as a bug +tracker; you can find currently-tracked bugs at +https://bugs.launchpad.net/swift. +Use the `Report a bug `__ link to +file a new bug. 
+ +If you find something in Swift that doesn't match the documentation or doesn't +meet your expectations with how it should work, please let us know. Of course, +if you ever get an error (like a Traceback message in the logs), we definitely +want to know about that. We'll do our best to diagnose any problem and patch +it as soon as possible. + +A bug report, at minimum, should describe what you were doing that caused the +bug. "Swift broke, pls fix" is not helpful. Instead, something like "When I +restarted syslog, Swift started logging traceback messages" is very helpful. +The goal is that we can reproduce the bug and isolate the issue in order to +apply a fix. If you don't have full details, that's ok. Anything you can +provide is helpful. + +You may have noticed that there are many tracked bugs, but not all of them +have been confirmed. If you take a look at an old bug report and you can +reproduce the issue described, please leave a comment on the bug about that. +It lets us all know that the bug is very likely to be valid. + +Reviewing Someone Else's Code +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +All code reviews in OpenStack projects are done on +https://review.openstack.org/. Reviewing patches is one of the most effective +ways you can contribute to the community. + +We've written REVIEW_GUIDELINES.rst (found in this source tree) to help you +give good reviews. + +https://wiki.openstack.org/wiki/Swift/PriorityReviews is a starting point to +find what reviews are priority in the community. + +What do I work on? +------------------ + +If you're looking for a way to write and contribute code, but you're not sure +what to work on, check out the "wishlist" bugs in the bug tracker. These are +normally smaller items that someone took the time to write down but didn't +have time to implement. + +And please join #openstack-swift on freenode IRC to tell us what you're +working on. 
+ +Getting Started +--------------- + +http://docs.openstack.org/developer/swift/first_contribution_swift.html + +Once those steps have been completed, changes to OpenStack +should be submitted for review via the Gerrit tool, following +the workflow documented at http://docs.openstack.org/infra/manual/developers.html#development-workflow. -Gerrit is the review system used in the OpenStack projects. We're sorry, -but we won't be able to respond to pull requests submitted through -GitHub. +Gerrit is the review system used in the OpenStack projects. We're sorry, but +we won't be able to respond to pull requests submitted through GitHub. -Bugs should be filed `on -Launchpad <https://bugs.launchpad.net/swift>`__, not in GitHub's issue -tracker. +Bugs should be filed `on Launchpad <https://bugs.launchpad.net/swift>`__, +not in GitHub's issue tracker. Swift Design Principles ======================= @@ -93,21 +166,17 @@ Once your patch has been reviewed and approved by two core reviewers and has passed all automated tests, it will be merged into the Swift source tree. -Specs +Ideas ===== -.. |swift-specs| replace:: ``swift-specs`` -.. _swift-specs: https://github.com/openstack/swift-specs +https://wiki.openstack.org/wiki/Swift/ideas -The |swift-specs|_ repo -can be used for collaborative design work before a feature is -implemented. - -OpenStack's gerrit system is used to collaborate on the design spec. -Once approved OpenStack provides a doc site to easily read these -`specs <http://specs.openstack.org/openstack/swift-specs/>`__ - -A spec is needed for more impactful features. Coordinating a feature -between many devs (especially across companies) is a great example of -when a spec is needed. If you are unsure if a spec document is needed, -please feel free to ask in #openstack-swift on freenode IRC. +If you're working on something, it's a very good idea to write down +what you're thinking about. This lets others get up to speed, helps +you collaborate, and serves as a great record for future reference. +Write down your thoughts somewhere and put a link to it here. 
It +doesn't matter what form your thoughts are in; use whatever is best +for you. Your document should include why your idea is needed and your +thoughts on particular design choices and tradeoffs. Please include +some contact information (ideally, your IRC nick) so that people can +collaborate with you. diff --git a/README.rst b/README.rst index aba20cb903..984d160e21 100644 --- a/README.rst +++ b/README.rst @@ -27,17 +27,50 @@ http://docs.openstack.org/developer/swift/. For Developers -------------- -The best place to get started is the `"SAIO - Swift All In -One" `__. +Getting Started +~~~~~~~~~~~~~~~ + +Swift is part of OpenStack and follows the code contribution, review, and testing processes common to all OpenStack projects. + +If you would like to start contributing, check out these +`notes `__ to help you get started. + +The best place to get started is the +`"SAIO - Swift All In One" `__. This document will walk you through setting up a development cluster of Swift in a VM. The SAIO environment is ideal for running small-scale tests against swift and trying out new features and bug fixes. -You can run unit tests with ``.unittests`` and functional tests with -``.functests``. +Tests +~~~~~ -If you would like to start contributing, check out these -`notes `__ to help you get started. +There are three types of tests included in Swift's source tree. + + #. Unit tests + #. Functional tests + #. Probe tests + +Unit tests check that small sections of the code behave properly. For example, +a unit test may test a single function to ensure that various input gives the +expected output. This validates that the code is correct and regressions are +not introduced. + +Functional tests check that the client API is working as expected. These can +be run against any endpoint claiming to support the Swift API (although some +tests require multiple accounts with different privilege levels). 
These are +"black box" tests that ensure that client apps written against Swift will +continue to work. + +Probe tests are "white box" tests that validate the internal workings of a +Swift cluster. They are written to work against the +`"SAIO - Swift All In One" `__ +dev environment. For example, a probe test may create an object, delete one +replica, and ensure that the background consistency processes find and correct +the error. + +You can run unit tests with ``.unittests``, functional tests with +``.functests``, and probe tests with ``.probetests``. There is an +additional ``.alltests`` script that wraps the other three. Code Organization ~~~~~~~~~~~~~~~~~ @@ -45,19 +78,22 @@ Code Organization - bin/: Executable scripts that are the processes run by the deployer - doc/: Documentation - etc/: Sample config files +- examples/: Config snippets used in the docs - swift/: Core code - account/: account server + - cli/: code that backs some of the CLI tools in bin/ - common/: code shared by different modules - middleware/: "standard", officially-supported middleware - ring/: code implementing Swift's ring - container/: container server + - locale/: internationalization (translation) data - obj/: object server - proxy/: proxy server -- test/: Unit and functional tests +- test/: Unit, functional, and probe tests Data Flow ~~~~~~~~~ @@ -77,6 +113,10 @@ Deployer docs are also available at http://docs.openstack.org/developer/swift/. A good starting point is at http://docs.openstack.org/developer/swift/deployment\_guide.html +There is an `ops runbook `__ +that gives information about how to diagnose and troubleshoot common issues +when running a Swift cluster. + You can run functional tests against a swift cluster with ``.functests``. These functional tests require ``/etc/swift/test.conf`` to run. A sample config file can be found in this source tree in @@ -91,6 +131,11 @@ at http://github.com/openstack/python-swiftclient. 
Complete API documentation at http://docs.openstack.org/api/openstack-object-storage/1.0/content/ +There is a large ecosystem of applications and libraries that support and +work with OpenStack Swift. Several are listed on the +`associated projects `__ +page. + -------------- For more information come hang out in #openstack-swift on freenode. From ba1a568f815a8a40bf75c0beb96b789bd07adcb6 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 9 Mar 2016 10:48:54 +0000 Subject: [PATCH 112/141] Rename hash_cleanup_listdir tests hash_cleanup_listdir was removed in [1], this patch renames all references to it in test_diskfile to refer to the cleanup_ondisk_files method that is now tested directly. Also remove the final references to the now non-existent function in a few comments. [1] I0b96dfde32b4c666eebda6e88228516dd693ef92 Change-Id: I1e151799fc2774de9a1af092afff875af24a630c Related-Bug: #1550569 --- swift/obj/diskfile.py | 2 +- test/unit/obj/test_diskfile.py | 219 +++++++++++++++++---------------- test/unit/obj/test_server.py | 2 +- 3 files changed, 112 insertions(+), 111 deletions(-) diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index b6c38d1267..32cd550e8d 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -2659,7 +2659,7 @@ class ECDiskFile(BaseDiskFile): reverting it to its primary node. The hash will be invalidated, and if empty or invalid the - hsh_path will be removed on next hash_cleanup_listdir. + hsh_path will be removed on next cleanup_ondisk_files. 
:param timestamp: the object timestamp, an instance of :class:`~swift.common.utils.Timestamp` diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index 08c4f01c55..d5665d0a39 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -623,7 +623,7 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.fail('%s with files %s' % (str(e), files)) shuffle(files) - def _test_hash_cleanup_listdir_files(self, scenarios, policy, + def _test_cleanup_ondisk_files_files(self, scenarios, policy, reclaim_age=None): # check that expected files are left in hashdir after cleanup for test in scenarios: @@ -638,12 +638,12 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): if (f[2] if len(f) > 2 else f[1])]) if reclaim_age: class_under_test.cleanup_ondisk_files( - hashdir, reclaim_age=reclaim_age)['files'] + hashdir, reclaim_age=reclaim_age) else: with mock.patch('swift.obj.diskfile.time') as mock_time: # don't reclaim anything mock_time.time.return_value = 0.0 - class_under_test.cleanup_ondisk_files(hashdir)['files'] + class_under_test.cleanup_ondisk_files(hashdir) after_cleanup = set(os.listdir(hashdir)) errmsg = "expected %r, got %r for test %r" % ( sorted(expected_after_cleanup), sorted(after_cleanup), test @@ -652,7 +652,7 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): def _test_yield_hashes_cleanup(self, scenarios, policy): # opportunistic test to check that yield_hashes cleans up dir using - # same scenarios as passed to _test_hash_cleanup_listdir_files + # same scenarios as passed to _test_cleanup_ondisk_files_files for test in scenarios: class_under_test = self.df_router[policy] # list(zip(...)) for py3 compatibility (zip is lazy there) @@ -724,7 +724,7 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): 'Unexpected file %s' % os.path.join(datadir, unexpected))) - def test_hash_cleanup_listdir_reclaim_non_data_files(self): + def test_cleanup_ondisk_files_reclaim_non_data_files(self): # Each scenario specifies 
a list of (filename, extension, [survives]) # tuples. If extension is set or 'survives' is True, the filename # should still be in the dir after cleanup. @@ -755,7 +755,7 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): [('%s.meta' % older, False, False), ('%s.ts' % older, False, False)]] - self._test_hash_cleanup_listdir_files(scenarios, POLICIES.default, + self._test_cleanup_ondisk_files_files(scenarios, POLICIES.default, reclaim_age=1000) def test_construct_dev_path(self): @@ -848,9 +848,9 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value=None) with mock.patch(self._manager_mock('diskfile_cls')), \ mock.patch(self._manager_mock( - 'cleanup_ondisk_files')) as hclistdir, \ + 'cleanup_ondisk_files')) as cleanup, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta: - hclistdir.return_value = {'files': ['1381679759.90941.data']} + cleanup.return_value = {'files': ['1381679759.90941.data']} readmeta.return_value = {'name': '/a/c/o'} self.assertRaises( DiskFileDeviceUnavailable, @@ -861,13 +861,13 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/') with mock.patch(self._manager_mock('diskfile_cls')), \ mock.patch(self._manager_mock( - 'cleanup_ondisk_files')) as hclistdir, \ + 'cleanup_ondisk_files')) as cleanup, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta, \ mock.patch(self._manager_mock( 'quarantine_renamer')) as quarantine_renamer: osexc = OSError() osexc.errno = errno.ENOTDIR - hclistdir.side_effect = osexc + cleanup.side_effect = osexc readmeta.return_value = {'name': '/a/c/o'} self.assertRaises( DiskFileNotExist, @@ -881,11 +881,11 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/') with mock.patch(self._manager_mock('diskfile_cls')), \ mock.patch(self._manager_mock( - 'cleanup_ondisk_files')) as hclistdir, \ + 'cleanup_ondisk_files')) 
as cleanup, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta: osexc = OSError() osexc.errno = errno.ENOENT - hclistdir.side_effect = osexc + cleanup.side_effect = osexc readmeta.return_value = {'name': '/a/c/o'} self.assertRaises( DiskFileNotExist, @@ -896,10 +896,10 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/') with mock.patch(self._manager_mock('diskfile_cls')), \ mock.patch(self._manager_mock( - 'cleanup_ondisk_files')) as hclistdir, \ + 'cleanup_ondisk_files')) as cleanup, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta: osexc = OSError() - hclistdir.side_effect = osexc + cleanup.side_effect = osexc readmeta.return_value = {'name': '/a/c/o'} self.assertRaises( OSError, @@ -910,9 +910,9 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/') with mock.patch(self._manager_mock('diskfile_cls')), \ mock.patch(self._manager_mock( - 'cleanup_ondisk_files')) as hclistdir, \ + 'cleanup_ondisk_files')) as cleanup, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta: - hclistdir.return_value = {'files': []} + cleanup.return_value = {'files': []} readmeta.return_value = {'name': '/a/c/o'} self.assertRaises( DiskFileNotExist, @@ -923,9 +923,9 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/') with mock.patch(self._manager_mock('diskfile_cls')), \ mock.patch(self._manager_mock( - 'cleanup_ondisk_files')) as hclistdir, \ + 'cleanup_ondisk_files')) as cleanup, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta: - hclistdir.return_value = {'files': ['1381679759.90941.data']} + cleanup.return_value = {'files': ['1381679759.90941.data']} readmeta.side_effect = EOFError() self.assertRaises( DiskFileNotExist, @@ -936,9 +936,9 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = 
mock.MagicMock(return_value='/srv/dev/') with mock.patch(self._manager_mock('diskfile_cls')), \ mock.patch(self._manager_mock( - 'cleanup_ondisk_files')) as hclistdir, \ + 'cleanup_ondisk_files')) as cleanup, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta: - hclistdir.return_value = {'files': ['1381679759.90941.data']} + cleanup.return_value = {'files': ['1381679759.90941.data']} readmeta.return_value = {} try: self.df_mgr.get_diskfile_from_hash( @@ -952,9 +952,9 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/') with mock.patch(self._manager_mock('diskfile_cls')), \ mock.patch(self._manager_mock( - 'cleanup_ondisk_files')) as hclistdir, \ + 'cleanup_ondisk_files')) as cleanup, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta: - hclistdir.return_value = {'files': ['1381679759.90941.data']} + cleanup.return_value = {'files': ['1381679759.90941.data']} readmeta.return_value = {'name': 'bad'} try: self.df_mgr.get_diskfile_from_hash( @@ -968,16 +968,16 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/') with mock.patch(self._manager_mock('diskfile_cls')) as dfclass, \ mock.patch(self._manager_mock( - 'cleanup_ondisk_files')) as hclistdir, \ + 'cleanup_ondisk_files')) as cleanup, \ mock.patch('swift.obj.diskfile.read_metadata') as readmeta: - hclistdir.return_value = {'files': ['1381679759.90941.data']} + cleanup.return_value = {'files': ['1381679759.90941.data']} readmeta.return_value = {'name': '/a/c/o'} self.df_mgr.get_diskfile_from_hash( 'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0]) dfclass.assert_called_once_with( self.df_mgr, '/srv/dev/', self.df_mgr.threadpools['dev'], '9', 'a', 'c', 'o', policy=POLICIES[0]) - hclistdir.assert_called_once_with( + cleanup.assert_called_once_with( '/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900', 604800) readmeta.assert_called_once_with( @@ 
-1167,7 +1167,7 @@ class TestDiskFileManager(DiskFileManagerMixin, unittest.TestCase): ] self._test_get_ondisk_files(scenarios, POLICIES[0], None) - self._test_hash_cleanup_listdir_files(scenarios, POLICIES[0]) + self._test_cleanup_ondisk_files_files(scenarios, POLICIES[0]) self._test_yield_hashes_cleanup(scenarios, POLICIES[0]) def test_get_ondisk_files_with_stray_meta(self): @@ -1225,7 +1225,7 @@ class TestDiskFileManager(DiskFileManagerMixin, unittest.TestCase): self.assertEqual("Invalid Timestamp value in filename 'junk'", str(cm.exception)) - def test_hash_cleanup_listdir_reclaim_with_data_files(self): + def test_cleanup_ondisk_files_reclaim_with_data_files(self): # Each scenario specifies a list of (filename, extension, [survives]) # tuples. If extension is set or 'survives' is True, the filename # should still be in the dir after cleanup. @@ -1250,7 +1250,7 @@ class TestDiskFileManager(DiskFileManagerMixin, unittest.TestCase): [('%s.meta' % older, '.meta'), ('%s.data' % much_older, '.data')]] - self._test_hash_cleanup_listdir_files(scenarios, POLICIES.default, + self._test_cleanup_ondisk_files_files(scenarios, POLICIES.default, reclaim_age=1000) def test_yield_hashes(self): @@ -1498,7 +1498,7 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): ] self._test_get_ondisk_files(scenarios, POLICIES.default, None) - self._test_hash_cleanup_listdir_files(scenarios, POLICIES.default) + self._test_cleanup_ondisk_files_files(scenarios, POLICIES.default) self._test_yield_hashes_cleanup(scenarios, POLICIES.default) def test_get_ondisk_files_with_ec_policy_and_frag_index(self): @@ -1565,10 +1565,10 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): ] self._test_get_ondisk_files(scenarios, POLICIES.default, frag_index=1) - # note: not calling self._test_hash_cleanup_listdir_files(scenarios, 0) + # note: not calling self._test_cleanup_ondisk_files_files(scenarios, 0) # here due to the anomalous scenario as commented above - def 
test_hash_cleanup_listdir_reclaim_with_data_files(self): + def test_cleanup_ondisk_files_reclaim_with_data_files(self): # Each scenario specifies a list of (filename, extension, [survives]) # tuples. If extension is set or 'survives' is True, the filename # should still be in the dir after cleanup. @@ -1624,7 +1624,7 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): [('%s.meta' % older, False, False), ('%s.durable' % much_older, False, False)]] - self._test_hash_cleanup_listdir_files(scenarios, POLICIES.default, + self._test_cleanup_ondisk_files_files(scenarios, POLICIES.default, reclaim_age=1000) def test_get_ondisk_files_with_stray_meta(self): @@ -3176,12 +3176,12 @@ class DiskFileMixin(BaseDiskFileTestMixin): if policy.policy_type == EC_POLICY: self.assertTrue(isinstance(mock_fsync.call_args[0][0], int)) - def test_commit_ignores_hash_cleanup_listdir_error(self): + def test_commit_ignores_cleanup_ondisk_files_error(self): for policy in POLICIES: - # Check OSError from hash_cleanup_listdir is caught and ignored - mock_hcl = mock.MagicMock(side_effect=OSError) + # Check OSError from cleanup_ondisk_files is caught and ignored + mock_cleanup = mock.MagicMock(side_effect=OSError) df = self._simple_get_diskfile(account='a', container='c', - obj='o_hcl_error', policy=policy) + obj='o_error', policy=policy) timestamp = Timestamp(time()) with df.create() as writer: @@ -3192,13 +3192,13 @@ class DiskFileMixin(BaseDiskFileTestMixin): } writer.put(metadata) with mock.patch(self._manager_mock( - 'cleanup_ondisk_files', df), mock_hcl): + 'cleanup_ondisk_files', df), mock_cleanup): writer.commit(timestamp) expected = { EC_POLICY: 1, REPL_POLICY: 0, }[policy.policy_type] - self.assertEqual(expected, mock_hcl.call_count) + self.assertEqual(expected, mock_cleanup.call_count) expected = ['%s.data' % timestamp.internal] if policy.policy_type == EC_POLICY: expected = ['%s#2.data' % timestamp.internal, @@ -3208,8 +3208,8 @@ class 
DiskFileMixin(BaseDiskFileTestMixin): 'Unexpected dir listing %s' % dl) self.assertEqual(sorted(expected), sorted(dl)) - def test_number_calls_to_hash_cleanup_listdir_during_create(self): - # Check how many calls are made to hash_cleanup_listdir, and when, + def test_number_calls_to_cleanup_ondisk_files_during_create(self): + # Check how many calls are made to cleanup_ondisk_files, and when, # during put(), commit() sequence for policy in POLICIES: expected = { @@ -3217,7 +3217,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): REPL_POLICY: (1, 0), }[policy.policy_type] df = self._simple_get_diskfile(account='a', container='c', - obj='o_hcl_error', policy=policy) + obj='o_error', policy=policy) timestamp = Timestamp(time()) with df.create() as writer: metadata = { @@ -3226,16 +3226,16 @@ class DiskFileMixin(BaseDiskFileTestMixin): 'Content-Length': '0', } with mock.patch(self._manager_mock( - 'cleanup_ondisk_files', df)) as mock_hcl: + 'cleanup_ondisk_files', df)) as mock_cleanup: writer.put(metadata) - self.assertEqual(expected[0], mock_hcl.call_count) + self.assertEqual(expected[0], mock_cleanup.call_count) with mock.patch(self._manager_mock( - 'cleanup_ondisk_files', df)) as mock_hcl: + 'cleanup_ondisk_files', df)) as mock_cleanup: writer.commit(timestamp) - self.assertEqual(expected[1], mock_hcl.call_count) + self.assertEqual(expected[1], mock_cleanup.call_count) - def test_number_calls_to_hash_cleanup_listdir_during_delete(self): - # Check how many calls are made to hash_cleanup_listdir, and when, + def test_number_calls_to_cleanup_ondisk_files_during_delete(self): + # Check how many calls are made to cleanup_ondisk_files, and when, # for delete() and necessary prerequisite steps for policy in POLICIES: expected = { @@ -3243,7 +3243,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): REPL_POLICY: (1, 0, 1), }[policy.policy_type] df = self._simple_get_diskfile(account='a', container='c', - obj='o_hcl_error', policy=policy) + obj='o_error', policy=policy) timestamp = 
Timestamp(time()) with df.create() as writer: metadata = { @@ -3252,18 +3252,18 @@ class DiskFileMixin(BaseDiskFileTestMixin): 'Content-Length': '0', } with mock.patch(self._manager_mock( - 'cleanup_ondisk_files', df)) as mock_hcl: + 'cleanup_ondisk_files', df)) as mock_cleanup: writer.put(metadata) - self.assertEqual(expected[0], mock_hcl.call_count) + self.assertEqual(expected[0], mock_cleanup.call_count) with mock.patch(self._manager_mock( - 'cleanup_ondisk_files', df)) as mock_hcl: + 'cleanup_ondisk_files', df)) as mock_cleanup: writer.commit(timestamp) - self.assertEqual(expected[1], mock_hcl.call_count) + self.assertEqual(expected[1], mock_cleanup.call_count) with mock.patch(self._manager_mock( - 'cleanup_ondisk_files', df)) as mock_hcl: + 'cleanup_ondisk_files', df)) as mock_cleanup: timestamp = Timestamp(time()) df.delete(timestamp) - self.assertEqual(expected[2], mock_hcl.call_count) + self.assertEqual(expected[2], mock_cleanup.call_count) def test_delete(self): for policy in POLICIES: @@ -3686,15 +3686,16 @@ class DiskFileMixin(BaseDiskFileTestMixin): # open() was attempted, but no data file so expect None self.assertIsNone(df.durable_timestamp) - def test_error_in_hash_cleanup_listdir(self): + def test_error_in_cleanup_ondisk_files(self): - def mock_hcl(*args, **kwargs): + def mock_cleanup(*args, **kwargs): raise OSError() df = self._get_open_disk_file() file_count = len(os.listdir(df._datadir)) ts = time() - with mock.patch(self._manager_mock('cleanup_ondisk_files'), mock_hcl): + with mock.patch( + self._manager_mock('cleanup_ondisk_files'), mock_cleanup): try: df.delete(ts) except OSError: @@ -4490,27 +4491,27 @@ class TestECDiskFile(DiskFileMixin, unittest.TestCase): class TestSuffixHashes(unittest.TestCase): """ This tests all things related to hashing suffixes and therefore - there's also few test methods for hash_cleanup_listdir as well + there's also few test methods for cleanup_ondisk_files as well (because it's used by hash_suffix). 
The public interface to suffix hashing is on the Manager:: - * hash_cleanup_listdir(hsh_path) + * cleanup_ondisk_files(hsh_path) * get_hashes(device, partition, suffixes, policy) * invalidate_hash(suffix_dir) The Manager.get_hashes method (used by the REPLICATE verb) calls Manager._get_hashes (which may be an alias to the module method get_hashes), which calls hash_suffix, which calls - hash_cleanup_listdir. + cleanup_ondisk_files. - Outside of that, hash_cleanup_listdir and invalidate_hash are + Outside of that, cleanup_ondisk_files and invalidate_hash are used mostly after writing new files via PUT or DELETE. Test methods are organized by:: - * hash_cleanup_listdir tests - behaviors - * hash_cleanup_listdir tests - error handling + * cleanup_ondisk_files tests - behaviors + * cleanup_ondisk_files tests - error handling * invalidate_hash tests - behavior * invalidate_hash tests - error handling * get_hashes tests - hash_suffix behaviors @@ -4583,7 +4584,7 @@ class TestSuffixHashes(unittest.TestCase): filename += '.meta' return filename - def check_hash_cleanup_listdir(self, policy, input_files, output_files): + def check_cleanup_ondisk_files(self, policy, input_files, output_files): orig_unlink = os.unlink file_list = list(input_files) @@ -4611,17 +4612,17 @@ class TestSuffixHashes(unittest.TestCase): files = df_mgr.cleanup_ondisk_files('/whatever')['files'] self.assertEqual(files, output_files) - # hash_cleanup_listdir tests - behaviors + # cleanup_ondisk_files tests - behaviors - def test_hash_cleanup_listdir_purge_data_newer_ts(self): + def test_cleanup_ondisk_files_purge_data_newer_ts(self): for policy in self.iter_policies(): # purge .data if there's a newer .ts file1 = self._datafilename(self.ts(), policy) file2 = self.ts().internal + '.ts' file_list = [file1, file2] - self.check_hash_cleanup_listdir(policy, file_list, [file2]) + self.check_cleanup_ondisk_files(policy, file_list, [file2]) - def test_hash_cleanup_listdir_purge_expired_ts(self): + def 
test_cleanup_ondisk_files_purge_expired_ts(self): for policy in self.iter_policies(): # purge older .ts files if there's a newer .data file1 = self.ts().internal + '.ts' @@ -4635,9 +4636,9 @@ class TestSuffixHashes(unittest.TestCase): EC_POLICY: [file3, file2], REPL_POLICY: [file3], }[policy.policy_type] - self.check_hash_cleanup_listdir(policy, file_list, expected) + self.check_cleanup_ondisk_files(policy, file_list, expected) - def test_hash_cleanup_listdir_purge_ts_newer_data(self): + def test_cleanup_ondisk_files_purge_ts_newer_data(self): for policy in self.iter_policies(): # purge .ts if there's a newer .data file1 = self.ts().internal + '.ts' @@ -4651,9 +4652,9 @@ class TestSuffixHashes(unittest.TestCase): EC_POLICY: [durable_file, file2], REPL_POLICY: [file2], }[policy.policy_type] - self.check_hash_cleanup_listdir(policy, file_list, expected) + self.check_cleanup_ondisk_files(policy, file_list, expected) - def test_hash_cleanup_listdir_purge_older_ts(self): + def test_cleanup_ondisk_files_purge_older_ts(self): for policy in self.iter_policies(): file1 = self.ts().internal + '.ts' file2 = self.ts().internal + '.ts' @@ -4667,9 +4668,9 @@ class TestSuffixHashes(unittest.TestCase): REPL_POLICY: [file4, file3], }[policy.policy_type] file_list = [file1, file2, file3, file4] - self.check_hash_cleanup_listdir(policy, file_list, expected) + self.check_cleanup_ondisk_files(policy, file_list, expected) - def test_hash_cleanup_listdir_keep_meta_data_purge_ts(self): + def test_cleanup_ondisk_files_keep_meta_data_purge_ts(self): for policy in self.iter_policies(): file1 = self.ts().internal + '.ts' file2 = self.ts().internal + '.ts' @@ -4686,17 +4687,17 @@ class TestSuffixHashes(unittest.TestCase): EC_POLICY: [file4, durable_filename, file3], REPL_POLICY: [file4, file3], }[policy.policy_type] - self.check_hash_cleanup_listdir(policy, file_list, expected) + self.check_cleanup_ondisk_files(policy, file_list, expected) - def test_hash_cleanup_listdir_keep_one_ts(self): + 
def test_cleanup_ondisk_files_keep_one_ts(self): for policy in self.iter_policies(): file1, file2, file3 = [self.ts().internal + '.ts' for i in range(3)] file_list = [file1, file2, file3] # keep only latest of multiple .ts files - self.check_hash_cleanup_listdir(policy, file_list, [file3]) + self.check_cleanup_ondisk_files(policy, file_list, [file3]) - def test_hash_cleanup_listdir_multi_data_file(self): + def test_cleanup_ondisk_files_multi_data_file(self): for policy in self.iter_policies(): file1 = self._datafilename(self.ts(), policy, 1) file2 = self._datafilename(self.ts(), policy, 2) @@ -4708,9 +4709,9 @@ class TestSuffixHashes(unittest.TestCase): REPL_POLICY: [file3] }[policy.policy_type] file_list = [file1, file2, file3] - self.check_hash_cleanup_listdir(policy, file_list, expected) + self.check_cleanup_ondisk_files(policy, file_list, expected) - def test_hash_cleanup_listdir_keeps_one_datafile(self): + def test_cleanup_ondisk_files_keeps_one_datafile(self): for policy in self.iter_policies(): timestamps = [self.ts() for i in range(3)] file1 = self._datafilename(timestamps[0], policy, 1) @@ -4727,9 +4728,9 @@ class TestSuffixHashes(unittest.TestCase): # keep only latest of multiple .data files REPL_POLICY: [file3] }[policy.policy_type] - self.check_hash_cleanup_listdir(policy, file_list, expected) + self.check_cleanup_ondisk_files(policy, file_list, expected) - def test_hash_cleanup_listdir_keep_one_meta(self): + def test_cleanup_ondisk_files_keep_one_meta(self): for policy in self.iter_policies(): # keep only latest of multiple .meta files t_data = self.ts() @@ -4743,50 +4744,50 @@ class TestSuffixHashes(unittest.TestCase): EC_POLICY: [file3, durable_file, file1], REPL_POLICY: [file3, file1] }[policy.policy_type] - self.check_hash_cleanup_listdir(policy, file_list, expected) + self.check_cleanup_ondisk_files(policy, file_list, expected) - def test_hash_cleanup_listdir_only_meta(self): + def test_cleanup_ondisk_files_only_meta(self): for policy in 
self.iter_policies(): file1, file2 = [self.ts().internal + '.meta' for i in range(2)] file_list = [file1, file2] - self.check_hash_cleanup_listdir(policy, file_list, [file2]) + self.check_cleanup_ondisk_files(policy, file_list, [file2]) - def test_hash_cleanup_listdir_ignore_orphaned_ts(self): + def test_cleanup_ondisk_files_ignore_orphaned_ts(self): for policy in self.iter_policies(): # A more recent orphaned .meta file will prevent old .ts files # from being cleaned up otherwise file1, file2 = [self.ts().internal + '.ts' for i in range(2)] file3 = self.ts().internal + '.meta' file_list = [file1, file2, file3] - self.check_hash_cleanup_listdir(policy, file_list, [file3, file2]) + self.check_cleanup_ondisk_files(policy, file_list, [file3, file2]) - def test_hash_cleanup_listdir_purge_old_data_only(self): + def test_cleanup_ondisk_files_purge_old_data_only(self): for policy in self.iter_policies(): # Oldest .data will be purge, .meta and .ts won't be touched file1 = self._datafilename(self.ts(), policy) file2 = self.ts().internal + '.ts' file3 = self.ts().internal + '.meta' file_list = [file1, file2, file3] - self.check_hash_cleanup_listdir(policy, file_list, [file3, file2]) + self.check_cleanup_ondisk_files(policy, file_list, [file3, file2]) - def test_hash_cleanup_listdir_purge_old_ts(self): + def test_cleanup_ondisk_files_purge_old_ts(self): for policy in self.iter_policies(): # A single old .ts file will be removed old_float = time() - (diskfile.ONE_WEEK + 1) file1 = Timestamp(old_float).internal + '.ts' file_list = [file1] - self.check_hash_cleanup_listdir(policy, file_list, []) + self.check_cleanup_ondisk_files(policy, file_list, []) - def test_hash_cleanup_listdir_keep_isolated_meta_purge_old_ts(self): + def test_cleanup_ondisk_files_keep_isolated_meta_purge_old_ts(self): for policy in self.iter_policies(): # A single old .ts file will be removed despite presence of a .meta old_float = time() - (diskfile.ONE_WEEK + 1) file1 = Timestamp(old_float).internal + 
'.ts' file2 = Timestamp(time() + 2).internal + '.meta' file_list = [file1, file2] - self.check_hash_cleanup_listdir(policy, file_list, [file2]) + self.check_cleanup_ondisk_files(policy, file_list, [file2]) - def test_hash_cleanup_listdir_keep_single_old_data(self): + def test_cleanup_ondisk_files_keep_single_old_data(self): for policy in self.iter_policies(): old_float = time() - (diskfile.ONE_WEEK + 1) file1 = self._datafilename(Timestamp(old_float), policy) @@ -4798,33 +4799,33 @@ class TestSuffixHashes(unittest.TestCase): else: # A single old .data file will not be removed expected = file_list - self.check_hash_cleanup_listdir(policy, file_list, expected) + self.check_cleanup_ondisk_files(policy, file_list, expected) - def test_hash_cleanup_listdir_drops_isolated_durable(self): + def test_cleanup_ondisk_files_drops_isolated_durable(self): for policy in self.iter_policies(): if policy.policy_type == EC_POLICY: file1 = Timestamp(time()).internal + '.durable' file_list = [file1] - self.check_hash_cleanup_listdir(policy, file_list, []) + self.check_cleanup_ondisk_files(policy, file_list, []) - def test_hash_cleanup_listdir_purges_single_old_meta(self): + def test_cleanup_ondisk_files_purges_single_old_meta(self): for policy in self.iter_policies(): # A single old .meta file will be removed old_float = time() - (diskfile.ONE_WEEK + 1) file1 = Timestamp(old_float).internal + '.meta' file_list = [file1] - self.check_hash_cleanup_listdir(policy, file_list, []) + self.check_cleanup_ondisk_files(policy, file_list, []) - # hash_cleanup_listdir tests - error handling + # cleanup_ondisk_files tests - error handling - def test_hash_cleanup_listdir_hsh_path_enoent(self): + def test_cleanup_ondisk_files_hsh_path_enoent(self): for policy in self.iter_policies(): df_mgr = self.df_router[policy] # common.utils.listdir *completely* mutes ENOENT path = os.path.join(self.testdir, 'does-not-exist') self.assertEqual(df_mgr.cleanup_ondisk_files(path)['files'], []) - def 
test_hash_cleanup_listdir_hsh_path_other_oserror(self): + def test_cleanup_ondisk_files_hsh_path_other_oserror(self): for policy in self.iter_policies(): df_mgr = self.df_router[policy] with mock.patch('os.listdir') as mock_listdir: @@ -4834,22 +4835,22 @@ class TestSuffixHashes(unittest.TestCase): self.assertRaises(OSError, df_mgr.cleanup_ondisk_files, path) - def test_hash_cleanup_listdir_reclaim_tombstone_remove_file_error(self): + def test_cleanup_ondisk_files_reclaim_tombstone_remove_file_error(self): for policy in self.iter_policies(): # Timestamp 1 makes the check routine pretend the file # disappeared after listdir before unlink. file1 = '0000000001.00000.ts' file_list = [file1] - self.check_hash_cleanup_listdir(policy, file_list, []) + self.check_cleanup_ondisk_files(policy, file_list, []) - def test_hash_cleanup_listdir_older_remove_file_error(self): + def test_cleanup_ondisk_files_older_remove_file_error(self): for policy in self.iter_policies(): # Timestamp 1 makes the check routine pretend the file # disappeared after listdir before unlink. 
file1 = self._datafilename(Timestamp(1), policy) file2 = '0000000002.00000.ts' file_list = [file1, file2] - self.check_hash_cleanup_listdir(policy, file_list, []) + self.check_cleanup_ondisk_files(policy, file_list, []) # invalidate_hash tests - behavior @@ -5472,7 +5473,7 @@ class TestSuffixHashes(unittest.TestCase): (os.path.join(part_path, '123'), False), ]) - def test_hash_suffix_hash_cleanup_listdir_enotdir_quarantined(self): + def test_hash_suffix_cleanup_ondisk_files_enotdir_quarantined(self): for policy in self.iter_policies(): df = self.df_router[policy].get_diskfile( self.existing_device, '0', 'a', 'c', 'o', policy=policy) @@ -5501,7 +5502,7 @@ class TestSuffixHashes(unittest.TestCase): ) self.assertTrue(os.path.exists(quarantine_path)) - def test_hash_suffix_hash_cleanup_listdir_other_oserror(self): + def test_hash_suffix_cleanup_ondisk_files_other_oserror(self): for policy in self.iter_policies(): timestamp = self.ts() df_mgr = self.df_router[policy] @@ -5531,7 +5532,7 @@ class TestSuffixHashes(unittest.TestCase): listdir_calls.append(path) if path == datadir_path: # we want the part and suffix listdir calls to pass and - # make the hash_cleanup_listdir raise an exception + # make the cleanup_ondisk_files raise an exception raise OSError(errno.EACCES, os.strerror(errno.EACCES)) return orig_os_listdir(path) diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index c35ea76490..4e33dc1002 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -1126,7 +1126,7 @@ class TestObjectController(unittest.TestCase): # data file doesn't exist there (This is sanity because # if .data written unexpectedly, it will be removed - # by hash_cleanup_list_dir) + # by cleanup_ondisk_files) datafile = os.path.join( self.testdir, 'sda1', storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p', From cb8cda6588735411c542a1e4d78315ee047a357b Mon Sep 17 00:00:00 2001 From: Paul Dardeau Date: Mon, 9 May 2016 20:28:42 +0000 Subject: 
[PATCH 113/141] resurrect gholt blog posts on building consistent hashing ring These are blog posts authored by Greg Holt (gholt) and used with permission to add here. Content was only reformatted as rst and wrap prose lines at 70 characters. Change-Id: I7aa47c24b5019aa598ee005e01612a49514da25f --- doc/source/index.rst | 1 + doc/source/overview_ring.rst | 2 + doc/source/ring_background.rst | 957 +++++++++++++++++++++++++++++++++ 3 files changed, 960 insertions(+) create mode 100644 doc/source/ring_background.rst diff --git a/doc/source/index.rst b/doc/source/index.rst index 8f045cfb18..c648d0af4f 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -58,6 +58,7 @@ Overview and Concepts crossdomain overview_erasure_code overview_backing_store + ring_background associated_projects Developer Documentation diff --git a/doc/source/overview_ring.rst b/doc/source/overview_ring.rst index a1a72d8508..181b2f143c 100644 --- a/doc/source/overview_ring.rst +++ b/doc/source/overview_ring.rst @@ -426,3 +426,5 @@ for rings that were close to balanceable, like 3 machines with 60TB, 60TB, and didn't always get it. After that, overload was added to the ring builder so that operators could choose a balance between dispersion and device weights. In time the overload concept was improved and made more accurate. + +For more background on consistent hashing rings, please see :doc:`ring_background`. diff --git a/doc/source/ring_background.rst b/doc/source/ring_background.rst new file mode 100644 index 0000000000..ed282401b3 --- /dev/null +++ b/doc/source/ring_background.rst @@ -0,0 +1,957 @@ +================================== +Building a Consistent Hashing Ring +================================== + +--------------------- +Authored by Greg Holt +--------------------- + +This is compilation of five posts I made earlier discussing how to build +a consistent hashing ring. 
The posts seemed to be accessed quite frequently,
+so I've gathered them all here on one page for easier reading.
+
+Part 1
+======
+“Consistent Hashing” is a term used to describe a process where data is
+distributed using a hashing algorithm to determine its location. Using
+only the hash of the id of the data you can determine exactly where that
+data should be. This mapping of hashes to locations is usually termed a
+“ring”.
+
+Probably the simplest hash is just a modulus of the id. For instance, if
+all ids are numbers and you have two machines you wish to distribute data
+to, you could just put all odd numbered ids on one machine and even numbered
+ids on the other. Assuming you have a balanced number of odd and even
+numbered ids, and a balanced data size per id, your data would be balanced
+between the two machines.
+
+Since data ids are often textual names and not numbers, like paths for
+files or URLs, it makes sense to use a “real” hashing algorithm to convert
+the names to numbers first. Using MD5 for instance, the hash of the name
+‘mom.png’ is ‘4559a12e3e8da7c2186250c2f292e3af’ and the hash of ‘dad.png’
+is ‘096edcc4107e9e18d6a03a43b3853bea’. Now, using the modulus, we can
+place ‘mom.png’ on the odd machine and ‘dad.png’ on the even one. Another
+benefit of using a hashing algorithm like MD5 is that the resulting hashes
+have a known even distribution, meaning your ids will be evenly distributed
+without worrying about keeping the id values themselves evenly distributed.
+
+Here is a simple example of this in action:
+
+.. 
code-block:: python + + from hashlib import md5 + from struct import unpack_from + + NODE_COUNT = 100 + DATA_ID_COUNT = 10000000 + + node_counts = [0] * NODE_COUNT + for data_id in xrange(DATA_ID_COUNT): + data_id = str(data_id) + # This just pulls part of the hash out as an integer + hsh = unpack_from('>I', md5(data_id).digest())[0] + node_id = hsh % NODE_COUNT + node_counts[node_id] += 1 + desired_count = DATA_ID_COUNT / NODE_COUNT + print '%d: Desired data ids per node' % desired_count + max_count = max(node_counts) + over = 100.0 * (max_count - desired_count) / desired_count + print '%d: Most data ids on one node, %.02f%% over' % \ + (max_count, over) + min_count = min(node_counts) + under = 100.0 * (desired_count - min_count) / desired_count + print '%d: Least data ids on one node, %.02f%% under' % \ + (min_count, under) + +:: + + 100000: Desired data ids per node + 100695: Most data ids on one node, 0.69% over + 99073: Least data ids on one node, 0.93% under + +So that’s not bad at all; less than a percent over/under for distribution +per node. In the next part of this series we’ll examine where modulus +distribution causes problems and how to improve our ring to overcome them. + +Part 2 +====== +In Part 1 of this series, we did a simple test of using the modulus of a +hash to locate data. We saw very good distribution, but that’s only part +of the story. Distributed systems not only need to distribute load, but +they often also need to grow as more and more data is placed in it. + +So let’s imagine we have a 100 node system up and running using our +previous algorithm, but it’s starting to get full so we want to add +another node. When we add that 101st node to our algorithm we notice +that many ids now map to different nodes than they previously did. +We’re going to have to shuffle a ton of data around our system to get +it all into place again. 
+ +Let’s examine what’s happened on a much smaller scale: just 2 nodes +again, node 0 gets even ids and node 1 gets odd ids. So data id 100 +would map to node 0, data id 101 to node 1, data id 102 to node 0, etc. +This is simply node = id % 2. Now we add a third node (node 2) for more +space, so we want node = id % 3. So now data id 100 maps to node id 1, +data id 101 to node 2, and data id 102 to node 0. So we have to move +data for 2 of our 3 ids so they can be found again. + +Let’s examine this at a larger scale: + +.. code-block:: python + + from hashlib import md5 + from struct import unpack_from + + NODE_COUNT = 100 + NEW_NODE_COUNT = 101 + DATA_ID_COUNT = 10000000 + + moved_ids = 0 + for data_id in xrange(DATA_ID_COUNT): + data_id = str(data_id) + hsh = unpack_from('>I', md5(str(data_id)).digest())[0] + node_id = hsh % NODE_COUNT + new_node_id = hsh % NEW_NODE_COUNT + if node_id != new_node_id: + moved_ids += 1 + percent_moved = 100.0 * moved_ids / DATA_ID_COUNT + print '%d ids moved, %.02f%%' % (moved_ids, percent_moved) + +:: + + 9900989 ids moved, 99.01% + +Wow, that’s severe. We’d have to shuffle around 99% of our data just +to increase our capacity 1%! We need a new algorithm that combats this +behavior. + +This is where the “ring” really comes in. We can assign ranges of hashes +directly to nodes and then use an algorithm that minimizes the changes +to those ranges. Back to our small scale, let’s say our ids range from 0 +to 999. We have two nodes and we’ll assign data ids 0–499 to node 0 and +500–999 to node 1. Later, when we add node 2, we can take half the data +ids from node 0 and half from node 1, minimizing the amount of data that +needs to move. + +Let’s examine this at a larger scale: + +.. 
code-block:: python + + from bisect import bisect_left + from hashlib import md5 + from struct import unpack_from + + NODE_COUNT = 100 + NEW_NODE_COUNT = 101 + DATA_ID_COUNT = 10000000 + + node_range_starts = [] + for node_id in xrange(NODE_COUNT): + node_range_starts.append(DATA_ID_COUNT / + NODE_COUNT * node_id) + new_node_range_starts = [] + for new_node_id in xrange(NEW_NODE_COUNT): + new_node_range_starts.append(DATA_ID_COUNT / + NEW_NODE_COUNT * new_node_id) + moved_ids = 0 + for data_id in xrange(DATA_ID_COUNT): + data_id = str(data_id) + hsh = unpack_from('>I', md5(str(data_id)).digest())[0] + node_id = bisect_left(node_range_starts, + hsh % DATA_ID_COUNT) % NODE_COUNT + new_node_id = bisect_left(new_node_range_starts, + hsh % DATA_ID_COUNT) % NEW_NODE_COUNT + if node_id != new_node_id: + moved_ids += 1 + percent_moved = 100.0 * moved_ids / DATA_ID_COUNT + print '%d ids moved, %.02f%%' % (moved_ids, percent_moved) + +:: + + 4901707 ids moved, 49.02% + +Okay, that is better. But still, moving 50% of our data to add 1% capacity +is not very good. If we examine what happened more closely we’ll see what +is an “accordion effect”. We shrunk node 0’s range a bit to give to the +new node, but that shifted all the other node’s ranges by the same amount. + +We can minimize the change to a node’s assigned range by assigning several +smaller ranges instead of the single broad range we were before. This can +be done by creating “virtual nodes” for each node. So 100 nodes might have +1000 virtual nodes. Let’s examine how that might work. + +.. 
code-block:: python + + from bisect import bisect_left + from hashlib import md5 + from struct import unpack_from + + NODE_COUNT = 100 + DATA_ID_COUNT = 10000000 + VNODE_COUNT = 1000 + + vnode_range_starts = [] + vnode2node = [] + for vnode_id in xrange(VNODE_COUNT): + vnode_range_starts.append(DATA_ID_COUNT / + VNODE_COUNT * vnode_id) + vnode2node.append(vnode_id % NODE_COUNT) + new_vnode2node = list(vnode2node) + new_node_id = NODE_COUNT + NEW_NODE_COUNT = NODE_COUNT + 1 + vnodes_to_reassign = VNODE_COUNT / NEW_NODE_COUNT + while vnodes_to_reassign > 0: + for node_to_take_from in xrange(NODE_COUNT): + for vnode_id, node_id in enumerate(new_vnode2node): + if node_id == node_to_take_from: + new_vnode2node[vnode_id] = new_node_id + vnodes_to_reassign -= 1 + break + if vnodes_to_reassign <= 0: + break + moved_ids = 0 + for data_id in xrange(DATA_ID_COUNT): + data_id = str(data_id) + hsh = unpack_from('>I', md5(str(data_id)).digest())[0] + vnode_id = bisect_left(vnode_range_starts, + hsh % DATA_ID_COUNT) % VNODE_COUNT + node_id = vnode2node[vnode_id] + new_node_id = new_vnode2node[vnode_id] + if node_id != new_node_id: + moved_ids += 1 + percent_moved = 100.0 * moved_ids / DATA_ID_COUNT + print '%d ids moved, %.02f%%' % (moved_ids, percent_moved) + +:: + + 90423 ids moved, 0.90% + +There we go, we added 1% capacity and only moved 0.9% of existing data. +The vnode_range_starts list seems a bit out of place though. It’s values +are calculated and never change for the lifetime of the cluster, so let’s +optimize that out. + +.. 
code-block:: python + + from bisect import bisect_left + from hashlib import md5 + from struct import unpack_from + + NODE_COUNT = 100 + DATA_ID_COUNT = 10000000 + VNODE_COUNT = 1000 + + vnode2node = [] + for vnode_id in xrange(VNODE_COUNT): + vnode2node.append(vnode_id % NODE_COUNT) + new_vnode2node = list(vnode2node) + new_node_id = NODE_COUNT + vnodes_to_reassign = VNODE_COUNT / (NODE_COUNT + 1) + while vnodes_to_reassign > 0: + for node_to_take_from in xrange(NODE_COUNT): + for vnode_id, node_id in enumerate(vnode2node): + if node_id == node_to_take_from: + vnode2node[vnode_id] = new_node_id + vnodes_to_reassign -= 1 + break + if vnodes_to_reassign <= 0: + break + moved_ids = 0 + for data_id in xrange(DATA_ID_COUNT): + data_id = str(data_id) + hsh = unpack_from('>I', md5(str(data_id)).digest())[0] + vnode_id = hsh % VNODE_COUNT + node_id = vnode2node[vnode_id] + new_node_id = new_vnode2node[vnode_id] + if node_id != new_node_id: + moved_ids += 1 + percent_moved = 100.0 * moved_ids / DATA_ID_COUNT + print '%d ids moved, %.02f%%' % (moved_ids, percent_moved) + +:: + + 89841 ids moved, 0.90% + +There we go. In the next part of this series, will further examine the +algorithm’s limitations and how to improve on it. + +Part 3 +====== +In Part 2 of this series, we reached an algorithm that performed well +even when adding new nodes to the cluster. We used 1000 virtual nodes +that could be independently assigned to nodes, allowing us to minimize +the amount of data moved when a node was added. + +The number of virtual nodes puts a cap on how many real nodes you can +have. For example, if you have 1000 virtual nodes and you try to add a +1001st real node, you can’t assign a virtual node to it without leaving +another real node with no assignment, leaving you with just 1000 active +real nodes still. + +Unfortunately, the number of virtual nodes created at the beginning can +never change for the life of the cluster without a lot of careful work. 
+For example, you could double the virtual node count by splitting each +existing virtual node in half and assigning both halves to the same real +node. However, if the real node uses the virtual node’s id to optimally +store the data (for example, all data might be stored in /[virtual node +id]/[data id]) it would have to move data around locally to reflect the +change. And it would have to resolve data using both the new and old +locations while the moves were taking place, making atomic operations +difficult or impossible. + +Let’s continue with this assumption that changing the virtual node +count is more work than it’s worth, but keep in mind that some applications +might be fine with this. + +The easiest way to deal with this limitation is to make the limit high +enough that it won’t matter. For instance, if we decide our cluster will +never exceed 60,000 real nodes, we can just make 60,000 virtual nodes. + +Also, we should include in our calculations the relative size of our +nodes. For instance, a year from now we might have real nodes that can +handle twice the capacity of our current nodes. So we’d want to assign +twice the virtual nodes to those future nodes, so maybe we should raise +our virtual node estimate to 120,000. + +A good rule to follow might be to calculate 100 virtual nodes to each +real node at maximum capacity. This would allow you to alter the load +on any given node by 1%, even at max capacity, which is pretty fine +tuning. So now we’re at 6,000,000 virtual nodes for a max capacity cluster +of 60,000 real nodes. + +6 million virtual nodes seems like a lot, and it might seem like we’d +use up way too much memory. But the only structure this affects is the +virtual node to real node mapping. The base amount of memory required +would be 6 million times 2 bytes (to store a real node id from 0 to +65,535). 12 megabytes of memory just isn’t that much to use these days. 
+ +Even with all the overhead of flexible data types, things aren’t that +bad. I changed the code from the previous part in this series to have +60,000 real and 6,000,000 virtual nodes, changed the list to an array(‘H’), +and python topped out at 27m of resident memory – and that includes two +rings. + +To change terminology a bit, we’re going to start calling these virtual +nodes “partitions”. This will make it a bit easier to discern between the +two types of nodes we’ve been talking about so far. Also, it makes sense +to talk about partitions as they are really just unchanging sections +of the hash space. + +We’re also going to always keep the partition count a power of two. This +makes it easy to just use bit manipulation on the hash to determine the +partition rather than modulus. It isn’t much faster, but it is a little. +So, here’s our updated ring code, using 8,388,608 (2 ** 23) partitions +and 65,536 nodes. We’ve upped the sample data id set and checked the +distribution to make sure we haven’t broken anything. + +.. 
code-block:: python + + from array import array + from hashlib import md5 + from struct import unpack_from + + PARTITION_POWER = 23 + PARTITION_SHIFT = 32 - PARTITION_POWER + NODE_COUNT = 65536 + DATA_ID_COUNT = 100000000 + + part2node = array('H') + for part in xrange(2 ** PARTITION_POWER): + part2node.append(part % NODE_COUNT) + node_counts = [0] * NODE_COUNT + for data_id in xrange(DATA_ID_COUNT): + data_id = str(data_id) + part = unpack_from('>I', + md5(str(data_id)).digest())[0] >> PARTITION_SHIFT + node_id = part2node[part] + node_counts[node_id] += 1 + desired_count = DATA_ID_COUNT / NODE_COUNT + print '%d: Desired data ids per node' % desired_count + max_count = max(node_counts) + over = 100.0 * (max_count - desired_count) / desired_count + print '%d: Most data ids on one node, %.02f%% over' % \ + (max_count, over) + min_count = min(node_counts) + under = 100.0 * (desired_count - min_count) / desired_count + print '%d: Least data ids on one node, %.02f%% under' % \ + (min_count, under) + +:: + + 1525: Desired data ids per node + 1683: Most data ids on one node, 10.36% over + 1360: Least data ids on one node, 10.82% under + +Hmm. +–10% seems a bit high, but I reran with 65,536 partitions and +256 nodes and got +–0.4% so it’s just that our sample size (100m) is +too small for our number of partitions (8m). It’ll take way too long +to run experiments with an even larger sample size, so let’s reduce +back down to these lesser numbers. (To be certain, I reran at the full +version with a 10 billion data id sample set and got +–1%, but it took +6.5 hours to run.) + +In the next part of this series, we’ll talk about how to increase the +durability of our data in the cluster. + +Part 4 +====== +In Part 3 of this series, we just further discussed partitions (virtual +nodes) and cleaned up our code a bit based on that. Now, let’s talk +about how to increase the durability and availability of our data in the +cluster. 
+ +For many distributed data stores, durability is quite important. Either +RAID arrays or individually distinct copies of data are required. While +RAID will increase the durability, it does nothing to increase the +availability – if the RAID machine crashes, the data may be safe but +inaccessible until repairs are done. If we keep distinct copies of the +data on different machines and a machine crashes, the other copies will +still be available while we repair the broken machine. + +An easy way to gain this multiple copy durability/availability is to +just use multiple rings and groups of nodes. For instance, to achieve +the industry standard of three copies, you’d split the nodes into three +groups and each group would have its own ring and each would receive a +copy of each data item. This can work well enough, but has the drawback +that expanding capacity requires adding three nodes at a time and that +losing one node essentially lowers capacity by three times that node’s +capacity. + +Instead, let’s use a different, but common, approach of meeting our +requirements with a single ring. This can be done by walking the ring +from the starting point and looking for additional distinct nodes. +Here’s code that supports a variable number of replicas (set to 3 for +testing): + +.. 
code-block:: python + + from array import array + from hashlib import md5 + from struct import unpack_from + + REPLICAS = 3 + PARTITION_POWER = 16 + PARTITION_SHIFT = 32 - PARTITION_POWER + PARTITION_MAX = 2 ** PARTITION_POWER - 1 + NODE_COUNT = 256 + DATA_ID_COUNT = 10000000 + + part2node = array('H') + for part in xrange(2 ** PARTITION_POWER): + part2node.append(part % NODE_COUNT) + node_counts = [0] * NODE_COUNT + for data_id in xrange(DATA_ID_COUNT): + data_id = str(data_id) + part = unpack_from('>I', + md5(str(data_id)).digest())[0] >> PARTITION_SHIFT + node_ids = [part2node[part]] + node_counts[node_ids[0]] += 1 + for replica in xrange(1, REPLICAS): + while part2node[part] in node_ids: + part += 1 + if part > PARTITION_MAX: + part = 0 + node_ids.append(part2node[part]) + node_counts[node_ids[-1]] += 1 + desired_count = DATA_ID_COUNT / NODE_COUNT * REPLICAS + print '%d: Desired data ids per node' % desired_count + max_count = max(node_counts) + over = 100.0 * (max_count - desired_count) / desired_count + print '%d: Most data ids on one node, %.02f%% over' % \ + (max_count, over) + min_count = min(node_counts) + under = 100.0 * (desired_count - min_count) / desired_count + print '%d: Least data ids on one node, %.02f%% under' % \ + (min_count, under) + +:: + + 117186: Desired data ids per node + 118133: Most data ids on one node, 0.81% over + 116093: Least data ids on one node, 0.93% under + +That’s pretty good; less than 1% over/under. While this works well, +there are a couple of problems. + +First, because of how we’ve initially assigned the partitions to nodes, +all the partitions for a given node have their extra copies on the same +other two nodes. The problem here is that when a machine fails, the load +on these other nodes will jump by that amount. It’d be better if we +initially shuffled the partition assignment to distribute the failover +load better. + +The other problem is a bit harder to explain, but deals with physical +separation of machines. 
Imagine you can only put 16 machines in a rack +in your datacenter. The 256 nodes we’ve been using would fill 16 racks. +With our current code, if a rack goes out (power problem, network issue, +etc.) there is a good chance some data will have all three copies in that +rack, becoming inaccessible. We can fix this shortcoming by adding the +concept of zones to our nodes, and then ensuring that replicas are stored +in distinct zones. + +.. code-block:: python + + from array import array + from hashlib import md5 + from random import shuffle + from struct import unpack_from + + REPLICAS = 3 + PARTITION_POWER = 16 + PARTITION_SHIFT = 32 - PARTITION_POWER + PARTITION_MAX = 2 ** PARTITION_POWER - 1 + NODE_COUNT = 256 + ZONE_COUNT = 16 + DATA_ID_COUNT = 10000000 + + node2zone = [] + while len(node2zone) < NODE_COUNT: + zone = 0 + while zone < ZONE_COUNT and len(node2zone) < NODE_COUNT: + node2zone.append(zone) + zone += 1 + part2node = array('H') + for part in xrange(2 ** PARTITION_POWER): + part2node.append(part % NODE_COUNT) + shuffle(part2node) + node_counts = [0] * NODE_COUNT + zone_counts = [0] * ZONE_COUNT + for data_id in xrange(DATA_ID_COUNT): + data_id = str(data_id) + part = unpack_from('>I', + md5(str(data_id)).digest())[0] >> PARTITION_SHIFT + node_ids = [part2node[part]] + zones = [node2zone[node_ids[0]]] + node_counts[node_ids[0]] += 1 + zone_counts[zones[0]] += 1 + for replica in xrange(1, REPLICAS): + while part2node[part] in node_ids and \ + node2zone[part2node[part]] in zones: + part += 1 + if part > PARTITION_MAX: + part = 0 + node_ids.append(part2node[part]) + zones.append(node2zone[node_ids[-1]]) + node_counts[node_ids[-1]] += 1 + zone_counts[zones[-1]] += 1 + desired_count = DATA_ID_COUNT / NODE_COUNT * REPLICAS + print '%d: Desired data ids per node' % desired_count + max_count = max(node_counts) + over = 100.0 * (max_count - desired_count) / desired_count + print '%d: Most data ids on one node, %.02f%% over' % \ + (max_count, over) + min_count = 
min(node_counts) + under = 100.0 * (desired_count - min_count) / desired_count + print '%d: Least data ids on one node, %.02f%% under' % \ + (min_count, under) + desired_count = DATA_ID_COUNT / ZONE_COUNT * REPLICAS + print '%d: Desired data ids per zone' % desired_count + max_count = max(zone_counts) + over = 100.0 * (max_count - desired_count) / desired_count + print '%d: Most data ids in one zone, %.02f%% over' % \ + (max_count, over) + min_count = min(zone_counts) + under = 100.0 * (desired_count - min_count) / desired_count + print '%d: Least data ids in one zone, %.02f%% under' % \ + (min_count, under) + +:: + + 117186: Desired data ids per node + 118782: Most data ids on one node, 1.36% over + 115632: Least data ids on one node, 1.33% under + 1875000: Desired data ids per zone + 1878533: Most data ids in one zone, 0.19% over + 1869070: Least data ids in one zone, 0.32% under + +So the shuffle and zone distinctions affected our distribution some, +but still definitely good enough. This test took about 64 seconds to +run on my machine. + +There’s a completely alternate, and quite common, way of accomplishing +these same requirements. This alternate method doesn’t use partitions +at all, but instead just assigns anchors to the nodes within the hash +space. Finding the first node for a given hash just involves walking +this anchor ring for the next node, and finding additional nodes works +similarly as before. To attain the equivalent of our virtual nodes, +each real node is assigned multiple anchors. + +.. 
code-block:: python + + from bisect import bisect_left + from hashlib import md5 + from struct import unpack_from + + REPLICAS = 3 + NODE_COUNT = 256 + ZONE_COUNT = 16 + DATA_ID_COUNT = 10000000 + VNODE_COUNT = 100 + + node2zone = [] + while len(node2zone) < NODE_COUNT: + zone = 0 + while zone < ZONE_COUNT and len(node2zone) < NODE_COUNT: + node2zone.append(zone) + zone += 1 + hash2index = [] + index2node = [] + for node in xrange(NODE_COUNT): + for vnode in xrange(VNODE_COUNT): + hsh = unpack_from('>I', md5(str(node)).digest())[0] + index = bisect_left(hash2index, hsh) + if index > len(hash2index): + index = 0 + hash2index.insert(index, hsh) + index2node.insert(index, node) + node_counts = [0] * NODE_COUNT + zone_counts = [0] * ZONE_COUNT + for data_id in xrange(DATA_ID_COUNT): + data_id = str(data_id) + hsh = unpack_from('>I', md5(str(data_id)).digest())[0] + index = bisect_left(hash2index, hsh) + if index >= len(hash2index): + index = 0 + node_ids = [index2node[index]] + zones = [node2zone[node_ids[0]]] + node_counts[node_ids[0]] += 1 + zone_counts[zones[0]] += 1 + for replica in xrange(1, REPLICAS): + while index2node[index] in node_ids and \ + node2zone[index2node[index]] in zones: + index += 1 + if index >= len(hash2index): + index = 0 + node_ids.append(index2node[index]) + zones.append(node2zone[node_ids[-1]]) + node_counts[node_ids[-1]] += 1 + zone_counts[zones[-1]] += 1 + desired_count = DATA_ID_COUNT / NODE_COUNT * REPLICAS + print '%d: Desired data ids per node' % desired_count + max_count = max(node_counts) + over = 100.0 * (max_count - desired_count) / desired_count + print '%d: Most data ids on one node, %.02f%% over' % \ + (max_count, over) + min_count = min(node_counts) + under = 100.0 * (desired_count - min_count) / desired_count + print '%d: Least data ids on one node, %.02f%% under' % \ + (min_count, under) + desired_count = DATA_ID_COUNT / ZONE_COUNT * REPLICAS + print '%d: Desired data ids per zone' % desired_count + max_count = 
max(zone_counts) + over = 100.0 * (max_count - desired_count) / desired_count + print '%d: Most data ids in one zone, %.02f%% over' % \ + (max_count, over) + min_count = min(zone_counts) + under = 100.0 * (desired_count - min_count) / desired_count + print '%d: Least data ids in one zone, %.02f%% under' % \ + (min_count, under) + +:: + + 117186: Desired data ids per node + 351282: Most data ids on one node, 199.76% over + 15965: Least data ids on one node, 86.38% under + 1875000: Desired data ids per zone + 2248496: Most data ids in one zone, 19.92% over + 1378013: Least data ids in one zone, 26.51% under + +This test took over 15 minutes to run! Unfortunately, this method also +gives much less control over the distribution. To get better distribution, +you have to add more virtual nodes, which eats up more memory and takes +even more time to build the ring and perform distinct node lookups. The +most common operation, data id lookup, can be improved (by predetermining +each virtual nodes’ failover nodes, for instance) but it starts off so +far behind our first approach that we’ll just stick with that. + +In the next part of this series, we’ll start to wrap all this up into +a useful Python module. + +Part 5 +====== +In Part 4 of this series, we ended up with a multiple copy, distinctly +zoned ring. Or at least the start of it. In this final part we’ll package +the code up into a useable Python module and then add one last feature. +First, let’s separate the ring itself from the building of the data for +the ring and its testing. + +.. 
code-block:: python + + from array import array + from hashlib import md5 + from random import shuffle + from struct import unpack_from + from time import time + + class Ring(object): + + def __init__(self, nodes, part2node, replicas): + self.nodes = nodes + self.part2node = part2node + self.replicas = replicas + partition_power = 1 + while 2 ** partition_power < len(part2node): + partition_power += 1 + if len(part2node) != 2 ** partition_power: + raise Exception("part2node's length is not an " + "exact power of 2") + self.partition_shift = 32 - partition_power + + def get_nodes(self, data_id): + data_id = str(data_id) + part = unpack_from('>I', + md5(data_id).digest())[0] >> self.partition_shift + node_ids = [self.part2node[part]] + zones = [self.nodes[node_ids[0]]] + for replica in xrange(1, self.replicas): + while self.part2node[part] in node_ids and \ + self.nodes[self.part2node[part]] in zones: + part += 1 + if part >= len(self.part2node): + part = 0 + node_ids.append(self.part2node[part]) + zones.append(self.nodes[node_ids[-1]]) + return [self.nodes[n] for n in node_ids] + + def build_ring(nodes, partition_power, replicas): + begin = time() + part2node = array('H') + for part in xrange(2 ** partition_power): + part2node.append(part % len(nodes)) + shuffle(part2node) + ring = Ring(nodes, part2node, replicas) + print '%.02fs to build ring' % (time() - begin) + return ring + + def test_ring(ring): + begin = time() + DATA_ID_COUNT = 10000000 + node_counts = {} + zone_counts = {} + for data_id in xrange(DATA_ID_COUNT): + for node in ring.get_nodes(data_id): + node_counts[node['id']] = \ + node_counts.get(node['id'], 0) + 1 + zone_counts[node['zone']] = \ + zone_counts.get(node['zone'], 0) + 1 + print '%ds to test ring' % (time() - begin) + desired_count = \ + DATA_ID_COUNT / len(ring.nodes) * REPLICAS + print '%d: Desired data ids per node' % desired_count + max_count = max(node_counts.itervalues()) + over = \ + 100.0 * (max_count - desired_count) / desired_count 
+ print '%d: Most data ids on one node, %.02f%% over' % \ + (max_count, over) + min_count = min(node_counts.itervalues()) + under = \ + 100.0 * (desired_count - min_count) / desired_count + print '%d: Least data ids on one node, %.02f%% under' % \ + (min_count, under) + zone_count = \ + len(set(n['zone'] for n in ring.nodes.itervalues())) + desired_count = \ + DATA_ID_COUNT / zone_count * ring.replicas + print '%d: Desired data ids per zone' % desired_count + max_count = max(zone_counts.itervalues()) + over = \ + 100.0 * (max_count - desired_count) / desired_count + print '%d: Most data ids in one zone, %.02f%% over' % \ + (max_count, over) + min_count = min(zone_counts.itervalues()) + under = \ + 100.0 * (desired_count - min_count) / desired_count + print '%d: Least data ids in one zone, %.02f%% under' % \ + (min_count, under) + + if __name__ == '__main__': + PARTITION_POWER = 16 + REPLICAS = 3 + NODE_COUNT = 256 + ZONE_COUNT = 16 + nodes = {} + while len(nodes) < NODE_COUNT: + zone = 0 + while zone < ZONE_COUNT and len(nodes) < NODE_COUNT: + node_id = len(nodes) + nodes[node_id] = {'id': node_id, 'zone': zone} + zone += 1 + ring = build_ring(nodes, PARTITION_POWER, REPLICAS) + test_ring(ring) + +:: + + 0.06s to build ring + 82s to test ring + 117186: Desired data ids per node + 118773: Most data ids on one node, 1.35% over + 115801: Least data ids on one node, 1.18% under + 1875000: Desired data ids per zone + 1878339: Most data ids in one zone, 0.18% over + 1869914: Least data ids in one zone, 0.27% under + +It takes a bit longer to test our ring, but that’s mostly because of +the switch to dictionaries from arrays for various items. Having node +dictionaries is nice because you can attach any node information you +want directly there (ip addresses, tcp ports, drive paths, etc.). But +we’re still on track for further testing; our distribution is still good. + +Now, let’s add our one last feature to our ring: the concept of weights. 
+Weights are useful because the nodes you add later in a ring’s life are +likely to have more capacity than those you have at the outset. For this +test, we’ll make half our nodes have twice the weight. We’ll have to +change build_ring to give more partitions to the nodes with more weight +and we’ll change test_ring to take into account these weights. Since +we’ve changed so much I’ll just post the entire module again: + +.. code-block:: python + + from array import array + from hashlib import md5 + from random import shuffle + from struct import unpack_from + from time import time + + class Ring(object): + + def __init__(self, nodes, part2node, replicas): + self.nodes = nodes + self.part2node = part2node + self.replicas = replicas + partition_power = 1 + while 2 ** partition_power < len(part2node): + partition_power += 1 + if len(part2node) != 2 ** partition_power: + raise Exception("part2node's length is not an " + "exact power of 2") + self.partition_shift = 32 - partition_power + + def get_nodes(self, data_id): + data_id = str(data_id) + part = unpack_from('>I', + md5(data_id).digest())[0] >> self.partition_shift + node_ids = [self.part2node[part]] + zones = [self.nodes[node_ids[0]]] + for replica in xrange(1, self.replicas): + while self.part2node[part] in node_ids and \ + self.nodes[self.part2node[part]] in zones: + part += 1 + if part >= len(self.part2node): + part = 0 + node_ids.append(self.part2node[part]) + zones.append(self.nodes[node_ids[-1]]) + return [self.nodes[n] for n in node_ids] + + def build_ring(nodes, partition_power, replicas): + begin = time() + parts = 2 ** partition_power + total_weight = \ + float(sum(n['weight'] for n in nodes.itervalues())) + for node in nodes.itervalues(): + node['desired_parts'] = \ + parts / total_weight * node['weight'] + part2node = array('H') + for part in xrange(2 ** partition_power): + for node in nodes.itervalues(): + if node['desired_parts'] >= 1: + node['desired_parts'] -= 1 + part2node.append(node['id']) + 
break + else: + for node in nodes.itervalues(): + if node['desired_parts'] >= 0: + node['desired_parts'] -= 1 + part2node.append(node['id']) + break + shuffle(part2node) + ring = Ring(nodes, part2node, replicas) + print '%.02fs to build ring' % (time() - begin) + return ring + + def test_ring(ring): + begin = time() + DATA_ID_COUNT = 10000000 + node_counts = {} + zone_counts = {} + for data_id in xrange(DATA_ID_COUNT): + for node in ring.get_nodes(data_id): + node_counts[node['id']] = \ + node_counts.get(node['id'], 0) + 1 + zone_counts[node['zone']] = \ + zone_counts.get(node['zone'], 0) + 1 + print '%ds to test ring' % (time() - begin) + total_weight = float(sum(n['weight'] for n in + ring.nodes.itervalues())) + max_over = 0 + max_under = 0 + for node in ring.nodes.itervalues(): + desired = DATA_ID_COUNT * REPLICAS * \ + node['weight'] / total_weight + diff = node_counts[node['id']] - desired + if diff > 0: + over = 100.0 * diff / desired + if over > max_over: + max_over = over + else: + under = 100.0 * (-diff) / desired + if under > max_under: + max_under = under + print '%.02f%% max node over' % max_over + print '%.02f%% max node under' % max_under + max_over = 0 + max_under = 0 + for zone in set(n['zone'] for n in + ring.nodes.itervalues()): + zone_weight = sum(n['weight'] for n in + ring.nodes.itervalues() if n['zone'] == zone) + desired = DATA_ID_COUNT * REPLICAS * \ + zone_weight / total_weight + diff = zone_counts[zone] - desired + if diff > 0: + over = 100.0 * diff / desired + if over > max_over: + max_over = over + else: + under = 100.0 * (-diff) / desired + if under > max_under: + max_under = under + print '%.02f%% max zone over' % max_over + print '%.02f%% max zone under' % max_under + + if __name__ == '__main__': + PARTITION_POWER = 16 + REPLICAS = 3 + NODE_COUNT = 256 + ZONE_COUNT = 16 + nodes = {} + while len(nodes) < NODE_COUNT: + zone = 0 + while zone < ZONE_COUNT and len(nodes) < NODE_COUNT: + node_id = len(nodes) + nodes[node_id] = {'id': 
node_id, 'zone': zone, + 'weight': 1.0 + (node_id % 2)} + zone += 1 + ring = build_ring(nodes, PARTITION_POWER, REPLICAS) + test_ring(ring) + +:: + + 0.88s to build ring + 86s to test ring + 1.66% max over + 1.46% max under + 0.28% max zone over + 0.23% max zone under + +So things are still good, even though we have differently weighted nodes. +I ran another test with this code using random weights from 1 to 100 and +got over/under values for nodes of 7.35%/18.12% and zones of 0.24%/0.22%, +still pretty good considering the crazy weight ranges. + +Summary +======= +Hopefully this series has been a good introduction to building a ring. +This code is essentially how the OpenStack Swift ring works, except that +Swift’s ring has lots of additional optimizations, such as storing each +replica assignment separately, and lots of extra features for building, +validating, and otherwise working with rings. From 46d61a4dcd9a5d9157625c06d6fe7d916e80c3d2 Mon Sep 17 00:00:00 2001 From: Prashanth Pai Date: Wed, 18 Feb 2015 11:59:31 +0530 Subject: [PATCH 114/141] Refactor server side copy as middleware Rewrite server side copy and 'object post as copy' feature as middleware to simplify the PUT method in the object controller code. COPY is no longer a verb implemented as public method in Proxy application. The server side copy middleware is inserted to the left of dlo, slo and versioned_writes middlewares in the proxy server pipeline. As a result, dlo and slo copy_hooks are no longer required. SLO manifests are now validated when copied so when copying a manifest to another account the referenced segments must be readable in that account for the manifest copy to succeed (previously this validation was not made, meaning the manifest was copied but could be unusable if the segments were not readable). With this change, there should be no change in functionality or existing behavior. This is asserted with (almost) no changes required to existing functional tests. 
Some notes (for operators): * Middleware required to be auto-inserted before slo and dlo and versioned_writes * Turning off server side copy is not configurable. * object_post_as_copy is no longer a configurable option of proxy server but of this middleware. However, for smooth upgrade, config option set in proxy server app is also read. DocImpact: Introducing server side copy as middleware Co-Authored-By: Alistair Coles Co-Authored-By: Thiago da Silva Change-Id: Ic96a92e938589a2f6add35a40741fd062f1c29eb Signed-off-by: Prashanth Pai Signed-off-by: Thiago da Silva --- doc/saio/swift/proxy-server.conf | 5 +- doc/source/logs.rst | 1 + doc/source/middleware.rst | 9 + etc/proxy-server.conf-sample | 20 +- setup.cfg | 1 + swift/common/constraints.py | 62 - swift/common/middleware/account_quotas.py | 22 +- swift/common/middleware/container_quotas.py | 36 +- swift/common/middleware/copy.py | 522 ++++++++ swift/common/middleware/dlo.py | 23 - swift/common/middleware/slo.py | 19 - swift/common/middleware/versioned_writes.py | 32 +- swift/common/swob.py | 5 + swift/common/wsgi.py | 2 +- swift/proxy/controllers/obj.py | 317 +---- swift/proxy/server.py | 15 +- test/functional/tests.py | 94 +- test/unit/common/middleware/helpers.py | 6 + .../common/middleware/test_account_quotas.py | 169 +-- test/unit/common/middleware/test_copy.py | 1183 +++++++++++++++++ test/unit/common/middleware/test_dlo.py | 101 -- test/unit/common/middleware/test_quotas.py | 323 ++--- test/unit/common/middleware/test_slo.py | 66 +- .../middleware/test_versioned_writes.py | 225 ++-- test/unit/common/test_constraints.py | 81 -- test/unit/common/test_swob.py | 13 +- test/unit/common/test_wsgi.py | 13 +- test/unit/proxy/controllers/test_obj.py | 209 +-- test/unit/proxy/test_server.py | 950 +------------ test/unit/proxy/test_sysmeta.py | 91 -- 30 files changed, 2301 insertions(+), 2314 deletions(-) create mode 100644 swift/common/middleware/copy.py create mode 100644 test/unit/common/middleware/test_copy.py 
diff --git a/doc/saio/swift/proxy-server.conf b/doc/saio/swift/proxy-server.conf index d9e5c95148..76b85d5818 100644 --- a/doc/saio/swift/proxy-server.conf +++ b/doc/saio/swift/proxy-server.conf @@ -9,7 +9,7 @@ eventlet_debug = true [pipeline:main] # Yes, proxy-logging appears twice. This is so that # middleware-originated requests get logged too. -pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk tempurl ratelimit crossdomain container_sync tempauth staticweb container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server +pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk tempurl ratelimit crossdomain container_sync tempauth staticweb copy container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server [filter:catch_errors] use = egg:swift#catch_errors @@ -68,6 +68,9 @@ use = egg:swift#gatekeeper use = egg:swift#versioned_writes allow_versioned_writes = true +[filter:copy] +use = egg:swift#copy + [app:proxy-server] use = egg:swift#proxy allow_account_management = true diff --git a/doc/source/logs.rst b/doc/source/logs.rst index 75b669f1a5..7e2c1dd94b 100644 --- a/doc/source/logs.rst +++ b/doc/source/logs.rst @@ -103,6 +103,7 @@ LE :ref:`list_endpoints` KS :ref:`keystoneauth` RL :ref:`ratelimit` VW :ref:`versioned_writes` +SSC :ref:`copy` ======================= ============================= diff --git a/doc/source/middleware.rst b/doc/source/middleware.rst index 3c17339b17..a078747204 100644 --- a/doc/source/middleware.rst +++ b/doc/source/middleware.rst @@ -187,6 +187,15 @@ Recon :members: :show-inheritance: +.. _copy: + +Server Side Copy +================ + +.. 
automodule:: swift.common.middleware.copy + :members: + :show-inheritance: + Static Large Objects ==================== diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index ba860b9b9d..b5cfbf873b 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -79,12 +79,12 @@ bind_port = 8080 [pipeline:main] # This sample pipeline uses tempauth and is used for SAIO dev work and # testing. See below for a pipeline using keystone. -pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server +pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth copy container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server # The following pipeline shows keystone integration. Comment out the one # above and uncomment this one. Additional steps for integrating keystone are # covered further below in the filter sections for authtoken and keystoneauth. -#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server +#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit authtoken keystoneauth copy container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server [app:proxy-server] use = egg:swift#proxy @@ -129,11 +129,6 @@ use = egg:swift#proxy # 'false' no one, even authorized, can. # allow_account_management = false # -# Set object_post_as_copy = false to turn on fast posts where only the metadata -# changes are stored anew and the original data file is kept in place. This -# makes for quicker posts. 
-# object_post_as_copy = true -# # If set to 'true' authorized accounts that do not yet exist within the Swift # cluster will be automatically created. # account_autocreate = false @@ -749,3 +744,14 @@ use = egg:swift#versioned_writes # in the container configuration file, which will be eventually # deprecated. See documentation for more details. # allow_versioned_writes = false + +# Note: Put after auth and before dlo and slo middlewares. +# If you don't put it in the pipeline, it will be inserted for you. +[filter:copy] +use = egg:swift#copy +# Set object_post_as_copy = false to turn on fast posts where only the metadata +# changes are stored anew and the original data file is kept in place. This +# makes for quicker posts. +# When object_post_as_copy is set to True, a POST request will be transformed +# into a COPY request where source and destination objects are the same. +# object_post_as_copy = true diff --git a/setup.cfg b/setup.cfg index 77c6824b44..098b6c64f7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -96,6 +96,7 @@ paste.filter_factory = container_sync = swift.common.middleware.container_sync:filter_factory xprofile = swift.common.middleware.xprofile:filter_factory versioned_writes = swift.common.middleware.versioned_writes:filter_factory + copy = swift.common.middleware.copy:filter_factory [build_sphinx] all_files = 1 diff --git a/swift/common/constraints.py b/swift/common/constraints.py index abfab4bb9e..787d2d91da 100644 --- a/swift/common/constraints.py +++ b/swift/common/constraints.py @@ -20,7 +20,6 @@ import time import six from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError from six.moves import urllib -from six.moves.urllib.parse import unquote from swift.common import utils, exceptions from swift.common.swob import HTTPBadRequest, HTTPLengthRequired, \ @@ -205,10 +204,6 @@ def check_object_creation(req, object_name): request=req, content_type='text/plain') - if 'X-Copy-From' in req.headers and req.content_length: - 
return HTTPBadRequest(body='Copy requests require a zero byte body', - request=req, content_type='text/plain') - if len(object_name) > MAX_OBJECT_NAME_LENGTH: return HTTPBadRequest(body='Object name length of %d longer than %d' % (len(object_name), MAX_OBJECT_NAME_LENGTH), @@ -359,63 +354,6 @@ def check_utf8(string): return False -def check_path_header(req, name, length, error_msg): - """ - Validate that the value of path-like header is - well formatted. We assume the caller ensures that - specific header is present in req.headers. - - :param req: HTTP request object - :param name: header name - :param length: length of path segment check - :param error_msg: error message for client - :returns: A tuple with path parts according to length - :raise: HTTPPreconditionFailed if header value - is not well formatted. - """ - src_header = unquote(req.headers.get(name)) - if not src_header.startswith('/'): - src_header = '/' + src_header - try: - return utils.split_path(src_header, length, length, True) - except ValueError: - raise HTTPPreconditionFailed( - request=req, - body=error_msg) - - -def check_copy_from_header(req): - """ - Validate that the value from x-copy-from header is - well formatted. We assume the caller ensures that - x-copy-from header is present in req.headers. - - :param req: HTTP request object - :returns: A tuple with container name and object name - :raise: HTTPPreconditionFailed if x-copy-from value - is not well formatted. - """ - return check_path_header(req, 'X-Copy-From', 2, - 'X-Copy-From header must be of the form ' - '/') - - -def check_destination_header(req): - """ - Validate that the value from destination header is - well formatted. We assume the caller ensures that - destination header is present in req.headers. - - :param req: HTTP request object - :returns: A tuple with container name and object name - :raise: HTTPPreconditionFailed if destination value - is not well formatted. 
- """ - return check_path_header(req, 'Destination', 2, - 'Destination header must be of the form ' - '/') - - def check_name_format(req, name, target_type): """ Validate that the header contains valid account or container name. diff --git a/swift/common/middleware/account_quotas.py b/swift/common/middleware/account_quotas.py index fcb55b5573..8811aad84c 100644 --- a/swift/common/middleware/account_quotas.py +++ b/swift/common/middleware/account_quotas.py @@ -52,11 +52,10 @@ Due to the eventual consistency further uploads might be possible until the account size has been updated. """ -from swift.common.constraints import check_copy_from_header from swift.common.swob import HTTPForbidden, HTTPBadRequest, \ HTTPRequestEntityTooLarge, wsgify from swift.common.utils import register_swift_info -from swift.proxy.controllers.base import get_account_info, get_object_info +from swift.proxy.controllers.base import get_account_info class AccountQuotaMiddleware(object): @@ -71,7 +70,7 @@ class AccountQuotaMiddleware(object): @wsgify def __call__(self, request): - if request.method not in ("POST", "PUT", "COPY"): + if request.method not in ("POST", "PUT"): return self.app try: @@ -106,15 +105,6 @@ class AccountQuotaMiddleware(object): if request.method == "POST" or not obj: return self.app - if request.method == 'COPY': - copy_from = container + '/' + obj - else: - if 'x-copy-from' in request.headers: - src_cont, src_obj = check_copy_from_header(request) - copy_from = "%s/%s" % (src_cont, src_obj) - else: - copy_from = None - content_length = (request.content_length or 0) account_info = get_account_info(request.environ, self.app) @@ -127,14 +117,6 @@ class AccountQuotaMiddleware(object): if quota < 0: return self.app - if copy_from: - path = '/' + ver + '/' + account + '/' + copy_from - object_info = get_object_info(request.environ, self.app, path) - if not object_info or not object_info['length']: - content_length = 0 - else: - content_length = int(object_info['length']) - 
new_size = int(account_info['bytes']) + content_length if quota < new_size: resp = HTTPRequestEntityTooLarge(body='Upload exceeds quota.') diff --git a/swift/common/middleware/container_quotas.py b/swift/common/middleware/container_quotas.py index 4feca69a7b..a78876aca5 100644 --- a/swift/common/middleware/container_quotas.py +++ b/swift/common/middleware/container_quotas.py @@ -51,13 +51,11 @@ For example:: [filter:container_quotas] use = egg:swift#container_quotas """ -from swift.common.constraints import check_copy_from_header, \ - check_account_format, check_destination_header from swift.common.http import is_success from swift.common.swob import HTTPRequestEntityTooLarge, HTTPBadRequest, \ wsgify from swift.common.utils import register_swift_info -from swift.proxy.controllers.base import get_container_info, get_object_info +from swift.proxy.controllers.base import get_container_info class ContainerQuotaMiddleware(object): @@ -91,25 +89,9 @@ class ContainerQuotaMiddleware(object): return HTTPBadRequest(body='Invalid count quota.') # check user uploads against quotas - elif obj and req.method in ('PUT', 'COPY'): - container_info = None - if req.method == 'PUT': - container_info = get_container_info( - req.environ, self.app, swift_source='CQ') - if req.method == 'COPY' and 'Destination' in req.headers: - dest_account = account - if 'Destination-Account' in req.headers: - dest_account = req.headers.get('Destination-Account') - dest_account = check_account_format(req, dest_account) - dest_container, dest_object = check_destination_header(req) - path_info = req.environ['PATH_INFO'] - req.environ['PATH_INFO'] = "/%s/%s/%s/%s" % ( - version, dest_account, dest_container, dest_object) - try: - container_info = get_container_info( - req.environ, self.app, swift_source='CQ') - finally: - req.environ['PATH_INFO'] = path_info + elif obj and req.method in ('PUT'): + container_info = get_container_info( + req.environ, self.app, swift_source='CQ') if not container_info or not 
is_success(container_info['status']): # this will hopefully 404 later return self.app @@ -118,16 +100,6 @@ class ContainerQuotaMiddleware(object): 'bytes' in container_info and \ container_info['meta']['quota-bytes'].isdigit(): content_length = (req.content_length or 0) - if 'x-copy-from' in req.headers or req.method == 'COPY': - if 'x-copy-from' in req.headers: - container, obj = check_copy_from_header(req) - path = '/%s/%s/%s/%s' % (version, account, - container, obj) - object_info = get_object_info(req.environ, self.app, path) - if not object_info or not object_info['length']: - content_length = 0 - else: - content_length = int(object_info['length']) new_size = int(container_info['bytes']) + content_length if int(container_info['meta']['quota-bytes']) < new_size: return self.bad_response(req, container_info) diff --git a/swift/common/middleware/copy.py b/swift/common/middleware/copy.py new file mode 100644 index 0000000000..e895813e8d --- /dev/null +++ b/swift/common/middleware/copy.py @@ -0,0 +1,522 @@ +# Copyright (c) 2015 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Server side copy is a feature that enables users/clients to COPY objects +between accounts and containers without the need to download and then +re-upload objects, thus eliminating additional bandwidth consumption and +also saving time. This may be used when renaming/moving an object which +in Swift is a (COPY + DELETE) operation. 
+ +The server side copy middleware should be inserted in the pipeline after auth +and before the quotas and large object middlewares. If it is not present in the +pipeline in the proxy-server configuration file, it will be inserted +automatically. There is no configurable option provided to turn off server +side copy. + +-------- +Metadata +-------- +* All metadata of source object is preserved during object copy. +* One can also provide additional metadata during PUT/COPY request. This will + over-write any existing conflicting keys. +* Server side copy can also be used to change content-type of an existing + object. + +----------- +Object Copy +----------- +* The destination container must exist before requesting copy of the object. +* When several replicas exist, the system copies from the most recent replica. + That is, the copy operation behaves as though the X-Newest header is in the + request. +* The request to copy an object should have no body (i.e. content-length of the + request must be zero). + +There are two ways in which an object can be copied: + +1. Send a PUT request to the new object (destination/target) with an additional + header named ``X-Copy-From`` specifying the source object + (in '/container/object' format). Example:: + + curl -i -X PUT http:///container1/destination_obj + -H 'X-Auth-Token: ' + -H 'X-Copy-From: /container2/source_obj' + -H 'Content-Length: 0' + +2. Send a COPY request with an existing object in URL with an additional header + named ``Destination`` specifying the destination/target object + (in '/container/object' format). Example:: + + curl -i -X COPY http:///container2/source_obj + -H 'X-Auth-Token: ' + -H 'Destination: /container1/destination_obj' + -H 'Content-Length: 0' + +Note that if the incoming request has some conditional headers (e.g. ``Range``, +``If-Match``), the *source* object will be evaluated for these headers (i.e. 
if +PUT with both ``X-Copy-From`` and ``Range``, Swift will make a partial copy to +the destination object). + +------------------------- +Cross Account Object Copy +------------------------- +Objects can also be copied from one account to another account if the user +has the necessary permissions (i.e. permission to read from container +in source account and permission to write to container in destination account). + +Similar to examples mentioned above, there are two ways to copy objects across +accounts: + +1. Like the example above, send PUT request to copy object but with an + additional header named ``X-Copy-From-Account`` specifying the source + account. Example:: + + curl -i -X PUT http://:/v1/AUTH_test1/container/destination_obj + -H 'X-Auth-Token: ' + -H 'X-Copy-From: /container/source_obj' + -H 'X-Copy-From-Account: AUTH_test2' + -H 'Content-Length: 0' + +2. Like the previous example, send a COPY request but with an additional header + named ``Destination-Account`` specifying the name of destination account. + Example:: + + curl -i -X COPY http://:/v1/AUTH_test2/container/source_obj + -H 'X-Auth-Token: ' + -H 'Destination: /container/destination_obj' + -H 'Destination-Account: AUTH_test1' + -H 'Content-Length: 0' + +------------------- +Large Object Copy +------------------- +The best option to copy a large object is to copy segments individually. +To copy the manifest object of a large object, add the query parameter to +the copy request:: + + ?multipart-manifest=get + +If a request is sent without the query parameter, an attempt will be made to +copy the whole object but will fail if the object size is +greater than 5GB. + +------------------- +Object Post as Copy +------------------- +Historically, this has been a feature (and a configurable option with default +set to True) in proxy server configuration. This has been moved to server side +copy middleware. 
+ +When ``object_post_as_copy`` is set to ``true`` (default value), an incoming +POST request is morphed into a COPY request where source and destination +objects are same. + +This feature was necessary because of a previous behavior where POSTS would +update the metadata on the object but not on the container. As a result, +features like container sync would not work correctly. This is no longer the +case and the plan is to deprecate this option. It is being kept now for +backwards compatibility. At first chance, set ``object_post_as_copy`` to +``false``. +""" + +import os +from urllib import quote +from ConfigParser import ConfigParser, NoSectionError, NoOptionError +from six.moves.urllib.parse import unquote + +from swift.common import utils +from swift.common.utils import get_logger, \ + config_true_value, FileLikeIter, read_conf_dir, close_if_possible +from swift.common.swob import Request, HTTPPreconditionFailed, \ + HTTPRequestEntityTooLarge, HTTPBadRequest +from swift.common.http import HTTP_MULTIPLE_CHOICES, HTTP_CREATED, \ + is_success +from swift.common.constraints import check_account_format, MAX_FILE_SIZE +from swift.common.request_helpers import copy_header_subset, remove_items, \ + is_sys_meta, is_sys_or_user_meta +from swift.common.wsgi import WSGIContext, make_subrequest + + +def _check_path_header(req, name, length, error_msg): + """ + Validate that the value of path-like header is + well formatted. We assume the caller ensures that + specific header is present in req.headers. + + :param req: HTTP request object + :param name: header name + :param length: length of path segment check + :param error_msg: error message for client + :returns: A tuple with path parts according to length + :raise: HTTPPreconditionFailed if header value + is not well formatted. 
+ """ + src_header = unquote(req.headers.get(name)) + if not src_header.startswith('/'): + src_header = '/' + src_header + try: + return utils.split_path(src_header, length, length, True) + except ValueError: + raise HTTPPreconditionFailed( + request=req, + body=error_msg) + + +def _check_copy_from_header(req): + """ + Validate that the value from x-copy-from header is + well formatted. We assume the caller ensures that + x-copy-from header is present in req.headers. + + :param req: HTTP request object + :returns: A tuple with container name and object name + :raise: HTTPPreconditionFailed if x-copy-from value + is not well formatted. + """ + return _check_path_header(req, 'X-Copy-From', 2, + 'X-Copy-From header must be of the form ' + '/') + + +def _check_destination_header(req): + """ + Validate that the value from destination header is + well formatted. We assume the caller ensures that + destination header is present in req.headers. + + :param req: HTTP request object + :returns: A tuple with container name and object name + :raise: HTTPPreconditionFailed if destination value + is not well formatted. 
+ """ + return _check_path_header(req, 'Destination', 2, + 'Destination header must be of the form ' + '/') + + +def _copy_headers_into(from_r, to_r): + """ + Will copy desired headers from from_r to to_r + :params from_r: a swob Request or Response + :params to_r: a swob Request or Response + """ + pass_headers = ['x-delete-at'] + for k, v in from_r.headers.items(): + if is_sys_or_user_meta('object', k) or k.lower() in pass_headers: + to_r.headers[k] = v + + +class ServerSideCopyWebContext(WSGIContext): + + def __init__(self, app, logger): + super(ServerSideCopyWebContext, self).__init__(app) + self.app = app + self.logger = logger + + def get_source_resp(self, req): + sub_req = make_subrequest( + req.environ, path=req.path_info, headers=req.headers, + swift_source='SSC') + return sub_req.get_response(self.app) + + def send_put_req(self, req, additional_resp_headers, start_response): + app_resp = self._app_call(req.environ) + self._adjust_put_response(req, additional_resp_headers) + start_response(self._response_status, + self._response_headers, + self._response_exc_info) + return app_resp + + def _adjust_put_response(self, req, additional_resp_headers): + if 'swift.post_as_copy' in req.environ: + # Older editions returned 202 Accepted on object POSTs, so we'll + # convert any 201 Created responses to that for compatibility with + # picky clients. 
+ if self._get_status_int() == HTTP_CREATED: + self._response_status = '202 Accepted' + elif is_success(self._get_status_int()): + for header, value in additional_resp_headers.items(): + self._response_headers.append((header, value)) + + def handle_OPTIONS_request(self, req, start_response): + app_resp = self._app_call(req.environ) + if is_success(self._get_status_int()): + for i, (header, value) in enumerate(self._response_headers): + if header.lower() == 'allow' and 'COPY' not in value: + self._response_headers[i] = ('Allow', value + ', COPY') + if header.lower() == 'access-control-allow-methods' and \ + 'COPY' not in value: + self._response_headers[i] = \ + ('Access-Control-Allow-Methods', value + ', COPY') + start_response(self._response_status, + self._response_headers, + self._response_exc_info) + return app_resp + + +class ServerSideCopyMiddleware(object): + + def __init__(self, app, conf): + self.app = app + self.logger = get_logger(conf, log_route="copy") + # Read the old object_post_as_copy option from Proxy app just in case + # someone has set it to false (non default). This wouldn't cause + # problems during upgrade. + self._load_object_post_as_copy_conf(conf) + self.object_post_as_copy = \ + config_true_value(conf.get('object_post_as_copy', 'true')) + + def _load_object_post_as_copy_conf(self, conf): + if ('object_post_as_copy' in conf or '__file__' not in conf): + # Option is explicitly set in middleware conf. In that case, + # we assume operator knows what he's doing. 
+ # This takes preference over the one set in proxy app + return + + cp = ConfigParser() + if os.path.isdir(conf['__file__']): + read_conf_dir(cp, conf['__file__']) + else: + cp.read(conf['__file__']) + + try: + pipe = cp.get("pipeline:main", "pipeline") + except (NoSectionError, NoOptionError): + return + + proxy_name = pipe.rsplit(None, 1)[-1] + proxy_section = "app:" + proxy_name + + try: + conf['object_post_as_copy'] = cp.get(proxy_section, + 'object_post_as_copy') + except (NoSectionError, NoOptionError): + pass + + def __call__(self, env, start_response): + req = Request(env) + try: + (version, account, container, obj) = req.split_path(4, 4, True) + except ValueError: + # If obj component is not present in req, do not proceed further. + return self.app(env, start_response) + + self.account_name = account + self.container_name = container + self.object_name = obj + + # Save off original request method (COPY/POST) in case it gets mutated + # into PUT during handling. This way logging can display the method + # the client actually sent. + req.environ['swift.orig_req_method'] = req.method + + if req.method == 'PUT' and req.headers.get('X-Copy-From'): + return self.handle_PUT(req, start_response) + elif req.method == 'COPY': + return self.handle_COPY(req, start_response) + elif req.method == 'POST' and self.object_post_as_copy: + return self.handle_object_post_as_copy(req, start_response) + elif req.method == 'OPTIONS': + # Does not interfere with OPTIONS response from (account,container) + # servers and /info response. 
+ return self.handle_OPTIONS(req, start_response) + + return self.app(env, start_response) + + def handle_object_post_as_copy(self, req, start_response): + req.method = 'PUT' + req.path_info = '/v1/%s/%s/%s' % ( + self.account_name, self.container_name, self.object_name) + req.headers['Content-Length'] = 0 + req.headers.pop('Range', None) + req.headers['X-Copy-From'] = quote('/%s/%s' % (self.container_name, + self.object_name)) + req.environ['swift.post_as_copy'] = True + params = req.params + # for post-as-copy always copy the manifest itself if source is *LO + params['multipart-manifest'] = 'get' + req.params = params + return self.handle_PUT(req, start_response) + + def handle_COPY(self, req, start_response): + if not req.headers.get('Destination'): + return HTTPPreconditionFailed(request=req, + body='Destination header required' + )(req.environ, start_response) + dest_account = self.account_name + if 'Destination-Account' in req.headers: + dest_account = req.headers.get('Destination-Account') + dest_account = check_account_format(req, dest_account) + req.headers['X-Copy-From-Account'] = self.account_name + self.account_name = dest_account + del req.headers['Destination-Account'] + dest_container, dest_object = _check_destination_header(req) + source = '/%s/%s' % (self.container_name, self.object_name) + self.container_name = dest_container + self.object_name = dest_object + # re-write the existing request as a PUT instead of creating a new one + req.method = 'PUT' + # As this the path info is updated with destination container, + # the proxy server app will use the right object controller + # implementation corresponding to the container's policy type. 
+ ver, _junk = req.split_path(1, 2, rest_with_last=True) + req.path_info = '/%s/%s/%s/%s' % \ + (ver, dest_account, dest_container, dest_object) + req.headers['Content-Length'] = 0 + req.headers['X-Copy-From'] = quote(source) + del req.headers['Destination'] + return self.handle_PUT(req, start_response) + + def _get_source_object(self, ssc_ctx, source_path, req): + source_req = req.copy_get() + + # make sure the source request uses it's container_info + source_req.headers.pop('X-Backend-Storage-Policy-Index', None) + source_req.path_info = quote(source_path) + source_req.headers['X-Newest'] = 'true' + if 'swift.post_as_copy' in req.environ: + # We're COPYing one object over itself because of a POST; rely on + # the PUT for write authorization, don't require read authorization + source_req.environ['swift.authorize'] = lambda req: None + source_req.environ['swift.authorize_override'] = True + + # in case we are copying an SLO manifest, set format=raw parameter + params = source_req.params + if params.get('multipart-manifest') == 'get': + params['format'] = 'raw' + source_req.params = params + + source_resp = ssc_ctx.get_source_resp(source_req) + + if source_resp.content_length is None: + # This indicates a transfer-encoding: chunked source object, + # which currently only happens because there are more than + # CONTAINER_LISTING_LIMIT segments in a segmented object. In + # this case, we're going to refuse to do the server-side copy. 
+ return HTTPRequestEntityTooLarge(request=req) + + if source_resp.content_length > MAX_FILE_SIZE: + return HTTPRequestEntityTooLarge(request=req) + + return source_resp + + def _create_response_headers(self, source_path, source_resp, sink_req): + resp_headers = dict() + acct, path = source_path.split('/', 3)[2:4] + resp_headers['X-Copied-From-Account'] = quote(acct) + resp_headers['X-Copied-From'] = quote(path) + if 'last-modified' in source_resp.headers: + resp_headers['X-Copied-From-Last-Modified'] = \ + source_resp.headers['last-modified'] + # Existing sys and user meta of source object is added to response + # headers in addition to the new ones. + for k, v in sink_req.headers.items(): + if is_sys_or_user_meta('object', k) or k.lower() == 'x-delete-at': + resp_headers[k] = v + return resp_headers + + def handle_PUT(self, req, start_response): + if req.content_length: + return HTTPBadRequest(body='Copy requests require a zero byte ' + 'body', request=req, + content_type='text/plain')(req.environ, + start_response) + + # Form the path of source object to be fetched + ver, acct, _rest = req.split_path(2, 3, True) + src_account_name = req.headers.get('X-Copy-From-Account') + if src_account_name: + src_account_name = check_account_format(req, src_account_name) + else: + src_account_name = acct + src_container_name, src_obj_name = _check_copy_from_header(req) + source_path = '/%s/%s/%s/%s' % (ver, src_account_name, + src_container_name, src_obj_name) + + if req.environ.get('swift.orig_req_method', req.method) != 'POST': + self.logger.info("Copying object from %s to %s" % + (source_path, req.path)) + + # GET the source object, bail out on error + ssc_ctx = ServerSideCopyWebContext(self.app, self.logger) + source_resp = self._get_source_object(ssc_ctx, source_path, req) + if source_resp.status_int >= HTTP_MULTIPLE_CHOICES: + close_if_possible(source_resp.app_iter) + return source_resp(source_resp.environ, start_response) + + # Create a new Request object based on the 
original req instance. + # This will preserve env and headers. + sink_req = Request.blank(req.path_info, + environ=req.environ, headers=req.headers) + + params = sink_req.params + if params.get('multipart-manifest') == 'get': + if 'X-Static-Large-Object' in source_resp.headers: + params['multipart-manifest'] = 'put' + if 'X-Object-Manifest' in source_resp.headers: + del params['multipart-manifest'] + sink_req.headers['X-Object-Manifest'] = \ + source_resp.headers['X-Object-Manifest'] + sink_req.params = params + + # Set data source, content length and etag for the PUT request + sink_req.environ['wsgi.input'] = FileLikeIter(source_resp.app_iter) + sink_req.content_length = source_resp.content_length + sink_req.etag = source_resp.etag + + # We no longer need these headers + sink_req.headers.pop('X-Copy-From', None) + sink_req.headers.pop('X-Copy-From-Account', None) + # If the copy request does not explicitly override content-type, + # use the one present in the source object. + if not req.headers.get('content-type'): + sink_req.headers['Content-Type'] = \ + source_resp.headers['Content-Type'] + + fresh_meta_flag = config_true_value( + sink_req.headers.get('x-fresh-metadata', 'false')) + + if fresh_meta_flag or 'swift.post_as_copy' in sink_req.environ: + # Post-as-copy: ignore new sysmeta, copy existing sysmeta + condition = lambda k: is_sys_meta('object', k) + remove_items(sink_req.headers, condition) + copy_header_subset(source_resp, sink_req, condition) + else: + # Copy/update existing sysmeta and user meta + _copy_headers_into(source_resp, sink_req) + # Copy/update new metadata provided in request if any + _copy_headers_into(req, sink_req) + + # Create response headers for PUT response + resp_headers = self._create_response_headers(source_path, + source_resp, sink_req) + + put_resp = ssc_ctx.send_put_req(sink_req, resp_headers, start_response) + close_if_possible(source_resp.app_iter) + return put_resp + + def handle_OPTIONS(self, req, start_response): + return 
ServerSideCopyWebContext(self.app, self.logger).\ + handle_OPTIONS_request(req, start_response) + + +def filter_factory(global_conf, **local_conf): + conf = global_conf.copy() + conf.update(local_conf) + + def copy_filter(app): + return ServerSideCopyMiddleware(app, conf) + + return copy_filter diff --git a/swift/common/middleware/dlo.py b/swift/common/middleware/dlo.py index 2fd37c3d29..1c27800eb2 100644 --- a/swift/common/middleware/dlo.py +++ b/swift/common/middleware/dlo.py @@ -405,11 +405,6 @@ class DynamicLargeObject(object): except ValueError: return self.app(env, start_response) - # install our COPY-callback hook - env['swift.copy_hook'] = self.copy_hook( - env.get('swift.copy_hook', - lambda src_req, src_resp, sink_req: src_resp)) - if ((req.method == 'GET' or req.method == 'HEAD') and req.params.get('multipart-manifest') != 'get'): return GetContext(self, self.logger).\ @@ -438,24 +433,6 @@ class DynamicLargeObject(object): body=('X-Object-Manifest must be in the ' 'format container/prefix')) - def copy_hook(self, inner_hook): - - def dlo_copy_hook(source_req, source_resp, sink_req): - x_o_m = source_resp.headers.get('X-Object-Manifest') - if x_o_m: - if source_req.params.get('multipart-manifest') == 'get': - # To copy the manifest, we let the copy proceed as normal, - # but ensure that X-Object-Manifest is set on the new - # object. 
- sink_req.headers['X-Object-Manifest'] = x_o_m - else: - ctx = GetContext(self, self.logger) - source_resp = ctx.get_or_head_response( - source_req, x_o_m, source_resp.headers.items()) - return inner_hook(source_req, source_resp, sink_req) - - return dlo_copy_hook - def filter_factory(global_conf, **local_conf): conf = global_conf.copy() diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py index 0216264b99..b87c8f2984 100644 --- a/swift/common/middleware/slo.py +++ b/swift/common/middleware/slo.py @@ -798,20 +798,6 @@ class StaticLargeObject(object): """ return SloGetContext(self).handle_slo_get_or_head(req, start_response) - def copy_hook(self, inner_hook): - - def slo_hook(source_req, source_resp, sink_req): - x_slo = source_resp.headers.get('X-Static-Large-Object') - if (config_true_value(x_slo) - and source_req.params.get('multipart-manifest') != 'get' - and 'swift.post_as_copy' not in source_req.environ): - source_resp = SloGetContext(self).get_or_head_response( - source_req, source_resp.headers.items(), - source_resp.app_iter) - return inner_hook(source_req, source_resp, sink_req) - - return slo_hook - def handle_multipart_put(self, req, start_response): """ Will handle the PUT of a SLO manifest. 
@@ -1058,11 +1044,6 @@ class StaticLargeObject(object): except ValueError: return self.app(env, start_response) - # install our COPY-callback hook - env['swift.copy_hook'] = self.copy_hook( - env.get('swift.copy_hook', - lambda src_req, src_resp, sink_req: src_resp)) - try: if req.method == 'PUT' and \ req.params.get('multipart-manifest') == 'put': diff --git a/swift/common/middleware/versioned_writes.py b/swift/common/middleware/versioned_writes.py index 3cb0989bba..ae091cff20 100644 --- a/swift/common/middleware/versioned_writes.py +++ b/swift/common/middleware/versioned_writes.py @@ -127,9 +127,7 @@ from swift.common.request_helpers import get_sys_meta_prefix, \ from swift.common.wsgi import WSGIContext, make_pre_authed_request from swift.common.swob import ( Request, HTTPException, HTTPRequestEntityTooLarge) -from swift.common.constraints import ( - check_account_format, check_container_format, check_destination_header, - MAX_FILE_SIZE) +from swift.common.constraints import check_container_format, MAX_FILE_SIZE from swift.proxy.controllers.base import get_container_info from swift.common.http import ( is_success, is_client_error, HTTP_NOT_FOUND) @@ -493,24 +491,10 @@ class VersionedWritesMiddleware(object): account_name = unquote(account) container_name = unquote(container) object_name = unquote(obj) - container_info = None resp = None is_enabled = config_true_value(allow_versioned_writes) - if req.method in ('PUT', 'DELETE'): - container_info = get_container_info( - req.environ, self.app) - elif req.method == 'COPY' and 'Destination' in req.headers: - if 'Destination-Account' in req.headers: - account_name = req.headers.get('Destination-Account') - account_name = check_account_format(req, account_name) - container_name, object_name = check_destination_header(req) - req.environ['PATH_INFO'] = "/%s/%s/%s/%s" % ( - api_version, account_name, container_name, object_name) - container_info = get_container_info( - req.environ, self.app) - - if not container_info: - 
return self.app + container_info = get_container_info( + req.environ, self.app) # To maintain backwards compatibility, container version # location could be stored as sysmeta or not, need to check both. @@ -530,7 +514,7 @@ class VersionedWritesMiddleware(object): if is_enabled and versions_cont: versions_cont = unquote(versions_cont).split('/')[0] vw_ctx = VersionedWritesContext(self.app, self.logger) - if req.method in ('PUT', 'COPY'): + if req.method == 'PUT': resp = vw_ctx.handle_obj_versions_put( req, versions_cont, api_version, account_name, object_name) @@ -545,10 +529,7 @@ class VersionedWritesMiddleware(object): return self.app def __call__(self, env, start_response): - # making a duplicate, because if this is a COPY request, we will - # modify the PATH_INFO to find out if the 'Destination' is in a - # versioned container - req = Request(env.copy()) + req = Request(env) try: (api_version, account, container, obj) = req.split_path(3, 4, True) except ValueError: @@ -576,7 +557,8 @@ class VersionedWritesMiddleware(object): allow_versioned_writes) except HTTPException as error_response: return error_response(env, start_response) - elif obj and req.method in ('PUT', 'COPY', 'DELETE'): + elif (obj and req.method in ('PUT', 'DELETE') and + not req.environ.get('swift.post_as_copy')): try: return self.object_request( req, api_version, account, container, obj, diff --git a/swift/common/swob.py b/swift/common/swob.py index 0954ef9d3c..f895c44f74 100644 --- a/swift/common/swob.py +++ b/swift/common/swob.py @@ -888,6 +888,11 @@ class Request(object): return self._params_cache str_params = params + @params.setter + def params(self, param_pairs): + self._params_cache = None + self.query_string = urllib.parse.urlencode(param_pairs) + @property def timestamp(self): """ diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index 2c169eb2a6..534333999e 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -1100,7 +1100,7 @@ def make_env(env, method=None, 
path=None, agent='Swift', query_string=None, 'SERVER_PROTOCOL', 'swift.cache', 'swift.source', 'swift.trans_id', 'swift.authorize_override', 'swift.authorize', 'HTTP_X_USER_ID', 'HTTP_X_PROJECT_ID', - 'HTTP_REFERER'): + 'HTTP_REFERER', 'swift.orig_req_method', 'swift.log_info'): if name in env: newenv[name] = env[name] if method: diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index 70400fc143..6f8559063a 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -25,7 +25,7 @@ # collected. We've seen objects hang around forever otherwise. import six -from six.moves.urllib.parse import unquote, quote +from six.moves.urllib.parse import unquote import collections import itertools @@ -49,9 +49,7 @@ from swift.common.utils import ( document_iters_to_http_response_body, parse_content_range, quorum_size, reiterate, close_if_possible) from swift.common.bufferedhttp import http_connect -from swift.common.constraints import check_metadata, check_object_creation, \ - check_copy_from_header, check_destination_header, \ - check_account_format +from swift.common.constraints import check_metadata, check_object_creation from swift.common import constraints from swift.common.exceptions import ChunkReadTimeout, \ ChunkWriteTimeout, ConnectionTimeout, ResponseTimeout, \ @@ -60,33 +58,19 @@ from swift.common.exceptions import ChunkReadTimeout, \ from swift.common.header_key_dict import HeaderKeyDict from swift.common.http import ( is_informational, is_success, is_client_error, is_server_error, - is_redirection, HTTP_CONTINUE, HTTP_CREATED, HTTP_MULTIPLE_CHOICES, - HTTP_INTERNAL_SERVER_ERROR, HTTP_SERVICE_UNAVAILABLE, - HTTP_INSUFFICIENT_STORAGE, HTTP_PRECONDITION_FAILED, HTTP_CONFLICT, - HTTP_UNPROCESSABLE_ENTITY, HTTP_REQUESTED_RANGE_NOT_SATISFIABLE) + is_redirection, HTTP_CONTINUE, HTTP_INTERNAL_SERVER_ERROR, + HTTP_SERVICE_UNAVAILABLE, HTTP_INSUFFICIENT_STORAGE, + HTTP_PRECONDITION_FAILED, HTTP_CONFLICT, 
HTTP_UNPROCESSABLE_ENTITY, + HTTP_REQUESTED_RANGE_NOT_SATISFIABLE) from swift.common.storage_policy import (POLICIES, REPL_POLICY, EC_POLICY, ECDriverError, PolicyError) from swift.proxy.controllers.base import Controller, delay_denial, \ cors_validation, ResumingGetter from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \ HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPRequestTimeout, \ - HTTPServerError, HTTPServiceUnavailable, Request, \ - HTTPClientDisconnect, HTTPUnprocessableEntity, Response, HTTPException, \ + HTTPServerError, HTTPServiceUnavailable, HTTPClientDisconnect, \ + HTTPUnprocessableEntity, Response, HTTPException, \ HTTPRequestedRangeNotSatisfiable, Range, HTTPInternalServerError -from swift.common.request_helpers import is_sys_or_user_meta, is_sys_meta, \ - remove_items, copy_header_subset - - -def copy_headers_into(from_r, to_r): - """ - Will copy desired headers from from_r to to_r - :params from_r: a swob Request or Response - :params to_r: a swob Request or Response - """ - pass_headers = ['x-delete-at'] - for k, v in from_r.headers.items(): - if is_sys_or_user_meta('object', k) or k.lower() in pass_headers: - to_r.headers[k] = v def check_content_type(req): @@ -200,8 +184,7 @@ class BaseObjectController(Controller): self.account_name, self.container_name, self.object_name) node_iter = self.app.iter_nodes(obj_ring, partition) - resp = self._reroute(policy)._get_or_head_response( - req, node_iter, partition, policy) + resp = self._get_or_head_response(req, node_iter, partition, policy) if ';' in resp.headers.get('content-type', ''): resp.content_type = clean_content_type( @@ -227,55 +210,38 @@ class BaseObjectController(Controller): @delay_denial def POST(self, req): """HTTP POST request handler.""" - if self.app.object_post_as_copy: - req.method = 'PUT' - req.path_info = '/v1/%s/%s/%s' % ( - self.account_name, self.container_name, self.object_name) - req.headers['Content-Length'] = 0 - req.headers['X-Copy-From'] = 
quote('/%s/%s' % (self.container_name, - self.object_name)) - req.environ['swift.post_as_copy'] = True - req.environ['swift_versioned_copy'] = True - resp = self.PUT(req) - # Older editions returned 202 Accepted on object POSTs, so we'll - # convert any 201 Created responses to that for compatibility with - # picky clients. - if resp.status_int != HTTP_CREATED: - return resp - return HTTPAccepted(request=req) - else: - error_response = check_metadata(req, 'object') - if error_response: - return error_response - container_info = self.container_info( - self.account_name, self.container_name, req) - container_partition = container_info['partition'] - containers = container_info['nodes'] - req.acl = container_info['write_acl'] - if 'swift.authorize' in req.environ: - aresp = req.environ['swift.authorize'](req) - if aresp: - return aresp - if not containers: - return HTTPNotFound(request=req) + error_response = check_metadata(req, 'object') + if error_response: + return error_response + container_info = self.container_info( + self.account_name, self.container_name, req) + container_partition = container_info['partition'] + containers = container_info['nodes'] + req.acl = container_info['write_acl'] + if 'swift.authorize' in req.environ: + aresp = req.environ['swift.authorize'](req) + if aresp: + return aresp + if not containers: + return HTTPNotFound(request=req) - req, delete_at_container, delete_at_part, \ - delete_at_nodes = self._config_obj_expiration(req) + req, delete_at_container, delete_at_part, \ + delete_at_nodes = self._config_obj_expiration(req) - # pass the policy index to storage nodes via req header - policy_index = req.headers.get('X-Backend-Storage-Policy-Index', - container_info['storage_policy']) - obj_ring = self.app.get_object_ring(policy_index) - req.headers['X-Backend-Storage-Policy-Index'] = policy_index - partition, nodes = obj_ring.get_nodes( - self.account_name, self.container_name, self.object_name) + # pass the policy index to storage nodes 
via req header + policy_index = req.headers.get('X-Backend-Storage-Policy-Index', + container_info['storage_policy']) + obj_ring = self.app.get_object_ring(policy_index) + req.headers['X-Backend-Storage-Policy-Index'] = policy_index + partition, nodes = obj_ring.get_nodes( + self.account_name, self.container_name, self.object_name) - req.headers['X-Timestamp'] = Timestamp(time.time()).internal + req.headers['X-Timestamp'] = Timestamp(time.time()).internal - headers = self._backend_requests( - req, len(nodes), container_partition, containers, - delete_at_container, delete_at_part, delete_at_nodes) - return self._post_object(req, obj_ring, partition, headers) + headers = self._backend_requests( + req, len(nodes), container_partition, containers, + delete_at_container, delete_at_part, delete_at_nodes) + return self._post_object(req, obj_ring, partition, headers) def _backend_requests(self, req, n_outgoing, container_partition, containers, @@ -414,133 +380,8 @@ class BaseObjectController(Controller): return req, delete_at_container, delete_at_part, delete_at_nodes - def _handle_copy_request(self, req): - """ - This method handles copying objects based on values set in the headers - 'X-Copy-From' and 'X-Copy-From-Account' - - Note that if the incomming request has some conditional headers (e.g. - 'Range', 'If-Match'), *source* object will be evaluated for these - headers. i.e. if PUT with both 'X-Copy-From' and 'Range', Swift will - make a partial copy as a new object. 
- - This method was added as part of the refactoring of the PUT method and - the functionality is expected to be moved to middleware - """ - if req.environ.get('swift.orig_req_method', req.method) != 'POST': - req.environ.setdefault('swift.log_info', []).append( - 'x-copy-from:%s' % req.headers['X-Copy-From']) - ver, acct, _rest = req.split_path(2, 3, True) - src_account_name = req.headers.get('X-Copy-From-Account', None) - if src_account_name: - src_account_name = check_account_format(req, src_account_name) - else: - src_account_name = acct - src_container_name, src_obj_name = check_copy_from_header(req) - source_header = '/%s/%s/%s/%s' % ( - ver, src_account_name, src_container_name, src_obj_name) - source_req = req.copy_get() - - # make sure the source request uses it's container_info - source_req.headers.pop('X-Backend-Storage-Policy-Index', None) - source_req.path_info = source_header - source_req.headers['X-Newest'] = 'true' - if 'swift.post_as_copy' in req.environ: - # We're COPYing one object over itself because of a POST; rely on - # the PUT for write authorization, don't require read authorization - source_req.environ['swift.authorize'] = lambda req: None - source_req.environ['swift.authorize_override'] = True - - orig_obj_name = self.object_name - orig_container_name = self.container_name - orig_account_name = self.account_name - sink_req = Request.blank(req.path_info, - environ=req.environ, headers=req.headers) - - self.object_name = src_obj_name - self.container_name = src_container_name - self.account_name = src_account_name - - source_resp = self.GET(source_req) - - # This gives middlewares a way to change the source; for example, - # this lets you COPY a SLO manifest and have the new object be the - # concatenation of the segments (like what a GET request gives - # the client), not a copy of the manifest file. 
- hook = req.environ.get( - 'swift.copy_hook', - (lambda source_req, source_resp, sink_req: source_resp)) - source_resp = hook(source_req, source_resp, sink_req) - - # reset names - self.object_name = orig_obj_name - self.container_name = orig_container_name - self.account_name = orig_account_name - - if source_resp.status_int >= HTTP_MULTIPLE_CHOICES: - # this is a bit of ugly code, but I'm willing to live with it - # until copy request handling moves to middleware - return source_resp, None, None, None - if source_resp.content_length is None: - # This indicates a transfer-encoding: chunked source object, - # which currently only happens because there are more than - # CONTAINER_LISTING_LIMIT segments in a segmented object. In - # this case, we're going to refuse to do the server-side copy. - raise HTTPRequestEntityTooLarge(request=req) - if source_resp.content_length > constraints.MAX_FILE_SIZE: - raise HTTPRequestEntityTooLarge(request=req) - - data_source = iter(source_resp.app_iter) - sink_req.content_length = source_resp.content_length - sink_req.etag = source_resp.etag - - # we no longer need the X-Copy-From header - del sink_req.headers['X-Copy-From'] - if 'X-Copy-From-Account' in sink_req.headers: - del sink_req.headers['X-Copy-From-Account'] - if not req.content_type_manually_set: - sink_req.headers['Content-Type'] = \ - source_resp.headers['Content-Type'] - - fresh_meta_flag = config_true_value( - sink_req.headers.get('x-fresh-metadata', 'false')) - - if fresh_meta_flag or 'swift.post_as_copy' in sink_req.environ: - # post-as-copy: ignore new sysmeta, copy existing sysmeta - condition = lambda k: is_sys_meta('object', k) - remove_items(sink_req.headers, condition) - copy_header_subset(source_resp, sink_req, condition) - else: - # copy/update existing sysmeta and user meta - copy_headers_into(source_resp, sink_req) - copy_headers_into(req, sink_req) - - # copy over x-static-large-object for POSTs and manifest copies - if 'X-Static-Large-Object' in 
source_resp.headers and \ - (req.params.get('multipart-manifest') == 'get' or - 'swift.post_as_copy' in req.environ): - sink_req.headers['X-Static-Large-Object'] = \ - source_resp.headers['X-Static-Large-Object'] - - req = sink_req - - def update_response(req, resp): - acct, path = source_resp.environ['PATH_INFO'].split('/', 3)[2:4] - resp.headers['X-Copied-From-Account'] = quote(acct) - resp.headers['X-Copied-From'] = quote(path) - if 'last-modified' in source_resp.headers: - resp.headers['X-Copied-From-Last-Modified'] = \ - source_resp.headers['last-modified'] - copy_headers_into(req, resp) - return resp - - # this is a bit of ugly code, but I'm willing to live with it - # until copy request handling moves to middleware - return None, req, data_source, update_response - def _update_content_type(self, req): # Sometimes the 'content-type' header exists, but is set to None. - req.content_type_manually_set = True detect_content_type = \ config_true_value(req.headers.get('x-detect-content-type')) if detect_content_type or not req.headers.get('content-type'): @@ -549,8 +390,6 @@ class BaseObjectController(Controller): 'application/octet-stream' if detect_content_type: req.headers.pop('x-detect-content-type') - else: - req.content_type_manually_set = False def _update_x_timestamp(self, req): # Used by container sync feature @@ -744,22 +583,13 @@ class BaseObjectController(Controller): self._update_x_timestamp(req) - # check if request is a COPY of an existing object - source_header = req.headers.get('X-Copy-From') - if source_header: - error_response, req, data_source, update_response = \ - self._handle_copy_request(req) - if error_response: - return error_response - else: - def reader(): - try: - return req.environ['wsgi.input'].read( - self.app.client_chunk_size) - except (ValueError, IOError) as e: - raise ChunkReadError(str(e)) - data_source = iter(reader, '') - update_response = lambda req, resp: resp + def reader(): + try: + return req.environ['wsgi.input'].read( 
+ self.app.client_chunk_size) + except (ValueError, IOError) as e: + raise ChunkReadError(str(e)) + data_source = iter(reader, '') # check if object is set to be automatically deleted (i.e. expired) req, delete_at_container, delete_at_part, \ @@ -773,7 +603,7 @@ class BaseObjectController(Controller): # send object to storage nodes resp = self._store_object( req, data_source, nodes, partition, outgoing_headers) - return update_response(req, resp) + return resp @public @cors_validation @@ -817,63 +647,6 @@ class BaseObjectController(Controller): req, len(nodes), container_partition, containers) return self._delete_object(req, obj_ring, partition, headers) - def _reroute(self, policy): - """ - For COPY requests we need to make sure the controller instance the - request is routed through is the correct type for the policy. - """ - if not policy: - raise HTTPServiceUnavailable('Unknown Storage Policy') - if policy.policy_type != self.policy_type: - controller = self.app.obj_controller_router[policy]( - self.app, self.account_name, self.container_name, - self.object_name) - else: - controller = self - return controller - - @public - @cors_validation - @delay_denial - def COPY(self, req): - """HTTP COPY request handler.""" - if not req.headers.get('Destination'): - return HTTPPreconditionFailed(request=req, - body='Destination header required') - dest_account = self.account_name - if 'Destination-Account' in req.headers: - dest_account = req.headers.get('Destination-Account') - dest_account = check_account_format(req, dest_account) - req.headers['X-Copy-From-Account'] = self.account_name - self.account_name = dest_account - del req.headers['Destination-Account'] - dest_container, dest_object = check_destination_header(req) - - source = '/%s/%s' % (self.container_name, self.object_name) - self.container_name = dest_container - self.object_name = dest_object - # re-write the existing request as a PUT instead of creating a new one - # since this one is already attached to 
the posthooklogger - # TODO: Swift now has proxy-logging middleware instead of - # posthooklogger used in before. i.e. we don't have to - # keep the code depends on evnetlet.posthooks sequence, IMHO. - # However, creating a new sub request might - # cause the possibility to hide some bugs behindes the request - # so that we should discuss whichi is suitable (new-sub-request - # vs re-write-existing-request) for Swift. [kota_] - req.method = 'PUT' - req.path_info = '/v1/%s/%s/%s' % \ - (dest_account, dest_container, dest_object) - req.headers['Content-Length'] = 0 - req.headers['X-Copy-From'] = quote(source) - del req.headers['Destination'] - - container_info = self.container_info( - dest_account, dest_container, req) - dest_policy = POLICIES.get_by_index(container_info['storage_policy']) - - return self._reroute(dest_policy).PUT(req) - @ObjectControllerRouter.register(REPL_POLICY) class ReplicatedObjectController(BaseObjectController): diff --git a/swift/proxy/server.py b/swift/proxy/server.py index f8f4296a25..963bf34f0e 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -64,10 +64,14 @@ required_filters = [ if pipe.startswith('catch_errors') else [])}, {'name': 'dlo', 'after_fn': lambda _junk: [ - 'staticweb', 'tempauth', 'keystoneauth', + 'copy', 'staticweb', 'tempauth', 'keystoneauth', 'catch_errors', 'gatekeeper', 'proxy_logging']}, {'name': 'versioned_writes', 'after_fn': lambda _junk: [ - 'slo', 'dlo', 'staticweb', 'tempauth', 'keystoneauth', + 'slo', 'dlo', 'copy', 'staticweb', 'tempauth', + 'keystoneauth', 'catch_errors', 'gatekeeper', 'proxy_logging']}, + # Put copy before dlo, slo and versioned_writes + {'name': 'copy', 'after_fn': lambda _junk: [ + 'staticweb', 'tempauth', 'keystoneauth', 'catch_errors', 'gatekeeper', 'proxy_logging']}] @@ -107,8 +111,6 @@ class Application(object): int(conf.get('recheck_account_existence', 60)) self.allow_account_management = \ config_true_value(conf.get('allow_account_management', 'no')) - 
self.object_post_as_copy = \ - config_true_value(conf.get('object_post_as_copy', 'true')) self.container_ring = container_ring or Ring(swift_dir, ring_name='container') self.account_ring = account_ring or Ring(swift_dir, @@ -392,8 +394,7 @@ class Application(object): # controller's method indicates it'd like to gather more # information and try again later. resp = req.environ['swift.authorize'](req) - if not resp and not req.headers.get('X-Copy-From-Account') \ - and not req.headers.get('Destination-Account'): + if not resp: # No resp means authorized, no delayed recheck required. old_authorize = req.environ['swift.authorize'] else: @@ -404,7 +405,7 @@ class Application(object): # Save off original request method (GET, POST, etc.) in case it # gets mutated during handling. This way logging can display the # method the client actually sent. - req.environ['swift.orig_req_method'] = req.method + req.environ.setdefault('swift.orig_req_method', req.method) try: if old_authorize: req.environ.pop('swift.authorize', None) diff --git a/test/functional/tests.py b/test/functional/tests.py index fc9e362f2a..e35e79706d 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -1306,12 +1306,10 @@ class TestFile(Base): acct, '%s%s' % (prefix, self.env.container), Utils.create_name())) - if acct == acct2: - # there is no such source container - # and foreign user can have no permission to read it - self.assert_status(403) - else: - self.assert_status(404) + # there is no such source container but user has + # permissions to do a GET (done internally via COPY) for + # objects in his own account. 
+ self.assert_status(404) self.assertFalse(file_item.copy_account( acct, @@ -1325,12 +1323,10 @@ class TestFile(Base): acct, '%s%s' % (prefix, self.env.container), Utils.create_name())) - if acct == acct2: - # there is no such object - # and foreign user can have no permission to read it - self.assert_status(403) - else: - self.assert_status(404) + # there is no such source container but user has + # permissions to do a GET (done internally via COPY) for + # objects in his own account. + self.assert_status(404) self.assertFalse(file_item.copy_account( acct, @@ -2677,6 +2673,23 @@ class TestFileComparisonUTF8(Base2, TestFileComparison): class TestSloEnv(object): slo_enabled = None # tri-state: None initially, then True/False + @classmethod + def create_segments(cls, container): + seg_info = {} + for letter, size in (('a', 1024 * 1024), + ('b', 1024 * 1024), + ('c', 1024 * 1024), + ('d', 1024 * 1024), + ('e', 1)): + seg_name = "seg_%s" % letter + file_item = container.file(seg_name) + file_item.write(letter * size) + seg_info[seg_name] = { + 'size_bytes': size, + 'etag': file_item.md5, + 'path': '/%s/%s' % (container.name, seg_name)} + return seg_info + @classmethod def setUp(cls): cls.conn = Connection(tf.config) @@ -2711,19 +2724,7 @@ class TestSloEnv(object): if not cont.create(): raise ResponseError(cls.conn.response) - cls.seg_info = seg_info = {} - for letter, size in (('a', 1024 * 1024), - ('b', 1024 * 1024), - ('c', 1024 * 1024), - ('d', 1024 * 1024), - ('e', 1)): - seg_name = "seg_%s" % letter - file_item = cls.container.file(seg_name) - file_item.write(letter * size) - seg_info[seg_name] = { - 'size_bytes': size, - 'etag': file_item.md5, - 'path': '/%s/%s' % (cls.container.name, seg_name)} + cls.seg_info = seg_info = cls.create_segments(cls.container) file_item = cls.container.file("manifest-abcde") file_item.write( @@ -3125,8 +3126,9 @@ class TestSlo(Base): def test_slo_copy_the_manifest(self): file_item = self.env.container.file("manifest-abcde") - 
file_item.copy(self.env.container.name, "copied-abcde-manifest-only", - parms={'multipart-manifest': 'get'}) + self.assertTrue(file_item.copy(self.env.container.name, + "copied-abcde-manifest-only", + parms={'multipart-manifest': 'get'})) copied = self.env.container.file("copied-abcde-manifest-only") copied_contents = copied.read(parms={'multipart-manifest': 'get'}) @@ -3157,10 +3159,40 @@ class TestSlo(Base): self.assertTrue(dest_cont.create(hdrs={ 'X-Container-Write': self.env.conn.user_acl })) - file_item.copy_account(acct, - dest_cont, - "copied-abcde-manifest-only", - parms={'multipart-manifest': 'get'}) + + # manifest copy will fail because there is no read access to segments + # in destination account + file_item.copy_account( + acct, dest_cont, "copied-abcde-manifest-only", + parms={'multipart-manifest': 'get'}) + self.assertEqual(400, file_item.conn.response.status) + resp_body = file_item.conn.response.read() + self.assertEqual(5, resp_body.count('403 Forbidden'), + 'Unexpected response body %r' % resp_body) + + # create segments container in account2 with read access for account1 + segs_container = self.env.account2.container(self.env.container.name) + self.assertTrue(segs_container.create(hdrs={ + 'X-Container-Read': self.env.conn.user_acl + })) + + # manifest copy will still fail because there are no segments in + # destination account + file_item.copy_account( + acct, dest_cont, "copied-abcde-manifest-only", + parms={'multipart-manifest': 'get'}) + self.assertEqual(400, file_item.conn.response.status) + resp_body = file_item.conn.response.read() + self.assertEqual(5, resp_body.count('404 Not Found'), + 'Unexpected response body %r' % resp_body) + + # create segments in account2 container with same name as in account1, + # manifest copy now succeeds + self.env.create_segments(segs_container) + + self.assertTrue(file_item.copy_account( + acct, dest_cont, "copied-abcde-manifest-only", + parms={'multipart-manifest': 'get'})) copied = 
dest_cont.file("copied-abcde-manifest-only") copied_contents = copied.read(parms={'multipart-manifest': 'get'}) diff --git a/test/unit/common/middleware/helpers.py b/test/unit/common/middleware/helpers.py index 2432d0dc37..bcd3c4c2ec 100644 --- a/test/unit/common/middleware/helpers.py +++ b/test/unit/common/middleware/helpers.py @@ -20,6 +20,7 @@ from copy import deepcopy from hashlib import md5 from swift.common import swob from swift.common.header_key_dict import HeaderKeyDict +from swift.common.swob import HTTPNotImplemented from swift.common.utils import split_path from test.unit import FakeLogger, FakeRing @@ -43,6 +44,8 @@ class FakeSwift(object): """ A good-enough fake Swift proxy server to use in testing middleware. """ + ALLOWED_METHODS = [ + 'PUT', 'POST', 'DELETE', 'GET', 'HEAD', 'OPTIONS', 'REPLICATE'] def __init__(self): self._calls = [] @@ -71,6 +74,9 @@ class FakeSwift(object): def __call__(self, env, start_response): method = env['REQUEST_METHOD'] + if method not in self.ALLOWED_METHODS: + raise HTTPNotImplemented() + path = env['PATH_INFO'] _, acc, cont, obj = split_path(env['PATH_INFO'], 0, 4, rest_with_last=True) diff --git a/test/unit/common/middleware/test_account_quotas.py b/test/unit/common/middleware/test_account_quotas.py index 345e178cd1..b443b4a28d 100644 --- a/test/unit/common/middleware/test_account_quotas.py +++ b/test/unit/common/middleware/test_account_quotas.py @@ -13,9 +13,10 @@ import unittest -from swift.common.swob import Request, wsgify, HTTPForbidden +from swift.common.swob import Request, wsgify, HTTPForbidden, \ + HTTPException -from swift.common.middleware import account_quotas +from swift.common.middleware import account_quotas, copy from swift.proxy.controllers.base import _get_cache_key, \ headers_to_account_info, get_object_env_key, \ @@ -245,84 +246,6 @@ class TestAccountQuota(unittest.TestCase): res = req.get_response(app) self.assertEqual(res.status_int, 200) - def test_exceed_bytes_quota_copy_from(self): - headers = 
[('x-account-bytes-used', '500'), - ('x-account-meta-quota-bytes', '1000'), - ('content-length', '1000')] - app = account_quotas.AccountQuotaMiddleware(FakeApp(headers)) - cache = FakeCache(None) - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'PUT', - 'swift.cache': cache}, - headers={'x-copy-from': '/c2/o2'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 413) - self.assertEqual(res.body, 'Upload exceeds quota.') - - def test_exceed_bytes_quota_copy_verb(self): - headers = [('x-account-bytes-used', '500'), - ('x-account-meta-quota-bytes', '1000'), - ('content-length', '1000')] - app = account_quotas.AccountQuotaMiddleware(FakeApp(headers)) - cache = FakeCache(None) - req = Request.blank('/v1/a/c2/o2', - environ={'REQUEST_METHOD': 'COPY', - 'swift.cache': cache}, - headers={'Destination': '/c/o'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 413) - self.assertEqual(res.body, 'Upload exceeds quota.') - - def test_not_exceed_bytes_quota_copy_from(self): - headers = [('x-account-bytes-used', '0'), - ('x-account-meta-quota-bytes', '1000'), - ('content-length', '1000')] - app = account_quotas.AccountQuotaMiddleware(FakeApp(headers)) - cache = FakeCache(None) - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'PUT', - 'swift.cache': cache}, - headers={'x-copy-from': '/c2/o2'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 200) - - def test_not_exceed_bytes_quota_copy_verb(self): - headers = [('x-account-bytes-used', '0'), - ('x-account-meta-quota-bytes', '1000'), - ('content-length', '1000')] - app = account_quotas.AccountQuotaMiddleware(FakeApp(headers)) - cache = FakeCache(None) - req = Request.blank('/v1/a/c2/o2', - environ={'REQUEST_METHOD': 'COPY', - 'swift.cache': cache}, - headers={'Destination': '/c/o'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 200) - - def test_quota_copy_from_no_src(self): - headers = [('x-account-bytes-used', '0'), - 
('x-account-meta-quota-bytes', '1000')] - app = account_quotas.AccountQuotaMiddleware(FakeApp(headers)) - cache = FakeCache(None) - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'PUT', - 'swift.cache': cache}, - headers={'x-copy-from': '/c2/o3'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 200) - - def test_quota_copy_from_bad_src(self): - headers = [('x-account-bytes-used', '0'), - ('x-account-meta-quota-bytes', '1000')] - app = account_quotas.AccountQuotaMiddleware(FakeApp(headers)) - cache = FakeCache(None) - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'PUT', - 'swift.cache': cache}, - headers={'x-copy-from': 'bad_path'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 412) - def test_exceed_bytes_quota_reseller(self): headers = [('x-account-bytes-used', '1000'), ('x-account-meta-quota-bytes', '0')] @@ -485,5 +408,91 @@ class TestAccountQuota(unittest.TestCase): self.assertEqual(res.status_int, 200) +class AccountQuotaCopyingTestCases(unittest.TestCase): + + def setUp(self): + self.app = FakeApp() + self.aq_filter = account_quotas.filter_factory({})(self.app) + self.copy_filter = copy.filter_factory({})(self.aq_filter) + + def test_exceed_bytes_quota_copy_from(self): + headers = [('x-account-bytes-used', '500'), + ('x-account-meta-quota-bytes', '1000'), + ('content-length', '1000')] + self.app.headers = headers + cache = FakeCache(None) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', + 'swift.cache': cache}, + headers={'x-copy-from': '/c2/o2'}) + res = req.get_response(self.copy_filter) + self.assertEqual(res.status_int, 413) + self.assertEqual(res.body, 'Upload exceeds quota.') + + def test_exceed_bytes_quota_copy_verb(self): + headers = [('x-account-bytes-used', '500'), + ('x-account-meta-quota-bytes', '1000'), + ('content-length', '1000')] + self.app.headers = headers + cache = FakeCache(None) + req = Request.blank('/v1/a/c2/o2', + environ={'REQUEST_METHOD': 'COPY', 
+ 'swift.cache': cache}, + headers={'Destination': '/c/o'}) + res = req.get_response(self.copy_filter) + self.assertEqual(res.status_int, 413) + self.assertEqual(res.body, 'Upload exceeds quota.') + + def test_not_exceed_bytes_quota_copy_from(self): + headers = [('x-account-bytes-used', '0'), + ('x-account-meta-quota-bytes', '1000'), + ('content-length', '1000')] + self.app.headers = headers + cache = FakeCache(None) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', + 'swift.cache': cache}, + headers={'x-copy-from': '/c2/o2'}) + res = req.get_response(self.copy_filter) + self.assertEqual(res.status_int, 200) + + def test_not_exceed_bytes_quota_copy_verb(self): + headers = [('x-account-bytes-used', '0'), + ('x-account-meta-quota-bytes', '1000'), + ('content-length', '1000')] + self.app.headers = headers + cache = FakeCache(None) + req = Request.blank('/v1/a/c2/o2', + environ={'REQUEST_METHOD': 'COPY', + 'swift.cache': cache}, + headers={'Destination': '/c/o'}) + res = req.get_response(self.copy_filter) + self.assertEqual(res.status_int, 200) + + def test_quota_copy_from_no_src(self): + headers = [('x-account-bytes-used', '0'), + ('x-account-meta-quota-bytes', '1000')] + self.app.headers = headers + cache = FakeCache(None) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', + 'swift.cache': cache}, + headers={'x-copy-from': '/c2/o3'}) + res = req.get_response(self.copy_filter) + self.assertEqual(res.status_int, 200) + + def test_quota_copy_from_bad_src(self): + headers = [('x-account-bytes-used', '0'), + ('x-account-meta-quota-bytes', '1000')] + self.app.headers = headers + cache = FakeCache(None) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', + 'swift.cache': cache}, + headers={'x-copy-from': 'bad_path'}) + with self.assertRaises(HTTPException) as catcher: + req.get_response(self.copy_filter) + self.assertEqual(412, catcher.exception.status_int) + if __name__ == '__main__': unittest.main() diff --git 
a/test/unit/common/middleware/test_copy.py b/test/unit/common/middleware/test_copy.py new file mode 100644 index 0000000000..190d7c9084 --- /dev/null +++ b/test/unit/common/middleware/test_copy.py @@ -0,0 +1,1183 @@ +#!/usr/bin/env python +# Copyright (c) 2015 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +import mock +import shutil +import tempfile +import unittest +from hashlib import md5 +from textwrap import dedent + +from swift.common import swob +from swift.common.middleware import copy +from swift.common.storage_policy import POLICIES +from swift.common.swob import Request, HTTPException +from test.unit import patch_policies, debug_logger, FakeMemcache, FakeRing +from test.unit.common.middleware.helpers import FakeSwift +from test.unit.proxy.controllers.test_obj import set_http_connect, \ + PatchedObjControllerApp + + +class TestCopyConstraints(unittest.TestCase): + def test_validate_copy_from(self): + req = Request.blank( + '/v/a/c/o', + headers={'x-copy-from': 'c/o2'}) + src_cont, src_obj = copy._check_copy_from_header(req) + self.assertEqual(src_cont, 'c') + self.assertEqual(src_obj, 'o2') + req = Request.blank( + '/v/a/c/o', + headers={'x-copy-from': 'c/subdir/o2'}) + src_cont, src_obj = copy._check_copy_from_header(req) + self.assertEqual(src_cont, 'c') + self.assertEqual(src_obj, 'subdir/o2') + req = Request.blank( + '/v/a/c/o', + headers={'x-copy-from': '/c/o2'}) + src_cont, src_obj = 
copy._check_copy_from_header(req) + self.assertEqual(src_cont, 'c') + self.assertEqual(src_obj, 'o2') + + def test_validate_bad_copy_from(self): + req = Request.blank( + '/v/a/c/o', + headers={'x-copy-from': 'bad_object'}) + self.assertRaises(HTTPException, + copy._check_copy_from_header, req) + + def test_validate_destination(self): + req = Request.blank( + '/v/a/c/o', + headers={'destination': 'c/o2'}) + src_cont, src_obj = copy._check_destination_header(req) + self.assertEqual(src_cont, 'c') + self.assertEqual(src_obj, 'o2') + req = Request.blank( + '/v/a/c/o', + headers={'destination': 'c/subdir/o2'}) + src_cont, src_obj = copy._check_destination_header(req) + self.assertEqual(src_cont, 'c') + self.assertEqual(src_obj, 'subdir/o2') + req = Request.blank( + '/v/a/c/o', + headers={'destination': '/c/o2'}) + src_cont, src_obj = copy._check_destination_header(req) + self.assertEqual(src_cont, 'c') + self.assertEqual(src_obj, 'o2') + + def test_validate_bad_destination(self): + req = Request.blank( + '/v/a/c/o', + headers={'destination': 'bad_object'}) + self.assertRaises(HTTPException, + copy._check_destination_header, req) + + +class TestServerSideCopyMiddleware(unittest.TestCase): + def setUp(self): + self.app = FakeSwift() + self.ssc = copy.filter_factory({ + 'object_post_as_copy': 'yes', + })(self.app) + self.ssc.logger = self.app.logger + + def call_app(self, req, app=None, expect_exception=False): + if app is None: + app = self.app + + self.authorized = [] + + def authorize(req): + self.authorized.append(req) + + if 'swift.authorize' not in req.environ: + req.environ['swift.authorize'] = authorize + + req.headers.setdefault("User-Agent", "Bruce Wayne") + + status = [None] + headers = [None] + + def start_response(s, h, ei=None): + status[0] = s + headers[0] = h + + body_iter = app(req.environ, start_response) + body = '' + caught_exc = None + try: + for chunk in body_iter: + body += chunk + except Exception as exc: + if expect_exception: + caught_exc = exc + 
else: + raise + + if expect_exception: + return status[0], headers[0], body, caught_exc + else: + return status[0], headers[0], body + + def call_ssc(self, req, **kwargs): + return self.call_app(req, app=self.ssc, **kwargs) + + def assertRequestEqual(self, req, other): + self.assertEqual(req.method, other.method) + self.assertEqual(req.path, other.path) + + def test_no_object_in_path_pass_through(self): + self.app.register('PUT', '/v1/a/c', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c', method='PUT') + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + def test_object_delete_pass_through(self): + self.app.register('DELETE', '/v1/a/c/o', swob.HTTPOk, {}) + req = Request.blank('/v1/a/c/o', method='DELETE') + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '200 OK') + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + def test_POST_as_COPY_simple(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a/c/o', swob.HTTPAccepted, {}) + req = Request.blank('/v1/a/c/o', method='POST') + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '202 Accepted') + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + def test_POST_as_COPY_201_return_202(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', method='POST') + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '202 Accepted') + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + def test_POST_delete_at(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a/c/o', 
swob.HTTPAccepted, {}) + t = str(int(time.time() + 100)) + req = Request.blank('/v1/a/c/o', method='POST', + headers={'Content-Type': 'foo/bar', + 'X-Delete-At': t}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '202 Accepted') + + calls = self.app.calls_with_headers + method, path, req_headers = calls[1] + self.assertEqual('PUT', method) + self.assertTrue('X-Delete-At' in req_headers) + self.assertEqual(req_headers['X-Delete-At'], str(t)) + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + def test_POST_as_COPY_static_large_object(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, + {'X-Static-Large-Object': True}, 'passed') + self.app.register('PUT', '/v1/a/c/o', swob.HTTPAccepted, {}) + req = Request.blank('/v1/a/c/o', method='POST', + headers={}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '202 Accepted') + + calls = self.app.calls_with_headers + method, path, req_headers = calls[1] + self.assertEqual('PUT', method) + self.assertNotIn('X-Static-Large-Object', req_headers) + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + def test_basic_put_with_x_copy_from(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a/c/o2', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o2', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': 'c/o'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + self.assertTrue(('X-Copied-From', 'c/o') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/c/o2', self.authorized[1].path) + + def test_static_large_object(self): + self.app.register('GET', '/v1/a/c/o', 
swob.HTTPOk, + {'X-Static-Large-Object': 'True'}, 'passed') + self.app.register('PUT', '/v1/a/c/o2?multipart-manifest=put', + swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o2?multipart-manifest=get', + environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': 'c/o'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + self.assertTrue(('X-Copied-From', 'c/o') in headers) + calls = self.app.calls_with_headers + method, path, req_headers = calls[1] + self.assertEqual('PUT', method) + self.assertEqual('/v1/a/c/o2?multipart-manifest=put', path) + self.assertNotIn('X-Static-Large-Object', req_headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/c/o2', self.authorized[1].path) + + def test_basic_put_with_x_copy_from_across_container(self): + self.app.register('GET', '/v1/a/c1/o1', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a/c2/o2', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': 'c1/o1'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + self.assertTrue(('X-Copied-From', 'c1/o1') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c1/o1', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/c2/o2', self.authorized[1].path) + + def test_basic_put_with_x_copy_from_across_container_and_account(self): + self.app.register('GET', '/v1/a1/c1/o1', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a2/c2/o2', swob.HTTPCreated, {}, + 'passed') + req = Request.blank('/v1/a2/c2/o2', environ={'REQUEST_METHOD': 'PUT'}, + 
headers={'Content-Length': '0', + 'X-Copy-From': 'c1/o1', + 'X-Copy-From-Account': 'a1'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + self.assertTrue(('X-Copied-From', 'c1/o1') in headers) + self.assertTrue(('X-Copied-From-Account', 'a1') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a1/c1/o1', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a2/c2/o2', self.authorized[1].path) + + def test_copy_non_zero_content_length(self): + req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '10', + 'X-Copy-From': 'c1/o1'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '400 Bad Request') + + def test_copy_non_zero_content_length_with_account(self): + req = Request.blank('/v1/a2/c2/o2', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '10', + 'X-Copy-From': 'c1/o1', + 'X-Copy-From-Account': 'a1'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '400 Bad Request') + + def test_copy_with_slashes_in_x_copy_from(self): + self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': 'c/o/o2'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + self.assertTrue(('X-Copied-From', 'c/o/o2') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/c/o', self.authorized[1].path) + + def test_copy_with_slashes_in_x_copy_from_and_account(self): + self.app.register('GET', '/v1/a1/c1/o/o1', 
swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a2/c2/o2', swob.HTTPCreated, {}) + req = Request.blank('/v1/a2/c2/o2', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': 'c1/o/o1', + 'X-Copy-From-Account': 'a1'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + self.assertTrue(('X-Copied-From', 'c1/o/o1') in headers) + self.assertTrue(('X-Copied-From-Account', 'a1') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a1/c1/o/o1', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a2/c2/o2', self.authorized[1].path) + + def test_copy_with_spaces_in_x_copy_from(self): + self.app.register('GET', '/v1/a/c/o o2', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) + # space in soure path + req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': 'c/o%20o2'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + method, path, req_headers = calls[0] + self.assertEqual('GET', method) + self.assertEqual('/v1/a/c/o o2', path) + self.assertTrue(('X-Copied-From', 'c/o%20o2') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o%20o2', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/c/o', self.authorized[1].path) + + def test_copy_with_spaces_in_x_copy_from_and_account(self): + self.app.register('GET', '/v1/a/c/o o2', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {}) + # space in soure path + req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': 
'c/o%20o2', + 'X-Copy-From-Account': 'a'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + method, path, req_headers = calls[0] + self.assertEqual('GET', method) + self.assertEqual('/v1/a/c/o o2', path) + self.assertTrue(('X-Copied-From', 'c/o%20o2') in headers) + self.assertTrue(('X-Copied-From-Account', 'a') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o%20o2', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a1/c1/o', self.authorized[1].path) + + def test_copy_with_leading_slash_in_x_copy_from(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) + # repeat tests with leading / + req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': '/c/o'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + method, path, req_headers = calls[0] + self.assertEqual('GET', method) + self.assertEqual('/v1/a/c/o', path) + self.assertTrue(('X-Copied-From', 'c/o') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/c/o', self.authorized[1].path) + + def test_copy_with_leading_slash_in_x_copy_from_and_account(self): + # repeat tests with leading / + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': '/c/o', + 'X-Copy-From-Account': 'a'}) + status, headers, body 
= self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + method, path, req_headers = calls[0] + self.assertEqual('GET', method) + self.assertEqual('/v1/a/c/o', path) + self.assertTrue(('X-Copied-From', 'c/o') in headers) + self.assertTrue(('X-Copied-From-Account', 'a') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a1/c1/o', self.authorized[1].path) + + def test_copy_with_leading_slash_and_slashes_in_x_copy_from(self): + self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': '/c/o/o2'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + method, path, req_headers = calls[0] + self.assertEqual('GET', method) + self.assertEqual('/v1/a/c/o/o2', path) + self.assertTrue(('X-Copied-From', 'c/o/o2') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/c/o', self.authorized[1].path) + + def test_copy_with_leading_slash_and_slashes_in_x_copy_from_acct(self): + self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': '/c/o/o2', + 'X-Copy-From-Account': 'a'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + 
method, path, req_headers = calls[0] + self.assertEqual('GET', method) + self.assertEqual('/v1/a/c/o/o2', path) + self.assertTrue(('X-Copied-From', 'c/o/o2') in headers) + self.assertTrue(('X-Copied-From-Account', 'a') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a1/c1/o', self.authorized[1].path) + + def test_copy_with_no_object_in_x_copy_from(self): + req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': '/c'}) + try: + status, headers, body = self.call_ssc(req) + except HTTPException as resp: + self.assertEqual("412 Precondition Failed", str(resp)) + else: + self.fail("Expecting HTTPException.") + + def test_copy_with_no_object_in_x_copy_from_and_account(self): + req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': '/c', + 'X-Copy-From-Account': 'a'}) + try: + status, headers, body = self.call_ssc(req) + except HTTPException as resp: + self.assertEqual("412 Precondition Failed", str(resp)) + else: + self.fail("Expecting HTTPException.") + + def test_copy_with_bad_x_copy_from_account(self): + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': '/c/o', + 'X-Copy-From-Account': '/i/am/bad'}) + try: + status, headers, body = self.call_ssc(req) + except HTTPException as resp: + self.assertEqual("412 Precondition Failed", str(resp)) + else: + self.fail("Expecting HTTPException.") + + def test_copy_server_error_reading_source(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPServiceUnavailable, {}) + req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': '/c/o'}) + status, headers, body = self.call_ssc(req) 
+ self.assertEqual(status, '503 Service Unavailable') + + def test_copy_server_error_reading_source_and_account(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPServiceUnavailable, {}) + req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': '/c/o', + 'X-Copy-From-Account': 'a'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '503 Service Unavailable') + self.assertEqual(len(self.authorized), 1) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + + def test_copy_not_found_reading_source(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPNotFound, {}) + req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': '/c/o'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '404 Not Found') + self.assertEqual(len(self.authorized), 1) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + + def test_copy_not_found_reading_source_and_account(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPNotFound, {}) + req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': '/c/o', + 'X-Copy-From-Account': 'a'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '404 Not Found') + self.assertEqual(len(self.authorized), 1) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + + def test_copy_with_object_metadata(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': '/c/o', + 'X-Object-Meta-Ours': 'okay'}) + status, headers, body = 
self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + method, path, req_headers = calls[1] + self.assertEqual('PUT', method) + self.assertEqual('/v1/a/c/o', path) + self.assertEqual(req_headers['X-Object-Meta-Ours'], 'okay') + self.assertTrue(('X-Object-Meta-Ours', 'okay') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/c/o', self.authorized[1].path) + + def test_copy_with_object_metadata_and_account(self): + self.app.register('GET', '/v1/a1/c/o', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': '/c/o', + 'X-Object-Meta-Ours': 'okay', + 'X-Copy-From-Account': 'a1'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + method, path, req_headers = calls[1] + self.assertEqual('PUT', method) + self.assertEqual('/v1/a/c/o', path) + self.assertEqual(req_headers['X-Object-Meta-Ours'], 'okay') + self.assertTrue(('X-Object-Meta-Ours', 'okay') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a1/c/o', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/c/o', self.authorized[1].path) + + def test_copy_source_larger_than_max_file_size(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "largebody") + req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': '/c/o'}) + with mock.patch('swift.common.middleware.copy.' 
+ 'MAX_FILE_SIZE', 1): + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '413 Request Entity Too Large') + self.assertEqual(len(self.authorized), 1) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + + def test_basic_COPY(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a/c/o-copy', swob.HTTPCreated, {}) + req = Request.blank( + '/v1/a/c/o', method='COPY', + headers={'Content-Length': 0, + 'Destination': 'c/o-copy'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + self.assertTrue(('X-Copied-From', 'c/o') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/c/o-copy', self.authorized[1].path) + + def test_COPY_no_destination_header(self): + req = Request.blank( + '/v1/a/c/o', method='COPY', headers={'Content-Length': 0}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '412 Precondition Failed') + self.assertEqual(len(self.authorized), 0) + + def test_basic_COPY_account(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a1/c1/o2', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': 'c1/o2', + 'Destination-Account': 'a1'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + method, path, req_headers = calls[0] + self.assertEqual('GET', method) + self.assertEqual('/v1/a/c/o', path) + method, path, req_headers = calls[1] + self.assertEqual('PUT', method) + self.assertEqual('/v1/a1/c1/o2', path) + self.assertTrue(('X-Copied-From', 'c/o') in headers) + 
self.assertTrue(('X-Copied-From-Account', 'a') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a1/c1/o2', self.authorized[1].path) + + def test_COPY_across_containers(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a/c2/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': 'c2/o'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + self.assertTrue(('X-Copied-From', 'c/o') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/c2/o', self.authorized[1].path) + + def test_COPY_source_with_slashes_in_name(self): + self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o/o2', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': 'c/o'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + method, path, req_headers = calls[1] + self.assertEqual('PUT', method) + self.assertEqual('/v1/a/c/o', path) + self.assertTrue(('X-Copied-From', 'c/o/o2') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/c/o', self.authorized[1].path) + + def test_COPY_account_source_with_slashes_in_name(self): + self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 
'passed') + self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o/o2', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': 'c1/o', + 'Destination-Account': 'a1'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + method, path, req_headers = calls[1] + self.assertEqual('PUT', method) + self.assertEqual('/v1/a1/c1/o', path) + self.assertTrue(('X-Copied-From', 'c/o/o2') in headers) + self.assertTrue(('X-Copied-From-Account', 'a') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a1/c1/o', self.authorized[1].path) + + def test_COPY_destination_leading_slash(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c/o'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + self.assertTrue(('X-Copied-From', 'c/o') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/c/o', self.authorized[1].path) + + def test_COPY_account_destination_leading_slash(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c1/o', + 'Destination-Account': 'a1'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + 
method, path, req_headers = calls[1] + self.assertEqual('PUT', method) + self.assertEqual('/v1/a1/c1/o', path) + self.assertTrue(('X-Copied-From', 'c/o') in headers) + self.assertTrue(('X-Copied-From-Account', 'a') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a1/c1/o', self.authorized[1].path) + + def test_COPY_source_with_slashes_destination_leading_slash(self): + self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o/o2', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c/o'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + method, path, req_headers = calls[1] + self.assertEqual('PUT', method) + self.assertEqual('/v1/a/c/o', path) + self.assertTrue(('X-Copied-From', 'c/o/o2') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/c/o', self.authorized[1].path) + + def test_COPY_account_source_with_slashes_destination_leading_slash(self): + self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed') + self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o/o2', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c1/o', + 'Destination-Account': 'a1'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + method, path, req_headers = calls[1] + self.assertEqual('PUT', method) + self.assertEqual('/v1/a1/c1/o', path) + self.assertTrue(('X-Copied-From', 
'c/o/o2') in headers) + self.assertTrue(('X-Copied-From-Account', 'a') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a1/c1/o', self.authorized[1].path) + + def test_COPY_no_object_in_destination(self): + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': 'c_o'}) + try: + status, headers, body = self.call_ssc(req) + except HTTPException as resp: + self.assertEqual("412 Precondition Failed", str(resp)) + else: + self.fail("Expecting HTTPException.") + + def test_COPY_account_no_object_in_destination(self): + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': 'c_o', + 'Destination-Account': 'a1'}) + try: + status, headers, body = self.call_ssc(req) + except HTTPException as resp: + self.assertEqual("412 Precondition Failed", str(resp)) + else: + self.fail("Expecting HTTPException.") + + def test_COPY_account_bad_destination_account(self): + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c/o', + 'Destination-Account': '/i/am/bad'}) + try: + status, headers, body = self.call_ssc(req) + except HTTPException as resp: + self.assertEqual("412 Precondition Failed", str(resp)) + else: + self.fail("Expecting HTTPException.") + + def test_COPY_server_error_reading_source(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPServiceUnavailable, {}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c/o'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '503 Service Unavailable') + self.assertEqual(len(self.authorized), 1) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + + def 
test_COPY_account_server_error_reading_source(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPServiceUnavailable, {}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c1/o', + 'Destination-Account': 'a1'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '503 Service Unavailable') + self.assertEqual(len(self.authorized), 1) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + + def test_COPY_not_found_reading_source(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPNotFound, {}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c/o'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '404 Not Found') + self.assertEqual(len(self.authorized), 1) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + + def test_COPY_account_not_found_reading_source(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPNotFound, {}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c1/o', + 'Destination-Account': 'a1'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '404 Not Found') + self.assertEqual(len(self.authorized), 1) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + + def test_COPY_with_metadata(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "passed") + self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c/o', + 'X-Object-Meta-Ours': 'okay'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + method, path, req_headers = calls[1] + self.assertEqual('PUT', method) + 
self.assertEqual('/v1/a/c/o', path) + self.assertEqual(req_headers['X-Object-Meta-Ours'], 'okay') + self.assertTrue(('X-Object-Meta-Ours', 'okay') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/c/o', self.authorized[1].path) + + def test_COPY_account_with_metadata(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "passed") + self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c1/o', + 'X-Object-Meta-Ours': 'okay', + 'Destination-Account': 'a1'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + method, path, req_headers = calls[1] + self.assertEqual('PUT', method) + self.assertEqual('/v1/a1/c1/o', path) + self.assertEqual(req_headers['X-Object-Meta-Ours'], 'okay') + self.assertTrue(('X-Object-Meta-Ours', 'okay') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a1/c1/o', self.authorized[1].path) + + def test_COPY_source_zero_content_length(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, None) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c/o'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '413 Request Entity Too Large') + self.assertEqual(len(self.authorized), 1) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + + def test_COPY_source_larger_than_max_file_size(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 
"largebody") + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c/o'}) + with mock.patch('swift.common.middleware.copy.' + 'MAX_FILE_SIZE', 1): + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '413 Request Entity Too Large') + self.assertEqual(len(self.authorized), 1) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + + def test_COPY_account_source_zero_content_length(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, None) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c/o', + 'Destination-Account': 'a1'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '413 Request Entity Too Large') + self.assertEqual(len(self.authorized), 1) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + + def test_COPY_account_source_larger_than_max_file_size(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "largebody") + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c1/o', + 'Destination-Account': 'a1'}) + with mock.patch('swift.common.middleware.copy.' 
+ 'MAX_FILE_SIZE', 1): + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '413 Request Entity Too Large') + self.assertEqual(len(self.authorized), 1) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + + def test_COPY_newest(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, + {'Last-Modified': '123'}, "passed") + self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c/o'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + self.assertTrue(('X-Copied-From-Last-Modified', '123') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/c/o', self.authorized[1].path) + + def test_COPY_account_newest(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, + {'Last-Modified': '123'}, "passed") + self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'COPY'}, + headers={'Destination': '/c1/o', + 'Destination-Account': 'a1'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + self.assertTrue(('X-Copied-From-Last-Modified', '123') in headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a1/c1/o', self.authorized[1].path) + + def test_COPY_in_OPTIONS_response(self): + self.app.register('OPTIONS', '/v1/a/c/o', swob.HTTPOk, + {'Allow': 'GET, PUT'}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'OPTIONS'}, headers={}) + status, headers, body = 
self.call_ssc(req) + self.assertEqual(status, '200 OK') + calls = self.app.calls_with_headers + method, path, req_headers = calls[0] + self.assertEqual('OPTIONS', method) + self.assertEqual('/v1/a/c/o', path) + self.assertTrue(('Allow', 'GET, PUT, COPY') in headers) + self.assertEqual(len(self.authorized), 1) + self.assertEqual('OPTIONS', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + + def test_COPY_in_OPTIONS_response_CORS(self): + self.app.register('OPTIONS', '/v1/a/c/o', swob.HTTPOk, + {'Allow': 'GET, PUT', + 'Access-Control-Allow-Methods': 'GET, PUT'}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'OPTIONS'}, headers={}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '200 OK') + calls = self.app.calls_with_headers + method, path, req_headers = calls[0] + self.assertEqual('OPTIONS', method) + self.assertEqual('/v1/a/c/o', path) + self.assertTrue(('Allow', 'GET, PUT, COPY') in headers) + self.assertTrue(('Access-Control-Allow-Methods', + 'GET, PUT, COPY') in headers) + self.assertEqual(len(self.authorized), 1) + self.assertEqual('OPTIONS', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + + +class TestServerSideCopyConfiguration(unittest.TestCase): + + def setUp(self): + self.tmpdir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self.tmpdir) + + def test_reading_proxy_conf_when_no_middleware_conf_present(self): + proxy_conf = dedent(""" + [DEFAULT] + bind_ip = 10.4.5.6 + + [pipeline:main] + pipeline = catch_errors copy ye-olde-proxy-server + + [filter:copy] + use = egg:swift#copy + + [app:ye-olde-proxy-server] + use = egg:swift#proxy + object_post_as_copy = no + """) + + conffile = tempfile.NamedTemporaryFile() + conffile.write(proxy_conf) + conffile.flush() + + ssc = copy.filter_factory({ + '__file__': conffile.name + })("no app here") + + self.assertEqual(ssc.object_post_as_copy, False) + + def 
test_middleware_conf_precedence(self): + proxy_conf = dedent(""" + [DEFAULT] + bind_ip = 10.4.5.6 + + [pipeline:main] + pipeline = catch_errors copy ye-olde-proxy-server + + [filter:copy] + use = egg:swift#copy + object_post_as_copy = no + + [app:ye-olde-proxy-server] + use = egg:swift#proxy + object_post_as_copy = yes + """) + + conffile = tempfile.NamedTemporaryFile() + conffile.write(proxy_conf) + conffile.flush() + + ssc = copy.filter_factory({ + 'object_post_as_copy': 'no', + '__file__': conffile.name + })("no app here") + + self.assertEqual(ssc.object_post_as_copy, False) + + +@patch_policies(with_ec_default=True) +class TestServerSideCopyMiddlewareWithEC(unittest.TestCase): + container_info = { + 'write_acl': None, + 'read_acl': None, + 'storage_policy': None, + 'sync_key': None, + 'versions': None, + } + + def setUp(self): + self.logger = debug_logger('proxy-server') + self.logger.thread_locals = ('txn1', '127.0.0.2') + self.app = PatchedObjControllerApp( + None, FakeMemcache(), account_ring=FakeRing(), + container_ring=FakeRing(), logger=self.logger) + self.ssc = copy.filter_factory({ + 'object_post_as_copy': 'yes', + })(self.app) + self.ssc.logger = self.app.logger + self.policy = POLICIES.default + self.app.container_info = dict(self.container_info) + + def test_COPY_with_ranges(self): + req = swob.Request.blank( + '/v1/a/c/o', method='COPY', + headers={'Destination': 'c1/o', + 'Range': 'bytes=5-10'}) + # turn a real body into fragments + segment_size = self.policy.ec_segment_size + real_body = ('asdf' * segment_size)[:-10] + + # split it up into chunks + chunks = [real_body[x:x + segment_size] + for x in range(0, len(real_body), segment_size)] + + # we need only first chunk to rebuild 5-10 range + fragments = self.policy.pyeclib_driver.encode(chunks[0]) + fragment_payloads = [] + fragment_payloads.append(fragments) + + node_fragments = zip(*fragment_payloads) + self.assertEqual(len(node_fragments), + self.policy.object_ring.replicas) # sanity + headers 
= {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body))} + responses = [(200, ''.join(node_fragments[i]), headers) + for i in range(POLICIES.default.ec_ndata)] + responses += [(201, '', {})] * self.policy.object_ring.replicas + status_codes, body_iter, headers = zip(*responses) + expect_headers = { + 'X-Obj-Metadata-Footer': 'yes', + 'X-Obj-Multiphase-Commit': 'yes' + } + with set_http_connect(*status_codes, body_iter=body_iter, + headers=headers, expect_headers=expect_headers): + resp = req.get_response(self.ssc) + self.assertEqual(resp.status_int, 201) + + def test_COPY_with_invalid_ranges(self): + # real body size is segment_size - 10 (just 1 segment) + segment_size = self.policy.ec_segment_size + real_body = ('a' * segment_size)[:-10] + + # range is out of real body but in segment size + self._test_invalid_ranges('COPY', real_body, + segment_size, '%s-' % (segment_size - 10)) + # range is out of both real body and segment size + self._test_invalid_ranges('COPY', real_body, + segment_size, '%s-' % (segment_size + 10)) + + def _test_invalid_ranges(self, method, real_body, segment_size, req_range): + # make a request with range starts from more than real size. + body_etag = md5(real_body).hexdigest() + req = swob.Request.blank( + '/v1/a/c/o', method=method, + headers={'Destination': 'c1/o', + 'Range': 'bytes=%s' % (req_range)}) + + fragments = self.policy.pyeclib_driver.encode(real_body) + fragment_payloads = [fragments] + + node_fragments = zip(*fragment_payloads) + self.assertEqual(len(node_fragments), + self.policy.object_ring.replicas) # sanity + headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body)), + 'X-Object-Sysmeta-Ec-Etag': body_etag} + start = int(req_range.split('-')[0]) + self.assertTrue(start >= 0) # sanity + title, exp = swob.RESPONSE_REASONS[416] + range_not_satisfiable_body = \ + '

%s

%s

' % (title, exp) + if start >= segment_size: + responses = [(416, range_not_satisfiable_body, headers) + for i in range(POLICIES.default.ec_ndata)] + else: + responses = [(200, ''.join(node_fragments[i]), headers) + for i in range(POLICIES.default.ec_ndata)] + status_codes, body_iter, headers = zip(*responses) + expect_headers = { + 'X-Obj-Metadata-Footer': 'yes', + 'X-Obj-Multiphase-Commit': 'yes' + } + # TODO possibly use FakeApp here + with set_http_connect(*status_codes, body_iter=body_iter, + headers=headers, expect_headers=expect_headers): + resp = req.get_response(self.ssc) + self.assertEqual(resp.status_int, 416) + self.assertEqual(resp.content_length, len(range_not_satisfiable_body)) + self.assertEqual(resp.body, range_not_satisfiable_body) + self.assertEqual(resp.etag, body_etag) + self.assertEqual(resp.headers['Accept-Ranges'], 'bytes') diff --git a/test/unit/common/middleware/test_dlo.py b/test/unit/common/middleware/test_dlo.py index 1374b403df..04fbc4e614 100644 --- a/test/unit/common/middleware/test_dlo.py +++ b/test/unit/common/middleware/test_dlo.py @@ -803,107 +803,6 @@ class TestDloGetManifest(DloTestCase): self.assertTrue(auth_got_called[0] > 1) -def fake_start_response(*args, **kwargs): - pass - - -class TestDloCopyHook(DloTestCase): - def setUp(self): - super(TestDloCopyHook, self).setUp() - - self.app.register( - 'GET', '/v1/AUTH_test/c/o1', swob.HTTPOk, - {'Content-Length': '10', 'Etag': 'o1-etag'}, - "aaaaaaaaaa") - self.app.register( - 'GET', '/v1/AUTH_test/c/o2', swob.HTTPOk, - {'Content-Length': '10', 'Etag': 'o2-etag'}, - "bbbbbbbbbb") - self.app.register( - 'GET', '/v1/AUTH_test/c/man', - swob.HTTPOk, {'X-Object-Manifest': 'c/o'}, - "manifest-contents") - - lm = '2013-11-22T02:42:13.781760' - ct = 'application/octet-stream' - segs = [{"hash": "o1-etag", "bytes": 10, "name": "o1", - "last_modified": lm, "content_type": ct}, - {"hash": "o2-etag", "bytes": 5, "name": "o2", - "last_modified": lm, "content_type": ct}] - - self.app.register( 
- 'GET', '/v1/AUTH_test/c?format=json&prefix=o', - swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'}, - json.dumps(segs)) - - copy_hook = [None] - - # slip this guy in there to pull out the hook - def extract_copy_hook(env, sr): - copy_hook[0] = env.get('swift.copy_hook') - return self.app(env, sr) - - self.dlo = dlo.filter_factory({})(extract_copy_hook) - - req = swob.Request.blank('/v1/AUTH_test/c/o1', - environ={'REQUEST_METHOD': 'GET'}) - self.dlo(req.environ, fake_start_response) - self.copy_hook = copy_hook[0] - - self.assertTrue(self.copy_hook is not None) # sanity check - - def test_copy_hook_passthrough(self): - source_req = swob.Request.blank( - '/v1/AUTH_test/c/man', - environ={'REQUEST_METHOD': 'GET'}) - sink_req = swob.Request.blank( - '/v1/AUTH_test/c/man', - environ={'REQUEST_METHOD': 'PUT'}) - source_resp = swob.Response(request=source_req, status=200) - - # no X-Object-Manifest header, so do nothing - modified_resp = self.copy_hook(source_req, source_resp, sink_req) - self.assertTrue(modified_resp is source_resp) - - def test_copy_hook_manifest(self): - source_req = swob.Request.blank( - '/v1/AUTH_test/c/man', - environ={'REQUEST_METHOD': 'GET'}) - sink_req = swob.Request.blank( - '/v1/AUTH_test/c/man', - environ={'REQUEST_METHOD': 'PUT'}) - source_resp = swob.Response( - request=source_req, status=200, - headers={"X-Object-Manifest": "c/o"}, - app_iter=["manifest"]) - - # it's a manifest, so copy the segments to make a normal object - modified_resp = self.copy_hook(source_req, source_resp, sink_req) - self.assertTrue(modified_resp is not source_resp) - self.assertEqual(modified_resp.etag, - hashlib.md5("o1-etago2-etag").hexdigest()) - self.assertEqual(sink_req.headers.get('X-Object-Manifest'), None) - - def test_copy_hook_manifest_with_multipart_manifest_get(self): - source_req = swob.Request.blank( - '/v1/AUTH_test/c/man', - environ={'REQUEST_METHOD': 'GET', - 'QUERY_STRING': 'multipart-manifest=get'}) - sink_req = 
swob.Request.blank( - '/v1/AUTH_test/c/man', - environ={'REQUEST_METHOD': 'PUT'}) - source_resp = swob.Response( - request=source_req, status=200, - headers={"X-Object-Manifest": "c/o"}, - app_iter=["manifest"]) - - # make sure the sink request (the backend PUT) gets X-Object-Manifest - # on it, but that's all - modified_resp = self.copy_hook(source_req, source_resp, sink_req) - self.assertTrue(modified_resp is source_resp) - self.assertEqual(sink_req.headers.get('X-Object-Manifest'), 'c/o') - - class TestDloConfiguration(unittest.TestCase): """ For backwards compatibility, we will read a couple of values out of the diff --git a/test/unit/common/middleware/test_quotas.py b/test/unit/common/middleware/test_quotas.py index b71b78ed83..f99b8df663 100644 --- a/test/unit/common/middleware/test_quotas.py +++ b/test/unit/common/middleware/test_quotas.py @@ -15,8 +15,9 @@ import unittest -from swift.common.swob import Request, HTTPUnauthorized -from swift.common.middleware import container_quotas +from swift.common.swob import Request, HTTPUnauthorized, HTTPOk, HTTPException +from swift.common.middleware import container_quotas, copy +from test.unit.common.middleware.helpers import FakeSwift class FakeCache(object): @@ -95,32 +96,6 @@ class TestContainerQuotas(unittest.TestCase): self.assertEqual(res.status_int, 413) self.assertEqual(res.body, 'Upload exceeds quota.') - def test_exceed_bytes_quota_copy_from(self): - app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {}) - cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '2'}}) - - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'PUT', - 'swift.object/a/c2/o2': {'length': 10}, - 'swift.cache': cache}, - headers={'x-copy-from': '/c2/o2'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 413) - self.assertEqual(res.body, 'Upload exceeds quota.') - - def test_exceed_bytes_quota_copy_verb(self): - app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {}) - cache = 
FakeCache({'bytes': 0, 'meta': {'quota-bytes': '2'}}) - - req = Request.blank('/v1/a/c2/o2', - environ={'REQUEST_METHOD': 'COPY', - 'swift.object/a/c2/o2': {'length': 10}, - 'swift.cache': cache}, - headers={'Destination': '/c/o'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 413) - self.assertEqual(res.body, 'Upload exceeds quota.') - def test_not_exceed_bytes_quota(self): app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {}) cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}}) @@ -131,60 +106,6 @@ class TestContainerQuotas(unittest.TestCase): res = req.get_response(app) self.assertEqual(res.status_int, 200) - def test_not_exceed_bytes_quota_copy_from(self): - app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {}) - cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}}) - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'PUT', - 'swift.object/a/c2/o2': {'length': 10}, - 'swift.cache': cache}, - headers={'x-copy-from': '/c2/o2'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 200) - - def test_not_exceed_bytes_quota_copy_verb(self): - app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {}) - cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}}) - req = Request.blank('/v1/a/c2/o2', - environ={'REQUEST_METHOD': 'COPY', - 'swift.object/a/c2/o2': {'length': 10}, - 'swift.cache': cache}, - headers={'Destination': '/c/o'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 200) - - def test_bytes_quota_copy_from_no_src(self): - app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {}) - cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}}) - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'PUT', - 'swift.object/a/c2/o2': {'length': 10}, - 'swift.cache': cache}, - headers={'x-copy-from': '/c2/o3'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 200) - - def test_bytes_quota_copy_from_bad_src(self): - app = 
container_quotas.ContainerQuotaMiddleware(FakeApp(), {}) - cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}}) - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'PUT', - 'swift.cache': cache}, - headers={'x-copy-from': 'bad_path'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 412) - - def test_bytes_quota_copy_verb_no_src(self): - app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {}) - cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}}) - req = Request.blank('/v1/a/c2/o3', - environ={'REQUEST_METHOD': 'COPY', - 'swift.object/a/c2/o2': {'length': 10}, - 'swift.cache': cache}, - headers={'Destination': '/c/o'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 200) - def test_exceed_counts_quota(self): app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {}) cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}}) @@ -196,61 +117,6 @@ class TestContainerQuotas(unittest.TestCase): self.assertEqual(res.status_int, 413) self.assertEqual(res.body, 'Upload exceeds quota.') - def test_exceed_counts_quota_copy_from(self): - app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {}) - cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}}) - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'PUT', - 'swift.object/a/c2/o2': {'length': 10}, - 'swift.cache': cache}, - headers={'x-copy-from': '/c2/o2'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 413) - self.assertEqual(res.body, 'Upload exceeds quota.') - - def test_exceed_counts_quota_copy_verb(self): - app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {}) - cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}}) - req = Request.blank('/v1/a/c2/o2', - environ={'REQUEST_METHOD': 'COPY', - 'swift.cache': cache}, - headers={'Destination': '/c/o'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 413) - self.assertEqual(res.body, 'Upload exceeds 
quota.') - - def test_exceed_counts_quota_copy_cross_account_verb(self): - app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {}) - a_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '2'}, - 'status': 200, 'object_count': 1} - a2_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '1'}, - 'status': 200, 'object_count': 1} - req = Request.blank('/v1/a/c2/o2', - environ={'REQUEST_METHOD': 'COPY', - 'swift.container/a/c': a_c_cache, - 'swift.container/a2/c': a2_c_cache}, - headers={'Destination': '/c/o', - 'Destination-Account': 'a2'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 413) - self.assertEqual(res.body, 'Upload exceeds quota.') - - def test_exceed_counts_quota_copy_cross_account_PUT_verb(self): - app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {}) - a_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '2'}, - 'status': 200, 'object_count': 1} - a2_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '1'}, - 'status': 200, 'object_count': 1} - req = Request.blank('/v1/a2/c/o', - environ={'REQUEST_METHOD': 'PUT', - 'swift.container/a/c': a_c_cache, - 'swift.container/a2/c': a2_c_cache}, - headers={'X-Copy-From': '/c2/o2', - 'X-Copy-From-Account': 'a'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 413) - self.assertEqual(res.body, 'Upload exceeds quota.') - def test_not_exceed_counts_quota(self): app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {}) cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}}) @@ -261,26 +127,6 @@ class TestContainerQuotas(unittest.TestCase): res = req.get_response(app) self.assertEqual(res.status_int, 200) - def test_not_exceed_counts_quota_copy_from(self): - app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {}) - cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}}) - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'PUT', - 'swift.cache': cache}, - headers={'x-copy-from': '/c2/o2'}) - res 
= req.get_response(app) - self.assertEqual(res.status_int, 200) - - def test_not_exceed_counts_quota_copy_verb(self): - app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {}) - cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}}) - req = Request.blank('/v1/a/c2/o2', - environ={'REQUEST_METHOD': 'COPY', - 'swift.cache': cache}, - headers={'Destination': '/c/o'}) - res = req.get_response(app) - self.assertEqual(res.status_int, 200) - def test_invalid_quotas(self): req = Request.blank( '/v1/a/c', @@ -346,5 +192,168 @@ class TestContainerQuotas(unittest.TestCase): res = req.get_response(app) self.assertEqual(res.status_int, 401) + +class ContainerQuotaCopyingTestCases(unittest.TestCase): + + def setUp(self): + self.app = FakeSwift() + self.cq_filter = container_quotas.filter_factory({})(self.app) + self.copy_filter = copy.filter_factory({})(self.cq_filter) + + def test_exceed_bytes_quota_copy_verb(self): + cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '2'}}) + self.app.register('GET', '/v1/a/c2/o2', HTTPOk, + {'Content-Length': '10'}, 'passed') + + req = Request.blank('/v1/a/c2/o2', + environ={'REQUEST_METHOD': 'COPY', + 'swift.cache': cache}, + headers={'Destination': '/c/o'}) + res = req.get_response(self.copy_filter) + self.assertEqual(res.status_int, 413) + self.assertEqual(res.body, 'Upload exceeds quota.') + + def test_not_exceed_bytes_quota_copy_verb(self): + self.app.register('GET', '/v1/a/c2/o2', HTTPOk, + {'Content-Length': '10'}, 'passed') + self.app.register( + 'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed') + cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}}) + req = Request.blank('/v1/a/c2/o2', + environ={'REQUEST_METHOD': 'COPY', + 'swift.cache': cache}, + headers={'Destination': '/c/o'}) + res = req.get_response(self.copy_filter) + self.assertEqual(res.status_int, 200) + + def test_exceed_counts_quota_copy_verb(self): + self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {}, 'passed') + cache = 
FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}}) + req = Request.blank('/v1/a/c2/o2', + environ={'REQUEST_METHOD': 'COPY', + 'swift.cache': cache}, + headers={'Destination': '/c/o'}) + res = req.get_response(self.copy_filter) + self.assertEqual(res.status_int, 413) + self.assertEqual(res.body, 'Upload exceeds quota.') + + def test_exceed_counts_quota_copy_cross_account_verb(self): + self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {}, 'passed') + a_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '2'}, + 'status': 200, 'object_count': 1} + a2_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '1'}, + 'status': 200, 'object_count': 1} + req = Request.blank('/v1/a/c2/o2', + environ={'REQUEST_METHOD': 'COPY', + 'swift.container/a/c': a_c_cache, + 'swift.container/a2/c': a2_c_cache}, + headers={'Destination': '/c/o', + 'Destination-Account': 'a2'}) + res = req.get_response(self.copy_filter) + self.assertEqual(res.status_int, 413) + self.assertEqual(res.body, 'Upload exceeds quota.') + + def test_exceed_counts_quota_copy_cross_account_PUT_verb(self): + self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {}, 'passed') + a_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '2'}, + 'status': 200, 'object_count': 1} + a2_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '1'}, + 'status': 200, 'object_count': 1} + req = Request.blank('/v1/a2/c/o', + environ={'REQUEST_METHOD': 'PUT', + 'swift.container/a/c': a_c_cache, + 'swift.container/a2/c': a2_c_cache}, + headers={'X-Copy-From': '/c2/o2', + 'X-Copy-From-Account': 'a'}) + res = req.get_response(self.copy_filter) + self.assertEqual(res.status_int, 413) + self.assertEqual(res.body, 'Upload exceeds quota.') + + def test_exceed_bytes_quota_copy_from(self): + self.app.register('GET', '/v1/a/c2/o2', HTTPOk, + {'Content-Length': '10'}, 'passed') + cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '2'}}) + + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', + 
'swift.cache': cache}, + headers={'x-copy-from': '/c2/o2'}) + res = req.get_response(self.copy_filter) + self.assertEqual(res.status_int, 413) + self.assertEqual(res.body, 'Upload exceeds quota.') + + def test_not_exceed_bytes_quota_copy_from(self): + self.app.register('GET', '/v1/a/c2/o2', HTTPOk, + {'Content-Length': '10'}, 'passed') + self.app.register( + 'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed') + cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', + 'swift.cache': cache}, + headers={'x-copy-from': '/c2/o2'}) + res = req.get_response(self.copy_filter) + self.assertEqual(res.status_int, 200) + + def test_bytes_quota_copy_from_no_src(self): + self.app.register('GET', '/v1/a/c2/o3', HTTPOk, {}, 'passed') + self.app.register( + 'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed') + cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', + 'swift.cache': cache}, + headers={'x-copy-from': '/c2/o3'}) + res = req.get_response(self.copy_filter) + self.assertEqual(res.status_int, 200) + + def test_bytes_quota_copy_from_bad_src(self): + cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', + 'swift.cache': cache}, + headers={'x-copy-from': 'bad_path'}) + with self.assertRaises(HTTPException) as catcher: + req.get_response(self.copy_filter) + self.assertEqual(412, catcher.exception.status_int) + + def test_exceed_counts_quota_copy_from(self): + self.app.register('GET', '/v1/a/c2/o2', HTTPOk, + {'Content-Length': '10'}, 'passed') + cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', + 'swift.cache': cache}, + headers={'x-copy-from': '/c2/o2'}) + res = req.get_response(self.copy_filter) + self.assertEqual(res.status_int, 413) + self.assertEqual(res.body, 
'Upload exceeds quota.') + + def test_not_exceed_counts_quota_copy_from(self): + self.app.register('GET', '/v1/a/c2/o2', HTTPOk, + {'Content-Length': '10'}, 'passed') + self.app.register( + 'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed') + cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}}) + req = Request.blank('/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', + 'swift.cache': cache}, + headers={'x-copy-from': '/c2/o2'}) + res = req.get_response(self.copy_filter) + self.assertEqual(res.status_int, 200) + + def test_not_exceed_counts_quota_copy_verb(self): + self.app.register('GET', '/v1/a/c2/o2', HTTPOk, + {'Content-Length': '10'}, 'passed') + self.app.register( + 'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed') + cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}}) + req = Request.blank('/v1/a/c2/o2', + environ={'REQUEST_METHOD': 'COPY', + 'swift.cache': cache}, + headers={'Destination': '/c/o'}) + res = req.get_response(self.copy_filter) + self.assertEqual(res.status_int, 200) + if __name__ == '__main__': unittest.main() diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py index 03f5c23213..79eaddcbf3 100644 --- a/test/unit/common/middleware/test_slo.py +++ b/test/unit/common/middleware/test_slo.py @@ -26,7 +26,7 @@ from swift.common import swob, utils from swift.common.exceptions import ListingIterError, SegmentError from swift.common.header_key_dict import HeaderKeyDict from swift.common.middleware import slo -from swift.common.swob import Request, Response, HTTPException +from swift.common.swob import Request, HTTPException from swift.common.utils import quote, closing_if_possible, close_if_possible from test.unit.common.middleware.helpers import FakeSwift @@ -2653,70 +2653,6 @@ class TestSloBulkLogger(unittest.TestCase): self.assertTrue(slo_mware.logger is slo_mware.bulk_deleter.logger) -class TestSloCopyHook(SloTestCase): - def setUp(self): - super(TestSloCopyHook, self).setUp() - - 
self.app.register( - 'GET', '/v1/AUTH_test/c/o', swob.HTTPOk, - {'Content-Length': '3', 'Etag': md5hex("obj")}, "obj") - self.app.register( - 'GET', '/v1/AUTH_test/c/man', - swob.HTTPOk, {'Content-Type': 'application/json', - 'X-Static-Large-Object': 'true'}, - json.dumps([{'name': '/c/o', 'hash': md5hex("obj"), - 'bytes': '3'}])) - self.app.register( - 'COPY', '/v1/AUTH_test/c/o', swob.HTTPCreated, {}) - - copy_hook = [None] - - # slip this guy in there to pull out the hook - def extract_copy_hook(env, sr): - if env['REQUEST_METHOD'] == 'COPY': - copy_hook[0] = env['swift.copy_hook'] - return self.app(env, sr) - - self.slo = slo.filter_factory({})(extract_copy_hook) - - req = Request.blank('/v1/AUTH_test/c/o', - environ={'REQUEST_METHOD': 'COPY'}) - self.slo(req.environ, fake_start_response) - self.copy_hook = copy_hook[0] - - self.assertTrue(self.copy_hook is not None) # sanity check - - def test_copy_hook_passthrough(self): - source_req = Request.blank( - '/v1/AUTH_test/c/o', - environ={'REQUEST_METHOD': 'GET'}) - sink_req = Request.blank( - '/v1/AUTH_test/c/o', - environ={'REQUEST_METHOD': 'PUT'}) - # no X-Static-Large-Object header, so do nothing - source_resp = Response(request=source_req, status=200) - - modified_resp = self.copy_hook(source_req, source_resp, sink_req) - self.assertTrue(modified_resp is source_resp) - - def test_copy_hook_manifest(self): - source_req = Request.blank( - '/v1/AUTH_test/c/o', - environ={'REQUEST_METHOD': 'GET'}) - sink_req = Request.blank( - '/v1/AUTH_test/c/o', - environ={'REQUEST_METHOD': 'PUT'}) - source_resp = Response(request=source_req, status=200, - headers={"X-Static-Large-Object": "true"}, - app_iter=[json.dumps([{'name': '/c/o', - 'hash': md5hex("obj"), - 'bytes': '3'}])]) - - modified_resp = self.copy_hook(source_req, source_resp, sink_req) - self.assertTrue(modified_resp is not source_resp) - self.assertEqual(modified_resp.etag, md5hex(md5hex("obj"))) - - class TestSwiftInfo(unittest.TestCase): def setUp(self): 
utils._swift_info = {} diff --git a/test/unit/common/middleware/test_versioned_writes.py b/test/unit/common/middleware/test_versioned_writes.py index c6da47fde8..27b8914555 100644 --- a/test/unit/common/middleware/test_versioned_writes.py +++ b/test/unit/common/middleware/test_versioned_writes.py @@ -19,7 +19,7 @@ import os import time import unittest from swift.common import swob -from swift.common.middleware import versioned_writes +from swift.common.middleware import versioned_writes, copy from swift.common.swob import Request from test.unit.common.middleware.helpers import FakeSwift @@ -259,6 +259,23 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) + def test_put_object_post_as_copy(self): + # PUTs due to a post-as-copy should NOT cause a versioning op + self.app.register( + 'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed') + + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) + req = Request.blank( + '/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache, + 'CONTENT_LENGTH': '100', + 'swift.post_as_copy': True}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '201 Created') + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + self.assertEqual(1, self.app.call_count) + def test_put_first_object_success(self): self.app.register( 'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') @@ -333,7 +350,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): def test_delete_object_no_versioning_with_container_config_true(self): # set False to versions_write obviously and expect no GET versioning - # container and PUT called (just delete object as normal) + # container and GET/PUT called (just delete object as normal) self.vw.conf = {'allow_versioned_writes': 'false'} self.app.register( 'DELETE', '/v1/a/c/o', swob.HTTPNoContent, {}, 'passed') @@ -351,25 +368,6 @@ 
class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertTrue('GET' not in called_method) self.assertEqual(1, self.app.call_count) - def test_copy_object_no_versioning_with_container_config_true(self): - # set False to versions_write obviously and expect no extra - # COPY called (just copy object as normal) - self.vw.conf = {'allow_versioned_writes': 'false'} - self.app.register( - 'COPY', '/v1/a/c/o', swob.HTTPCreated, {}, None) - cache = FakeCache({'versions': 'ver_cont'}) - req = Request.blank( - '/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache}) - status, headers, body = self.call_vw(req) - self.assertEqual(status, '201 Created') - self.assertEqual(len(self.authorized), 1) - self.assertRequestEqual(req, self.authorized[0]) - called_method = \ - [method for (method, path, rheaders) in self.app._calls] - self.assertTrue('COPY' in called_method) - self.assertEqual(called_method.count('COPY'), 1) - def test_new_version_success(self): self.app.register( 'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed') @@ -476,77 +474,6 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertEqual('PUT', method) self.assertEqual('/v1/a/ver_cont/001o/0000000000.00000', path) - def test_copy_first_version(self): - self.app.register( - 'COPY', '/v1/a/src_cont/src_obj', swob.HTTPOk, {}, 'passed') - self.app.register( - 'GET', '/v1/a/tgt_cont/tgt_obj', swob.HTTPNotFound, {}, None) - cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) - req = Request.blank( - '/v1/a/src_cont/src_obj', - environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache, - 'CONTENT_LENGTH': '100'}, - headers={'Destination': 'tgt_cont/tgt_obj'}) - status, headers, body = self.call_vw(req) - self.assertEqual(status, '200 OK') - self.assertEqual(len(self.authorized), 1) - self.assertRequestEqual(req, self.authorized[0]) - self.assertEqual(2, self.app.call_count) - - def test_copy_new_version(self): - self.app.register( - 'COPY', 
'/v1/a/src_cont/src_obj', swob.HTTPOk, {}, 'passed') - self.app.register( - 'GET', '/v1/a/tgt_cont/tgt_obj', swob.HTTPOk, - {'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed') - self.app.register( - 'PUT', '/v1/a/ver_cont/007tgt_obj/0000000001.00000', swob.HTTPOk, - {}, None) - cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) - req = Request.blank( - '/v1/a/src_cont/src_obj', - environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache, - 'CONTENT_LENGTH': '100'}, - headers={'Destination': 'tgt_cont/tgt_obj'}) - status, headers, body = self.call_vw(req) - self.assertEqual(status, '200 OK') - self.assertEqual(len(self.authorized), 1) - self.assertRequestEqual(req, self.authorized[0]) - self.assertEqual(3, self.app.call_count) - - def test_copy_new_version_different_account(self): - self.app.register( - 'COPY', '/v1/src_a/src_cont/src_obj', swob.HTTPOk, {}, 'passed') - self.app.register( - 'GET', '/v1/tgt_a/tgt_cont/tgt_obj', swob.HTTPOk, - {'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed') - self.app.register( - 'PUT', '/v1/tgt_a/ver_cont/007tgt_obj/0000000001.00000', - swob.HTTPOk, {}, None) - cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) - req = Request.blank( - '/v1/src_a/src_cont/src_obj', - environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache, - 'CONTENT_LENGTH': '100'}, - headers={'Destination': 'tgt_cont/tgt_obj', - 'Destination-Account': 'tgt_a'}) - status, headers, body = self.call_vw(req) - self.assertEqual(status, '200 OK') - self.assertEqual(len(self.authorized), 1) - self.assertRequestEqual(req, self.authorized[0]) - self.assertEqual(3, self.app.call_count) - - def test_copy_new_version_bogus_account(self): - cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) - req = Request.blank( - '/v1/src_a/src_cont/src_obj', - environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache, - 'CONTENT_LENGTH': '100'}, - headers={'Destination': 'tgt_cont/tgt_obj', - 'Destination-Account': 
'/im/on/a/boat'}) - status, headers, body = self.call_vw(req) - self.assertEqual(status, '412 Precondition Failed') - def test_delete_first_object_success(self): self.app.register( 'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') @@ -1057,3 +984,117 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): ('PUT', '/v1/a/c/o'), ('DELETE', '/v1/a/ver_cont/001o/2'), ]) + + +class VersionedWritesCopyingTestCase(VersionedWritesBaseTestCase): + # verify interaction of copy and versioned_writes middlewares + + def setUp(self): + self.app = FakeSwift() + conf = {'allow_versioned_writes': 'true'} + self.vw = versioned_writes.filter_factory(conf)(self.app) + self.filter = copy.filter_factory({})(self.vw) + + def call_filter(self, req, **kwargs): + return self.call_app(req, app=self.filter, **kwargs) + + def test_copy_first_version(self): + # no existing object to move to the versions container + self.app.register( + 'GET', '/v1/a/tgt_cont/tgt_obj', swob.HTTPNotFound, {}, None) + self.app.register( + 'GET', '/v1/a/src_cont/src_obj', swob.HTTPOk, {}, 'passed') + self.app.register( + 'PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated, {}, 'passed') + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) + req = Request.blank( + '/v1/a/src_cont/src_obj', + environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache, + 'CONTENT_LENGTH': '100'}, + headers={'Destination': 'tgt_cont/tgt_obj'}) + status, headers, body = self.call_filter(req) + self.assertEqual(status, '201 Created') + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/src_cont/src_obj', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/tgt_cont/tgt_obj', self.authorized[1].path) + # note the GET on tgt_cont/tgt_obj is pre-authed + self.assertEqual(3, self.app.call_count, self.app.calls) + + def test_copy_new_version(self): + # existing object should be moved to 
versions container + self.app.register( + 'GET', '/v1/a/src_cont/src_obj', swob.HTTPOk, {}, 'passed') + self.app.register( + 'GET', '/v1/a/tgt_cont/tgt_obj', swob.HTTPOk, + {'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed') + self.app.register( + 'PUT', '/v1/a/ver_cont/007tgt_obj/0000000001.00000', swob.HTTPOk, + {}, None) + self.app.register( + 'PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated, {}, 'passed') + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) + req = Request.blank( + '/v1/a/src_cont/src_obj', + environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache, + 'CONTENT_LENGTH': '100'}, + headers={'Destination': 'tgt_cont/tgt_obj'}) + status, headers, body = self.call_filter(req) + self.assertEqual(status, '201 Created') + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/src_cont/src_obj', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/tgt_cont/tgt_obj', self.authorized[1].path) + self.assertEqual(4, self.app.call_count) + + def test_copy_new_version_different_account(self): + self.app.register( + 'GET', '/v1/src_a/src_cont/src_obj', swob.HTTPOk, {}, 'passed') + self.app.register( + 'GET', '/v1/tgt_a/tgt_cont/tgt_obj', swob.HTTPOk, + {'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed') + self.app.register( + 'PUT', '/v1/tgt_a/ver_cont/007tgt_obj/0000000001.00000', + swob.HTTPOk, {}, None) + self.app.register( + 'PUT', '/v1/tgt_a/tgt_cont/tgt_obj', swob.HTTPCreated, {}, + 'passed') + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) + req = Request.blank( + '/v1/src_a/src_cont/src_obj', + environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache, + 'CONTENT_LENGTH': '100'}, + headers={'Destination': 'tgt_cont/tgt_obj', + 'Destination-Account': 'tgt_a'}) + status, headers, body = self.call_filter(req) + self.assertEqual(status, '201 Created') + self.assertEqual(len(self.authorized), 
2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/src_a/src_cont/src_obj', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/tgt_a/tgt_cont/tgt_obj', self.authorized[1].path) + self.assertEqual(4, self.app.call_count) + + def test_copy_object_no_versioning_with_container_config_true(self): + # set False to versions_write obviously and expect no extra + # COPY called (just copy object as normal) + self.vw.conf = {'allow_versioned_writes': 'false'} + self.app.register( + 'GET', '/v1/a/src_cont/src_obj', swob.HTTPOk, {}, 'passed') + self.app.register( + 'PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated, {}, 'passed') + cache = FakeCache({'versions': 'ver_cont'}) + req = Request.blank( + '/v1/a/src_cont/src_obj', + environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache}, + headers={'Destination': '/tgt_cont/tgt_obj'}) + status, headers, body = self.call_filter(req) + self.assertEqual(status, '201 Created') + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/src_cont/src_obj', self.authorized[0].path) + self.assertEqual('PUT', self.authorized[1].method) + self.assertEqual('/v1/a/tgt_cont/tgt_obj', self.authorized[1].path) + self.assertEqual(2, self.app.call_count) diff --git a/test/unit/common/test_constraints.py b/test/unit/common/test_constraints.py index 2f7fb85d9b..f9829d81d5 100644 --- a/test/unit/common/test_constraints.py +++ b/test/unit/common/test_constraints.py @@ -173,33 +173,6 @@ class TestConstraints(unittest.TestCase): '/', headers=headers), 'object_name').status_int, HTTP_NOT_IMPLEMENTED) - def test_check_object_creation_copy(self): - headers = {'Content-Length': '0', - 'X-Copy-From': 'c/o2', - 'Content-Type': 'text/plain'} - self.assertEqual(constraints.check_object_creation(Request.blank( - '/', headers=headers), 'object_name'), None) - - headers = {'Content-Length': '1', - 'X-Copy-From': 'c/o2', - 
'Content-Type': 'text/plain'} - self.assertEqual(constraints.check_object_creation(Request.blank( - '/', headers=headers), 'object_name').status_int, - HTTP_BAD_REQUEST) - - headers = {'Transfer-Encoding': 'chunked', - 'X-Copy-From': 'c/o2', - 'Content-Type': 'text/plain'} - self.assertEqual(constraints.check_object_creation(Request.blank( - '/', headers=headers), 'object_name'), None) - - # a content-length header is always required - headers = {'X-Copy-From': 'c/o2', - 'Content-Type': 'text/plain'} - self.assertEqual(constraints.check_object_creation(Request.blank( - '/', headers=headers), 'object_name').status_int, - HTTP_LENGTH_REQUIRED) - def test_check_object_creation_name_length(self): headers = {'Transfer-Encoding': 'chunked', 'Content-Type': 'text/plain'} @@ -459,60 +432,6 @@ class TestConstraints(unittest.TestCase): self.assertTrue(c.MAX_HEADER_SIZE > c.MAX_META_NAME_LENGTH) self.assertTrue(c.MAX_HEADER_SIZE > c.MAX_META_VALUE_LENGTH) - def test_validate_copy_from(self): - req = Request.blank( - '/v/a/c/o', - headers={'x-copy-from': 'c/o2'}) - src_cont, src_obj = constraints.check_copy_from_header(req) - self.assertEqual(src_cont, 'c') - self.assertEqual(src_obj, 'o2') - req = Request.blank( - '/v/a/c/o', - headers={'x-copy-from': 'c/subdir/o2'}) - src_cont, src_obj = constraints.check_copy_from_header(req) - self.assertEqual(src_cont, 'c') - self.assertEqual(src_obj, 'subdir/o2') - req = Request.blank( - '/v/a/c/o', - headers={'x-copy-from': '/c/o2'}) - src_cont, src_obj = constraints.check_copy_from_header(req) - self.assertEqual(src_cont, 'c') - self.assertEqual(src_obj, 'o2') - - def test_validate_bad_copy_from(self): - req = Request.blank( - '/v/a/c/o', - headers={'x-copy-from': 'bad_object'}) - self.assertRaises(HTTPException, - constraints.check_copy_from_header, req) - - def test_validate_destination(self): - req = Request.blank( - '/v/a/c/o', - headers={'destination': 'c/o2'}) - src_cont, src_obj = constraints.check_destination_header(req) - 
self.assertEqual(src_cont, 'c') - self.assertEqual(src_obj, 'o2') - req = Request.blank( - '/v/a/c/o', - headers={'destination': 'c/subdir/o2'}) - src_cont, src_obj = constraints.check_destination_header(req) - self.assertEqual(src_cont, 'c') - self.assertEqual(src_obj, 'subdir/o2') - req = Request.blank( - '/v/a/c/o', - headers={'destination': '/c/o2'}) - src_cont, src_obj = constraints.check_destination_header(req) - self.assertEqual(src_cont, 'c') - self.assertEqual(src_obj, 'o2') - - def test_validate_bad_destination(self): - req = Request.blank( - '/v/a/c/o', - headers={'destination': 'bad_object'}) - self.assertRaises(HTTPException, - constraints.check_destination_header, req) - def test_check_account_format(self): req = Request.blank( '/v/a/c/o', diff --git a/test/unit/common/test_swob.py b/test/unit/common/test_swob.py index 4f8d8f7be9..f1a11e1fcb 100644 --- a/test/unit/common/test_swob.py +++ b/test/unit/common/test_swob.py @@ -431,9 +431,10 @@ class TestRequest(unittest.TestCase): def test_invalid_req_environ_property_args(self): # getter only property try: - swift.common.swob.Request.blank('/', params={'a': 'b'}) + swift.common.swob.Request.blank( + '/', host_url='http://example.com:8080/v1/a/c/o') except TypeError as e: - self.assertEqual("got unexpected keyword argument 'params'", + self.assertEqual("got unexpected keyword argument 'host_url'", str(e)) else: self.assertTrue(False, "invalid req_environ_property " @@ -525,6 +526,14 @@ class TestRequest(unittest.TestCase): self.assertEqual(req.params['a'], 'b') self.assertEqual(req.params['c'], 'd') + new_params = {'e': 'f', 'g': 'h'} + req.params = new_params + self.assertDictEqual(new_params, req.params) + + new_params = (('i', 'j'), ('k', 'l')) + req.params = new_params + self.assertDictEqual(dict(new_params), req.params) + def test_timestamp_missing(self): req = swift.common.swob.Request.blank('/') self.assertRaises(exceptions.InvalidTimestamp, diff --git a/test/unit/common/test_wsgi.py 
b/test/unit/common/test_wsgi.py index f39f215499..e09f339266 100644 --- a/test/unit/common/test_wsgi.py +++ b/test/unit/common/test_wsgi.py @@ -136,6 +136,11 @@ class TestWSGI(unittest.TestCase): expected = swift.common.middleware.gatekeeper.GatekeeperMiddleware self.assertTrue(isinstance(app, expected)) + app = app.app + expected = \ + swift.common.middleware.copy.ServerSideCopyMiddleware + self.assertIsInstance(app, expected) + app = app.app expected = swift.common.middleware.dlo.DynamicLargeObject self.assertTrue(isinstance(app, expected)) @@ -1437,6 +1442,7 @@ class TestPipelineModification(unittest.TestCase): self.assertEqual(self.pipeline_modules(app), ['swift.common.middleware.catch_errors', 'swift.common.middleware.gatekeeper', + 'swift.common.middleware.copy', 'swift.common.middleware.dlo', 'swift.common.middleware.versioned_writes', 'swift.proxy.server']) @@ -1468,6 +1474,7 @@ class TestPipelineModification(unittest.TestCase): self.assertEqual(self.pipeline_modules(app), ['swift.common.middleware.catch_errors', 'swift.common.middleware.gatekeeper', + 'swift.common.middleware.copy', 'swift.common.middleware.dlo', 'swift.common.middleware.versioned_writes', 'swift.common.middleware.healthcheck', @@ -1506,6 +1513,7 @@ class TestPipelineModification(unittest.TestCase): self.assertEqual(self.pipeline_modules(app), ['swift.common.middleware.catch_errors', 'swift.common.middleware.gatekeeper', + 'swift.common.middleware.copy', 'swift.common.middleware.slo', 'swift.common.middleware.dlo', 'swift.common.middleware.versioned_writes', @@ -1605,6 +1613,7 @@ class TestPipelineModification(unittest.TestCase): self.assertEqual(self.pipeline_modules(app), [ 'swift.common.middleware.catch_errors', 'swift.common.middleware.gatekeeper', + 'swift.common.middleware.copy', 'swift.common.middleware.dlo', 'swift.common.middleware.versioned_writes', 'swift.common.middleware.healthcheck', @@ -1619,6 +1628,7 @@ class TestPipelineModification(unittest.TestCase): 
'swift.common.middleware.gatekeeper', 'swift.common.middleware.healthcheck', 'swift.common.middleware.catch_errors', + 'swift.common.middleware.copy', 'swift.common.middleware.dlo', 'swift.common.middleware.versioned_writes', 'swift.proxy.server']) @@ -1632,6 +1642,7 @@ class TestPipelineModification(unittest.TestCase): 'swift.common.middleware.healthcheck', 'swift.common.middleware.catch_errors', 'swift.common.middleware.gatekeeper', + 'swift.common.middleware.copy', 'swift.common.middleware.dlo', 'swift.common.middleware.versioned_writes', 'swift.proxy.server']) @@ -1666,7 +1677,7 @@ class TestPipelineModification(unittest.TestCase): tempdir, policy.ring_name + '.ring.gz') app = wsgi.loadapp(conf_path) - proxy_app = app.app.app.app.app.app + proxy_app = app.app.app.app.app.app.app self.assertEqual(proxy_app.account_ring.serialized_path, account_ring_path) self.assertEqual(proxy_app.container_ring.serialized_path, diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index d18ac4299b..95b92b298a 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -649,7 +649,7 @@ class TestReplicatedObjController(BaseObjectControllerMixin, def test_PUT_error_during_transfer_data(self): class FakeReader(object): def read(self, size): - raise exceptions.ChunkReadError('exception message') + raise IOError('error message') req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT', body='test body') @@ -747,62 +747,6 @@ class TestReplicatedObjController(BaseObjectControllerMixin, resp = req.get_response(self.app) self.assertEqual(resp.status_int, 404) - def test_POST_as_COPY_simple(self): - req = swift.common.swob.Request.blank('/v1/a/c/o', method='POST') - get_resp = [200] * self.obj_ring.replicas + \ - [404] * self.obj_ring.max_more_nodes - put_resp = [201] * self.obj_ring.replicas - codes = get_resp + put_resp - with set_http_connect(*codes): - resp = req.get_response(self.app) - 
self.assertEqual(resp.status_int, 202) - self.assertEqual(req.environ['QUERY_STRING'], '') - self.assertTrue('swift.post_as_copy' in req.environ) - - def test_POST_as_COPY_static_large_object(self): - req = swift.common.swob.Request.blank('/v1/a/c/o', method='POST') - get_resp = [200] * self.obj_ring.replicas + \ - [404] * self.obj_ring.max_more_nodes - put_resp = [201] * self.obj_ring.replicas - codes = get_resp + put_resp - slo_headers = \ - [{'X-Static-Large-Object': True}] * self.obj_ring.replicas - get_headers = slo_headers + [{}] * (len(codes) - len(slo_headers)) - headers = {'headers': get_headers} - with set_http_connect(*codes, **headers): - resp = req.get_response(self.app) - self.assertEqual(resp.status_int, 202) - self.assertEqual(req.environ['QUERY_STRING'], '') - self.assertTrue('swift.post_as_copy' in req.environ) - - def test_POST_delete_at(self): - t = str(int(time.time() + 100)) - req = swob.Request.blank('/v1/a/c/o', method='POST', - headers={'Content-Type': 'foo/bar', - 'X-Delete-At': t}) - post_headers = [] - - def capture_headers(ip, port, device, part, method, path, headers, - **kwargs): - if method == 'POST': - post_headers.append(headers) - x_newest_responses = [200] * self.obj_ring.replicas + \ - [404] * self.obj_ring.max_more_nodes - post_resp = [200] * self.obj_ring.replicas - codes = x_newest_responses + post_resp - with set_http_connect(*codes, give_connect=capture_headers): - resp = req.get_response(self.app) - self.assertEqual(resp.status_int, 200) - self.assertEqual(req.environ['QUERY_STRING'], '') # sanity - self.assertTrue('swift.post_as_copy' in req.environ) - - for given_headers in post_headers: - self.assertEqual(given_headers.get('X-Delete-At'), t) - self.assertTrue('X-Delete-At-Host' in given_headers) - self.assertTrue('X-Delete-At-Device' in given_headers) - self.assertTrue('X-Delete-At-Partition' in given_headers) - self.assertTrue('X-Delete-At-Container' in given_headers) - def test_PUT_delete_at(self): t = 
str(int(time.time() + 100)) req = swob.Request.blank('/v1/a/c/o', method='PUT', body='', @@ -1000,43 +944,6 @@ class TestReplicatedObjController(BaseObjectControllerMixin, resp = req.get_response(self.app) self.assertEqual(resp.status_int, 202) - def test_COPY_simple(self): - req = swift.common.swob.Request.blank( - '/v1/a/c/o', method='COPY', - headers={'Content-Length': 0, - 'Destination': 'c/o-copy'}) - head_resp = [200] * self.obj_ring.replicas + \ - [404] * self.obj_ring.max_more_nodes - put_resp = [201] * self.obj_ring.replicas - codes = head_resp + put_resp - with set_http_connect(*codes): - resp = req.get_response(self.app) - self.assertEqual(resp.status_int, 201) - - def test_PUT_log_info(self): - req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT') - req.headers['x-copy-from'] = 'some/where' - req.headers['Content-Length'] = 0 - # override FakeConn default resp headers to keep log_info clean - resp_headers = {'x-delete-at': None} - head_resp = [200] * self.obj_ring.replicas + \ - [404] * self.obj_ring.max_more_nodes - put_resp = [201] * self.obj_ring.replicas - codes = head_resp + put_resp - with set_http_connect(*codes, headers=resp_headers): - resp = req.get_response(self.app) - self.assertEqual(resp.status_int, 201) - self.assertEqual( - req.environ.get('swift.log_info'), ['x-copy-from:some/where']) - # and then check that we don't do that for originating POSTs - req = swift.common.swob.Request.blank('/v1/a/c/o') - req.method = 'POST' - req.headers['x-copy-from'] = 'else/where' - with set_http_connect(*codes, headers=resp_headers): - resp = req.get_response(self.app) - self.assertEqual(resp.status_int, 202) - self.assertEqual(req.environ.get('swift.log_info'), None) - @patch_policies( [StoragePolicy(0, '1-replica', True), @@ -1397,7 +1304,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): def test_PUT_ec_error_during_transfer_data(self): class FakeReader(object): def read(self, size): - raise 
exceptions.ChunkReadError('exception message') + raise IOError('error message') req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT', body='test body') @@ -1603,72 +1510,6 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): resp = req.get_response(self.app) self.assertEqual(resp.status_int, 201) - def test_COPY_cross_policy_type_from_replicated(self): - self.app.per_container_info = { - 'c1': self.app.container_info.copy(), - 'c2': self.app.container_info.copy(), - } - # make c2 use replicated storage policy 1 - self.app.per_container_info['c2']['storage_policy'] = '1' - - # a put request with copy from source c2 - req = swift.common.swob.Request.blank('/v1/a/c1/o', method='PUT', - body='', headers={ - 'X-Copy-From': 'c2/o'}) - - # c2 get - codes = [200] * self.replicas(POLICIES[1]) - codes += [404] * POLICIES[1].object_ring.max_more_nodes - # c1 put - codes += [201] * self.replicas() - expect_headers = { - 'X-Obj-Metadata-Footer': 'yes', - 'X-Obj-Multiphase-Commit': 'yes' - } - with set_http_connect(*codes, expect_headers=expect_headers): - resp = req.get_response(self.app) - self.assertEqual(resp.status_int, 201) - - def test_COPY_cross_policy_type_to_replicated(self): - self.app.per_container_info = { - 'c1': self.app.container_info.copy(), - 'c2': self.app.container_info.copy(), - } - # make c1 use replicated storage policy 1 - self.app.per_container_info['c1']['storage_policy'] = '1' - - # a put request with copy from source c2 - req = swift.common.swob.Request.blank('/v1/a/c1/o', method='PUT', - body='', headers={ - 'X-Copy-From': 'c2/o'}) - - # c2 get - codes = [404, 200] * self.policy.ec_ndata - headers = { - 'X-Object-Sysmeta-Ec-Content-Length': 0, - } - # c1 put - codes += [201] * self.replicas(POLICIES[1]) - with set_http_connect(*codes, headers=headers): - resp = req.get_response(self.app) - self.assertEqual(resp.status_int, 201) - - def test_COPY_cross_policy_type_unknown(self): - self.app.per_container_info = { - 'c1': 
self.app.container_info.copy(), - 'c2': self.app.container_info.copy(), - } - # make c1 use some made up storage policy index - self.app.per_container_info['c1']['storage_policy'] = '13' - - # a COPY request of c2 with destination in c1 - req = swift.common.swob.Request.blank('/v1/a/c2/o', method='COPY', - body='', headers={ - 'Destination': 'c1/o'}) - with set_http_connect(): - resp = req.get_response(self.app) - self.assertEqual(resp.status_int, 503) - def _make_ec_archive_bodies(self, test_body, policy=None): policy = policy or self.policy segment_size = policy.ec_segment_size @@ -2378,40 +2219,6 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): resp = req.get_response(self.app) self.assertEqual(resp.status_int, 503) - def test_COPY_with_ranges(self): - req = swift.common.swob.Request.blank( - '/v1/a/c/o', method='COPY', - headers={'Destination': 'c1/o', - 'Range': 'bytes=5-10'}) - # turn a real body into fragments - segment_size = self.policy.ec_segment_size - real_body = ('asdf' * segment_size)[:-10] - - # split it up into chunks - chunks = [real_body[x:x + segment_size] - for x in range(0, len(real_body), segment_size)] - - # we need only first chunk to rebuild 5-10 range - fragments = self.policy.pyeclib_driver.encode(chunks[0]) - fragment_payloads = [] - fragment_payloads.append(fragments) - - node_fragments = zip(*fragment_payloads) - self.assertEqual(len(node_fragments), self.replicas()) # sanity - headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body))} - responses = [(200, ''.join(node_fragments[i]), headers) - for i in range(POLICIES.default.ec_ndata)] - responses += [(201, '', {})] * self.obj_ring.replicas - status_codes, body_iter, headers = zip(*responses) - expect_headers = { - 'X-Obj-Metadata-Footer': 'yes', - 'X-Obj-Multiphase-Commit': 'yes' - } - with set_http_connect(*status_codes, body_iter=body_iter, - headers=headers, expect_headers=expect_headers): - resp = req.get_response(self.app) - 
self.assertEqual(resp.status_int, 201) - def test_GET_with_invalid_ranges(self): # real body size is segment_size - 10 (just 1 segment) segment_size = self.policy.ec_segment_size @@ -2424,18 +2231,6 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): self._test_invalid_ranges('GET', real_body, segment_size, '%s-' % (segment_size + 10)) - def test_COPY_with_invalid_ranges(self): - # real body size is segment_size - 10 (just 1 segment) - segment_size = self.policy.ec_segment_size - real_body = ('a' * segment_size)[:-10] - - # range is out of real body but in segment size - self._test_invalid_ranges('COPY', real_body, - segment_size, '%s-' % (segment_size - 10)) - # range is out of both real body and segment size - self._test_invalid_ranges('COPY', real_body, - segment_size, '%s-' % (segment_size + 10)) - def _test_invalid_ranges(self, method, real_body, segment_size, req_range): # make a request with range starts from more than real size. body_etag = md5(real_body).hexdigest() diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 6e55e74fa2..1fc021a542 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -63,7 +63,8 @@ from swift.proxy.controllers.obj import ReplicatedObjectController from swift.account import server as account_server from swift.container import server as container_server from swift.obj import server as object_server -from swift.common.middleware import proxy_logging, versioned_writes +from swift.common.middleware import proxy_logging, versioned_writes, \ + copy from swift.common.middleware.acl import parse_acl, format_acl from swift.common.exceptions import ChunkReadTimeout, DiskFileNotExist, \ APIVersionError, ChunkWriteTimeout @@ -3017,7 +3018,6 @@ class TestObjectController(unittest.TestCase): @unpatch_policies def test_PUT_POST_last_modified(self): prolis = _test_sockets[0] - prosrv = _test_servers[0] def _do_HEAD(): # do a HEAD to get reported last modified time 
@@ -3078,9 +3078,7 @@ class TestObjectController(unittest.TestCase): _do_conditional_GET_checks(last_modified_put) - # now POST to the object using default object_post_as_copy setting - orig_post_as_copy = prosrv.object_post_as_copy - + # now POST to the object # last-modified rounded in sec so sleep a sec to increment sleep(1) @@ -3101,31 +3099,6 @@ class TestObjectController(unittest.TestCase): self.assertNotEqual(last_modified_put, last_modified_head) _do_conditional_GET_checks(last_modified_head) - # now POST using non-default object_post_as_copy setting - try: - # last-modified rounded in sec so sleep a sec to increment - last_modified_post = last_modified_head - sleep(1) - prosrv.object_post_as_copy = not orig_post_as_copy - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('POST /v1/a/c/o.last_modified HTTP/1.1\r\n' - 'Host: localhost\r\nConnection: close\r\n' - 'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 202' - self.assertEqual(headers[:len(exp)], exp) - for line in headers.split('\r\n'): - self.assertFalse(line.startswith(lm_hdr)) - finally: - prosrv.object_post_as_copy = orig_post_as_copy - - # last modified time will have changed due to POST - last_modified_head = _do_HEAD() - self.assertNotEqual(last_modified_post, last_modified_head) - _do_conditional_GET_checks(last_modified_head) - def test_PUT_auto_content_type(self): with save_globals(): controller = ReplicatedObjectController( @@ -3412,59 +3385,6 @@ class TestObjectController(unittest.TestCase): } check_request(request, **expectations) - # and this time with post as copy - self.app.object_post_as_copy = True - self.app.memcache.store = {} - backend_requests = [] - req = Request.blank('/v1/a/c/o', {}, method='POST', - headers={'X-Object-Meta-Color': 'Blue', - 'X-Backend-Storage-Policy-Index': 0}) - with mocked_http_conn( - 200, 200, 200, 200, 200, 201, 201, 201, - headers=resp_headers, 
give_connect=capture_requests - ) as fake_conn: - resp = req.get_response(self.app) - self.assertRaises(StopIteration, fake_conn.code_iter.next) - self.assertEqual(resp.status_int, 202) - self.assertEqual(len(backend_requests), 8) - policy0 = {'X-Backend-Storage-Policy-Index': '0'} - policy1 = {'X-Backend-Storage-Policy-Index': '1'} - expected = [ - # account info - {'method': 'HEAD', 'path': '/0/a'}, - # container info - {'method': 'HEAD', 'path': '/0/a/c'}, - # x-newests - {'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1}, - {'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1}, - {'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1}, - # new writes - {'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0}, - {'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0}, - {'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0}, - ] - for request, expectations in zip(backend_requests, expected): - check_request(request, **expectations) - - def test_POST_as_copy(self): - with save_globals(): - def test_status_map(statuses, expected): - set_http_connect(*statuses) - self.app.memcache.store = {} - req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'}, - headers={'Content-Type': 'foo/bar'}) - self.app.update_request(req) - res = req.get_response(self.app) - expected = str(expected) - self.assertEqual(res.status[:len(expected)], expected) - test_status_map((200, 200, 200, 200, 200, 202, 202, 202), 202) - test_status_map((200, 200, 200, 200, 200, 202, 202, 500), 202) - test_status_map((200, 200, 200, 200, 200, 202, 500, 500), 503) - test_status_map((200, 200, 200, 200, 200, 202, 404, 500), 503) - test_status_map((200, 200, 200, 200, 200, 202, 404, 404), 404) - test_status_map((200, 200, 200, 200, 200, 404, 500, 500), 503) - test_status_map((200, 200, 200, 200, 200, 404, 404, 404), 404) - def test_DELETE(self): with save_globals(): def test_status_map(statuses, expected): @@ -3611,26 +3531,6 @@ class TestObjectController(unittest.TestCase): res = 
req.get_response(self.app) self.assertEqual(res.status_int, 400) - def test_POST_as_copy_meta_val_len(self): - with save_globals(): - limit = constraints.MAX_META_VALUE_LENGTH - set_http_connect(200, 200, 200, 200, 200, 202, 202, 202) - # acct cont objc objc objc obj obj obj - req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'}, - headers={'Content-Type': 'foo/bar', - 'X-Object-Meta-Foo': 'x' * limit}) - self.app.update_request(req) - res = req.get_response(self.app) - self.assertEqual(res.status_int, 202) - set_http_connect(202, 202, 202) - req = Request.blank( - '/v1/a/c/o', {'REQUEST_METHOD': 'POST'}, - headers={'Content-Type': 'foo/bar', - 'X-Object-Meta-Foo': 'x' * (limit + 1)}) - self.app.update_request(req) - res = req.get_response(self.app) - self.assertEqual(res.status_int, 400) - def test_POST_meta_key_len(self): with save_globals(): limit = constraints.MAX_META_NAME_LENGTH @@ -3653,27 +3553,6 @@ class TestObjectController(unittest.TestCase): res = req.get_response(self.app) self.assertEqual(res.status_int, 400) - def test_POST_as_copy_meta_key_len(self): - with save_globals(): - limit = constraints.MAX_META_NAME_LENGTH - set_http_connect(200, 200, 200, 200, 200, 202, 202, 202) - # acct cont objc objc objc obj obj obj - req = Request.blank( - '/v1/a/c/o', {'REQUEST_METHOD': 'POST'}, - headers={'Content-Type': 'foo/bar', - ('X-Object-Meta-' + 'x' * limit): 'x'}) - self.app.update_request(req) - res = req.get_response(self.app) - self.assertEqual(res.status_int, 202) - set_http_connect(202, 202, 202) - req = Request.blank( - '/v1/a/c/o', {'REQUEST_METHOD': 'POST'}, - headers={'Content-Type': 'foo/bar', - ('X-Object-Meta-' + 'x' * (limit + 1)): 'x'}) - self.app.update_request(req) - res = req.get_response(self.app) - self.assertEqual(res.status_int, 400) - def test_POST_meta_count(self): with save_globals(): limit = constraints.MAX_META_COUNT @@ -4419,25 +4298,6 @@ class TestObjectController(unittest.TestCase): resp = controller.POST(req) 
self.assertEqual(resp.status_int, 404) - def test_PUT_POST_as_copy_requires_container_exist(self): - with save_globals(): - self.app.memcache = FakeMemcacheReturnsNone() - controller = ReplicatedObjectController( - self.app, 'account', 'container', 'object') - set_http_connect(200, 404, 404, 404, 200, 200, 200) - req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}) - self.app.update_request(req) - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 404) - - set_http_connect(200, 404, 404, 404, 200, 200, 200, 200, 200, 200) - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'POST'}, - headers={'Content-Type': 'text/plain'}) - self.app.update_request(req) - resp = controller.POST(req) - self.assertEqual(resp.status_int, 404) - def test_bad_metadata(self): with save_globals(): controller = ReplicatedObjectController( @@ -4554,755 +4414,6 @@ class TestObjectController(unittest.TestCase): raise self.fail('UN-USED STATUS CODES: %r' % unused_status_list) - def test_basic_put_with_x_copy_from(self): - req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': 'c/o'}) - status_list = (200, 200, 200, 200, 200, 201, 201, 201) - # acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o') - - def test_basic_put_with_x_copy_from_account(self): - req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': 'c/o', - 'X-Copy-From-Account': 'a'}) - status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201) - # acct cont acc1 con1 objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o') - 
self.assertEqual(resp.headers['x-copied-from-account'], 'a') - - def test_basic_put_with_x_copy_from_across_container(self): - req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': 'c2/o'}) - status_list = (200, 200, 200, 200, 200, 200, 201, 201, 201) - # acct cont conc objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c2/o') - - def test_basic_put_with_x_copy_from_across_container_and_account(self): - req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': 'c2/o', - 'X-Copy-From-Account': 'a'}) - status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201) - # acct cont acc1 con1 objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c2/o') - self.assertEqual(resp.headers['x-copied-from-account'], 'a') - - def test_copy_non_zero_content_length(self): - req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '5', - 'X-Copy-From': 'c/o'}) - status_list = (200, 200) - # acct cont - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 400) - - def test_copy_non_zero_content_length_with_account(self): - req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '5', - 'X-Copy-From': 'c/o', - 'X-Copy-From-Account': 'a'}) - status_list = (200, 200) - # acct cont - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 400) - - def test_copy_with_slashes_in_x_copy_from(self): - # extra source path 
parsing - req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': 'c/o/o2'}) - status_list = (200, 200, 200, 200, 200, 201, 201, 201) - # acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2') - - def test_copy_with_slashes_in_x_copy_from_and_account(self): - # extra source path parsing - req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': 'c/o/o2', - 'X-Copy-From-Account': 'a'}) - status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201) - # acct cont acc1 con1 objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2') - self.assertEqual(resp.headers['x-copied-from-account'], 'a') - - def test_copy_with_spaces_in_x_copy_from(self): - # space in soure path - req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': 'c/o%20o2'}) - status_list = (200, 200, 200, 200, 200, 201, 201, 201) - # acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o%20o2') - - def test_copy_with_spaces_in_x_copy_from_and_account(self): - # space in soure path - req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': 'c/o%20o2', - 'X-Copy-From-Account': 'a'}) - status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201) - # acct cont acc1 con1 objc objc objc obj obj obj - with self.controller_context(req, *status_list) as 
controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o%20o2') - self.assertEqual(resp.headers['x-copied-from-account'], 'a') - - def test_copy_with_leading_slash_in_x_copy_from(self): - # repeat tests with leading / - req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': '/c/o'}) - status_list = (200, 200, 200, 200, 200, 201, 201, 201) - # acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o') - - def test_copy_with_leading_slash_in_x_copy_from_and_account(self): - # repeat tests with leading / - req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': '/c/o', - 'X-Copy-From-Account': 'a'}) - status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201) - # acct cont acc1 con1 objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o') - self.assertEqual(resp.headers['x-copied-from-account'], 'a') - - def test_copy_with_leading_slash_and_slashes_in_x_copy_from(self): - req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': '/c/o/o2'}) - status_list = (200, 200, 200, 200, 200, 201, 201, 201) - # acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2') - - def test_copy_with_leading_slash_and_slashes_in_x_copy_from_acct(self): - req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 
'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': '/c/o/o2', - 'X-Copy-From-Account': 'a'}) - status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201) - # acct cont acc1 con1 objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2') - self.assertEqual(resp.headers['x-copied-from-account'], 'a') - - def test_copy_with_no_object_in_x_copy_from(self): - req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': '/c'}) - status_list = (200, 200) - # acct cont - with self.controller_context(req, *status_list) as controller: - try: - controller.PUT(req) - except HTTPException as resp: - self.assertEqual(resp.status_int // 100, 4) # client error - else: - raise self.fail('Invalid X-Copy-From did not raise ' - 'client error') - - def test_copy_with_no_object_in_x_copy_from_and_account(self): - req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': '/c', - 'X-Copy-From-Account': 'a'}) - status_list = (200, 200) - # acct cont - with self.controller_context(req, *status_list) as controller: - try: - controller.PUT(req) - except HTTPException as resp: - self.assertEqual(resp.status_int // 100, 4) # client error - else: - raise self.fail('Invalid X-Copy-From did not raise ' - 'client error') - - def test_copy_server_error_reading_source(self): - req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': '/c/o'}) - status_list = (200, 200, 503, 503, 503) - # acct cont objc objc objc - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 503) - - def test_copy_server_error_reading_source_and_account(self): - req = Request.blank('/v1/a1/c1/o', 
environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': '/c/o', - 'X-Copy-From-Account': 'a'}) - status_list = (200, 200, 200, 200, 503, 503, 503) - # acct cont acct cont objc objc objc - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 503) - - def test_copy_not_found_reading_source(self): - req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': '/c/o'}) - # not found - status_list = (200, 200, 404, 404, 404) - # acct cont objc objc objc - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 404) - - def test_copy_not_found_reading_source_and_account(self): - req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': '/c/o', - 'X-Copy-From-Account': 'a'}) - # not found - status_list = (200, 200, 200, 200, 404, 404, 404) - # acct cont acct cont objc objc objc - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 404) - - def test_copy_with_some_missing_sources(self): - req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': '/c/o'}) - status_list = (200, 200, 404, 404, 200, 201, 201, 201) - # acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 201) - - def test_copy_with_some_missing_sources_and_account(self): - req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': '/c/o', - 'X-Copy-From-Account': 'a'}) - status_list = (200, 200, 200, 200, 404, 404, 200, 201, 201, 201) - # acct cont acct cont objc objc objc obj obj obj - with 
self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 201) - - def test_copy_with_object_metadata(self): - req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': '/c/o', - 'X-Object-Meta-Ours': 'okay'}) - # test object metadata - status_list = (200, 200, 200, 200, 200, 201, 201, 201) - # acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers.get('x-object-meta-test'), 'testing') - self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay') - self.assertEqual(resp.headers.get('x-delete-at'), '9876543210') - - def test_copy_with_object_metadata_and_account(self): - req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': '/c/o', - 'X-Object-Meta-Ours': 'okay', - 'X-Copy-From-Account': 'a'}) - # test object metadata - status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201) - # acct cont acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.PUT(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers.get('x-object-meta-test'), 'testing') - self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay') - self.assertEqual(resp.headers.get('x-delete-at'), '9876543210') - - @_limit_max_file_size - def test_copy_source_larger_than_max_file_size(self): - req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0', - 'X-Copy-From': '/c/o'}) - # copy-from object is too large to fit in target object - - class LargeResponseBody(object): - - def __len__(self): - return constraints.MAX_FILE_SIZE + 1 - - def __getitem__(self, key): - return '' - - copy_from_obj_body = LargeResponseBody() - 
status_list = (200, 200, 200, 200, 200) - # acct cont objc objc objc - kwargs = dict(body=copy_from_obj_body) - with self.controller_context(req, *status_list, - **kwargs) as controller: - self.app.update_request(req) - - self.app.memcache.store = {} - try: - resp = controller.PUT(req) - except HTTPException as resp: - pass - self.assertEqual(resp.status_int, 413) - - def test_basic_COPY(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': 'c/o2'}) - status_list = (200, 200, 200, 200, 200, 201, 201, 201) - # acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o') - - def test_basic_COPY_account(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': 'c1/o2', - 'Destination-Account': 'a1'}) - status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201) - # acct cont acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o') - self.assertEqual(resp.headers['x-copied-from-account'], 'a') - - def test_COPY_across_containers(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': 'c2/o'}) - status_list = (200, 200, 200, 200, 200, 200, 201, 201, 201) - # acct cont c2 objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o') - - def test_COPY_source_with_slashes_in_name(self): - req = Request.blank('/v1/a/c/o/o2', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': 'c/o'}) - status_list = (200, 200, 200, 200, 
200, 201, 201, 201) - # acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2') - - def test_COPY_account_source_with_slashes_in_name(self): - req = Request.blank('/v1/a/c/o/o2', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': 'c1/o', - 'Destination-Account': 'a1'}) - status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201) - # acct cont acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2') - self.assertEqual(resp.headers['x-copied-from-account'], 'a') - - def test_COPY_destination_leading_slash(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c/o'}) - status_list = (200, 200, 200, 200, 200, 201, 201, 201) - # acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o') - - def test_COPY_account_destination_leading_slash(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c1/o', - 'Destination-Account': 'a1'}) - status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201) - # acct cont acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o') - self.assertEqual(resp.headers['x-copied-from-account'], 'a') - - def test_COPY_source_with_slashes_destination_leading_slash(self): - req = Request.blank('/v1/a/c/o/o2', - environ={'REQUEST_METHOD': 
'COPY'}, - headers={'Destination': '/c/o'}) - status_list = (200, 200, 200, 200, 200, 201, 201, 201) - # acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2') - - def test_COPY_account_source_with_slashes_destination_leading_slash(self): - req = Request.blank('/v1/a/c/o/o2', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c1/o', - 'Destination-Account': 'a1'}) - status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201) - # acct cont acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2') - self.assertEqual(resp.headers['x-copied-from-account'], 'a') - - def test_COPY_no_object_in_destination(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': 'c_o'}) - status_list = [] # no requests needed - with self.controller_context(req, *status_list) as controller: - self.assertRaises(HTTPException, controller.COPY, req) - - def test_COPY_account_no_object_in_destination(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': 'c_o', - 'Destination-Account': 'a1'}) - status_list = [] # no requests needed - with self.controller_context(req, *status_list) as controller: - self.assertRaises(HTTPException, controller.COPY, req) - - def test_COPY_server_error_reading_source(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c/o'}) - status_list = (200, 200, 503, 503, 503) - # acct cont objc objc objc - with self.controller_context(req, *status_list) as controller: - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 503) - - def 
test_COPY_account_server_error_reading_source(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c1/o', - 'Destination-Account': 'a1'}) - status_list = (200, 200, 200, 200, 503, 503, 503) - # acct cont acct cont objc objc objc - with self.controller_context(req, *status_list) as controller: - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 503) - - def test_COPY_not_found_reading_source(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c/o'}) - status_list = (200, 200, 404, 404, 404) - # acct cont objc objc objc - with self.controller_context(req, *status_list) as controller: - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 404) - - def test_COPY_account_not_found_reading_source(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c1/o', - 'Destination-Account': 'a1'}) - status_list = (200, 200, 200, 200, 404, 404, 404) - # acct cont acct cont objc objc objc - with self.controller_context(req, *status_list) as controller: - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 404) - - def test_COPY_with_some_missing_sources(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c/o'}) - status_list = (200, 200, 404, 404, 200, 201, 201, 201) - # acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 201) - - def test_COPY_account_with_some_missing_sources(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c1/o', - 'Destination-Account': 'a1'}) - status_list = (200, 200, 200, 200, 404, 404, 200, 201, 201, 201) - # acct cont acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = 
controller.COPY(req) - self.assertEqual(resp.status_int, 201) - - def test_COPY_with_metadata(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c/o', - 'X-Object-Meta-Ours': 'okay'}) - status_list = (200, 200, 200, 200, 200, 201, 201, 201) - # acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers.get('x-object-meta-test'), - 'testing') - self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay') - self.assertEqual(resp.headers.get('x-delete-at'), '9876543210') - - def test_COPY_account_with_metadata(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c1/o', - 'X-Object-Meta-Ours': 'okay', - 'Destination-Account': 'a1'}) - status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201) - # acct cont acct cont objc objc objc obj obj obj - with self.controller_context(req, *status_list) as controller: - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers.get('x-object-meta-test'), - 'testing') - self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay') - self.assertEqual(resp.headers.get('x-delete-at'), '9876543210') - - @_limit_max_file_size - def test_COPY_source_larger_than_max_file_size(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c/o'}) - - class LargeResponseBody(object): - - def __len__(self): - return constraints.MAX_FILE_SIZE + 1 - - def __getitem__(self, key): - return '' - - copy_from_obj_body = LargeResponseBody() - status_list = (200, 200, 200, 200, 200) - # acct cont objc objc objc - kwargs = dict(body=copy_from_obj_body) - with self.controller_context(req, *status_list, - **kwargs) as controller: - try: - resp = controller.COPY(req) - except HTTPException as resp: - 
pass - self.assertEqual(resp.status_int, 413) - - @_limit_max_file_size - def test_COPY_account_source_larger_than_max_file_size(self): - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c1/o', - 'Destination-Account': 'a1'}) - - class LargeResponseBody(object): - - def __len__(self): - return constraints.MAX_FILE_SIZE + 1 - - def __getitem__(self, key): - return '' - - copy_from_obj_body = LargeResponseBody() - status_list = (200, 200, 200, 200, 200) - # acct cont objc objc objc - kwargs = dict(body=copy_from_obj_body) - with self.controller_context(req, *status_list, - **kwargs) as controller: - try: - resp = controller.COPY(req) - except HTTPException as resp: - pass - self.assertEqual(resp.status_int, 413) - - def test_COPY_newest(self): - with save_globals(): - controller = ReplicatedObjectController( - self.app, 'a', 'c', 'o') - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c/o'}) - req.account = 'a' - controller.object_name = 'o' - set_http_connect(200, 200, 200, 200, 200, 201, 201, 201, - # act cont objc objc objc obj obj obj - timestamps=('1', '1', '1', '3', '2', '4', '4', - '4')) - self.app.memcache.store = {} - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 201) - self.assertEqual(resp.headers['x-copied-from-last-modified'], - '3') - - def test_COPY_account_newest(self): - with save_globals(): - controller = ReplicatedObjectController( - self.app, 'a', 'c', 'o') - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c1/o', - 'Destination-Account': 'a1'}) - req.account = 'a' - controller.object_name = 'o' - set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201, - # act cont acct cont objc objc objc obj obj obj - timestamps=('1', '1', '1', '1', '3', '2', '1', - '4', '4', '4')) - self.app.memcache.store = {} - resp = controller.COPY(req) - self.assertEqual(resp.status_int, 201) - 
self.assertEqual(resp.headers['x-copied-from-last-modified'], - '3') - - def test_COPY_delete_at(self): - with save_globals(): - backend_requests = [] - - def capture_requests(ipaddr, port, device, partition, method, path, - headers=None, query_string=None): - backend_requests.append((method, path, headers)) - - controller = ReplicatedObjectController( - self.app, 'a', 'c', 'o') - set_http_connect(200, 200, 200, 200, 200, 201, 201, 201, - give_connect=capture_requests) - self.app.memcache.store = {} - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c/o'}) - - self.app.update_request(req) - resp = controller.COPY(req) - self.assertEqual(201, resp.status_int) # sanity - for method, path, given_headers in backend_requests: - if method != 'PUT': - continue - self.assertEqual(given_headers.get('X-Delete-At'), - '9876543210') - self.assertTrue('X-Delete-At-Host' in given_headers) - self.assertTrue('X-Delete-At-Device' in given_headers) - self.assertTrue('X-Delete-At-Partition' in given_headers) - self.assertTrue('X-Delete-At-Container' in given_headers) - - def test_COPY_account_delete_at(self): - with save_globals(): - backend_requests = [] - - def capture_requests(ipaddr, port, device, partition, method, path, - headers=None, query_string=None): - backend_requests.append((method, path, headers)) - - controller = ReplicatedObjectController( - self.app, 'a', 'c', 'o') - set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201, - give_connect=capture_requests) - self.app.memcache.store = {} - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': '/c1/o', - 'Destination-Account': 'a1'}) - - self.app.update_request(req) - resp = controller.COPY(req) - self.assertEqual(201, resp.status_int) # sanity - for method, path, given_headers in backend_requests: - if method != 'PUT': - continue - self.assertEqual(given_headers.get('X-Delete-At'), - '9876543210') - 
self.assertTrue('X-Delete-At-Host' in given_headers) - self.assertTrue('X-Delete-At-Device' in given_headers) - self.assertTrue('X-Delete-At-Partition' in given_headers) - self.assertTrue('X-Delete-At-Container' in given_headers) - - def test_chunked_put(self): - - class ChunkedFile(object): - - def __init__(self, bytes): - self.bytes = bytes - self.read_bytes = 0 - - @property - def bytes_left(self): - return self.bytes - self.read_bytes - - def read(self, amt=None): - if self.read_bytes >= self.bytes: - raise StopIteration() - if not amt: - amt = self.bytes_left - data = 'a' * min(amt, self.bytes_left) - self.read_bytes += len(data) - return data - - with save_globals(): - set_http_connect(201, 201, 201, 201) - controller = ReplicatedObjectController( - self.app, 'account', 'container', 'object') - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Transfer-Encoding': 'chunked', - 'Content-Type': 'foo/bar'}) - - req.body_file = ChunkedFile(10) - self.app.memcache.store = {} - self.app.update_request(req) - res = controller.PUT(req) - self.assertEqual(res.status_int // 100, 2) # success - - # test 413 entity to large - set_http_connect(201, 201, 201, 201) - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Transfer-Encoding': 'chunked', - 'Content-Type': 'foo/bar'}) - req.body_file = ChunkedFile(11) - self.app.memcache.store = {} - self.app.update_request(req) - - with mock.patch('swift.common.constraints.MAX_FILE_SIZE', 10): - res = controller.PUT(req) - self.assertEqual(res.status_int, 413) - @unpatch_policies def test_chunked_put_bad_version(self): # Check bad version @@ -5720,24 +4831,6 @@ class TestObjectController(unittest.TestCase): controller.POST(req) self.assertTrue(called[0]) - def test_POST_as_copy_calls_authorize(self): - called = [False] - - def authorize(req): - called[0] = True - return HTTPUnauthorized(request=req) - with save_globals(): - set_http_connect(200, 200, 200, 200, 200, 
201, 201, 201) - controller = ReplicatedObjectController( - self.app, 'account', 'container', 'object') - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'POST'}, - headers={'Content-Length': '5'}, body='12345') - req.environ['swift.authorize'] = authorize - self.app.update_request(req) - controller.POST(req) - self.assertTrue(called[0]) - def test_PUT_calls_authorize(self): called = [False] @@ -5755,24 +4848,6 @@ class TestObjectController(unittest.TestCase): controller.PUT(req) self.assertTrue(called[0]) - def test_COPY_calls_authorize(self): - called = [False] - - def authorize(req): - called[0] = True - return HTTPUnauthorized(request=req) - with save_globals(): - set_http_connect(200, 200, 200, 200, 200, 201, 201, 201) - controller = ReplicatedObjectController( - self.app, 'account', 'container', 'object') - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'COPY'}, - headers={'Destination': 'c/o'}) - req.environ['swift.authorize'] = authorize - self.app.update_request(req) - controller.COPY(req) - self.assertTrue(called[0]) - def test_POST_converts_delete_after_to_delete_at(self): with save_globals(): self.app.object_post_as_copy = False @@ -6021,12 +5096,12 @@ class TestObjectController(unittest.TestCase): self.assertEqual( 'https://foo.bar', resp.headers['access-control-allow-origin']) - for verb in 'OPTIONS COPY GET POST PUT DELETE HEAD'.split(): + for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split(): self.assertTrue( verb in resp.headers['access-control-allow-methods']) self.assertEqual( len(resp.headers['access-control-allow-methods'].split(', ')), - 7) + 6) self.assertEqual('999', resp.headers['access-control-max-age']) req = Request.blank( '/v1/a/c/o.jpg', @@ -6039,10 +5114,10 @@ class TestObjectController(unittest.TestCase): req.content_length = 0 resp = controller.OPTIONS(req) self.assertEqual(200, resp.status_int) - for verb in 'OPTIONS COPY GET POST PUT DELETE HEAD'.split(): + for verb in 'OPTIONS GET POST PUT DELETE 
HEAD'.split(): self.assertTrue( verb in resp.headers['Allow']) - self.assertEqual(len(resp.headers['Allow'].split(', ')), 7) + self.assertEqual(len(resp.headers['Allow'].split(', ')), 6) req = Request.blank( '/v1/a/c/o.jpg', {'REQUEST_METHOD': 'OPTIONS'}, @@ -6075,12 +5150,12 @@ class TestObjectController(unittest.TestCase): resp = controller.OPTIONS(req) self.assertEqual(200, resp.status_int) self.assertEqual('*', resp.headers['access-control-allow-origin']) - for verb in 'OPTIONS COPY GET POST PUT DELETE HEAD'.split(): + for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split(): self.assertTrue( verb in resp.headers['access-control-allow-methods']) self.assertEqual( len(resp.headers['access-control-allow-methods'].split(', ')), - 7) + 6) self.assertEqual('999', resp.headers['access-control-max-age']) def _get_CORS_response(self, container_cors, strict_mode, object_get=None): @@ -9232,9 +8307,10 @@ class TestSocketObjectVersions(unittest.TestCase): conf = {'devices': _testdir, 'swift_dir': _testdir, 'mount_check': 'false', 'allowed_headers': allowed_headers} prosrv = versioned_writes.VersionedWritesMiddleware( - proxy_logging.ProxyLoggingMiddleware( - _test_servers[0], conf, - logger=_test_servers[0].logger), + copy.ServerSideCopyMiddleware( + proxy_logging.ProxyLoggingMiddleware( + _test_servers[0], conf, + logger=_test_servers[0].logger), conf), {}) self.coro = spawn(wsgi.server, prolis, prosrv, NullLogger()) # replace global prosrv with one that's filtered with version diff --git a/test/unit/proxy/test_sysmeta.py b/test/unit/proxy/test_sysmeta.py index 3b3f8ddfd9..9548680791 100644 --- a/test/unit/proxy/test_sysmeta.py +++ b/test/unit/proxy/test_sysmeta.py @@ -283,94 +283,3 @@ class TestObjectSysmeta(unittest.TestCase): self._assertInHeaders(resp, self.changed_sysmeta_headers) self._assertInHeaders(resp, self.new_sysmeta_headers) self._assertNotInHeaders(resp, self.original_sysmeta_headers_2) - - def test_sysmeta_not_updated_by_POST(self): - 
self.app.object_post_as_copy = False - self._test_sysmeta_not_updated_by_POST() - - def test_sysmeta_not_updated_by_POST_as_copy(self): - self.app.object_post_as_copy = True - self._test_sysmeta_not_updated_by_POST() - - def test_sysmeta_updated_by_COPY(self): - # check sysmeta is updated by a COPY in same way as user meta - path = '/v1/a/c/o' - dest = '/c/o2' - env = {'REQUEST_METHOD': 'PUT'} - hdrs = dict(self.original_sysmeta_headers_1) - hdrs.update(self.original_sysmeta_headers_2) - hdrs.update(self.original_meta_headers_1) - hdrs.update(self.original_meta_headers_2) - req = Request.blank(path, environ=env, headers=hdrs, body='x') - resp = req.get_response(self.app) - self._assertStatus(resp, 201) - - env = {'REQUEST_METHOD': 'COPY'} - hdrs = dict(self.changed_sysmeta_headers) - hdrs.update(self.new_sysmeta_headers) - hdrs.update(self.changed_meta_headers) - hdrs.update(self.new_meta_headers) - hdrs.update(self.bad_headers) - hdrs.update({'Destination': dest}) - req = Request.blank(path, environ=env, headers=hdrs) - resp = req.get_response(self.app) - self._assertStatus(resp, 201) - self._assertInHeaders(resp, self.changed_sysmeta_headers) - self._assertInHeaders(resp, self.new_sysmeta_headers) - self._assertInHeaders(resp, self.original_sysmeta_headers_2) - self._assertInHeaders(resp, self.changed_meta_headers) - self._assertInHeaders(resp, self.new_meta_headers) - self._assertInHeaders(resp, self.original_meta_headers_2) - self._assertNotInHeaders(resp, self.bad_headers) - - req = Request.blank('/v1/a/c/o2', environ={}) - resp = req.get_response(self.app) - self._assertStatus(resp, 200) - self._assertInHeaders(resp, self.changed_sysmeta_headers) - self._assertInHeaders(resp, self.new_sysmeta_headers) - self._assertInHeaders(resp, self.original_sysmeta_headers_2) - self._assertInHeaders(resp, self.changed_meta_headers) - self._assertInHeaders(resp, self.new_meta_headers) - self._assertInHeaders(resp, self.original_meta_headers_2) - 
self._assertNotInHeaders(resp, self.bad_headers) - - def test_sysmeta_updated_by_COPY_from(self): - # check sysmeta is updated by a COPY in same way as user meta - path = '/v1/a/c/o' - env = {'REQUEST_METHOD': 'PUT'} - hdrs = dict(self.original_sysmeta_headers_1) - hdrs.update(self.original_sysmeta_headers_2) - hdrs.update(self.original_meta_headers_1) - hdrs.update(self.original_meta_headers_2) - req = Request.blank(path, environ=env, headers=hdrs, body='x') - resp = req.get_response(self.app) - self._assertStatus(resp, 201) - - env = {'REQUEST_METHOD': 'PUT'} - hdrs = dict(self.changed_sysmeta_headers) - hdrs.update(self.new_sysmeta_headers) - hdrs.update(self.changed_meta_headers) - hdrs.update(self.new_meta_headers) - hdrs.update(self.bad_headers) - hdrs.update({'X-Copy-From': '/c/o'}) - req = Request.blank('/v1/a/c/o2', environ=env, headers=hdrs, body='') - resp = req.get_response(self.app) - self._assertStatus(resp, 201) - self._assertInHeaders(resp, self.changed_sysmeta_headers) - self._assertInHeaders(resp, self.new_sysmeta_headers) - self._assertInHeaders(resp, self.original_sysmeta_headers_2) - self._assertInHeaders(resp, self.changed_meta_headers) - self._assertInHeaders(resp, self.new_meta_headers) - self._assertInHeaders(resp, self.original_meta_headers_2) - self._assertNotInHeaders(resp, self.bad_headers) - - req = Request.blank('/v1/a/c/o2', environ={}) - resp = req.get_response(self.app) - self._assertStatus(resp, 200) - self._assertInHeaders(resp, self.changed_sysmeta_headers) - self._assertInHeaders(resp, self.new_sysmeta_headers) - self._assertInHeaders(resp, self.original_sysmeta_headers_2) - self._assertInHeaders(resp, self.changed_meta_headers) - self._assertInHeaders(resp, self.new_meta_headers) - self._assertInHeaders(resp, self.original_meta_headers_2) - self._assertNotInHeaders(resp, self.bad_headers) From ad16e2c77bb61bdf51a7d3b2c258daf69bfc74da Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 11 May 2016 19:54:47 -0700 Subject: [PATCH 
115/141] Stop complaining about auditor_status files Following fd86d5a, the object-auditor would leave status files so it could resume where it left off if restarted. However, this would also cause the object-reconstructor to print warnings like: Unexpected entity in data dir: u'/srv/node4/sdb8/objects/auditor_status_ZBF.json' ...which isn't actually terribly useful or actionable. The auditor will clean it up (eventually); the operator doesn't have to do anything. Now, the reconstructor will specifically ignore those status files. Change-Id: I2f3d0bd2f1e242db6eb263c7755f1363d1430048 --- swift/obj/reconstructor.py | 3 +++ test/unit/obj/test_reconstructor.py | 39 +++++++++++++++++++++++++++-- 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py index 2c9e2b4c82..2b54ab89d2 100644 --- a/swift/obj/reconstructor.py +++ b/swift/obj/reconstructor.py @@ -843,6 +843,9 @@ class ObjectReconstructor(Daemon): self.part_count += len(partitions) for partition in partitions: part_path = join(obj_path, partition) + if partition in ('auditor_status_ALL.json', + 'auditor_status_ZBF.json'): + continue if not (partition.isdigit() and os.path.isdir(part_path)): self.logger.warning( diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py index 13d29562a2..cf8474888b 100755 --- a/test/unit/obj/test_reconstructor.py +++ b/test/unit/obj/test_reconstructor.py @@ -823,8 +823,43 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): self.assertFalse(os.path.exists(pol_1_part_1_path)) warnings = self.reconstructor.logger.get_lines_for_level('warning') self.assertEqual(1, len(warnings)) - self.assertTrue('Unexpected entity in data dir:' in warnings[0], - 'Warning not found in %s' % warnings) + self.assertIn('Unexpected entity in data dir:', warnings[0]) + + def test_ignores_status_file(self): + # Following fd86d5a, the auditor will leave status files on each device + # until an audit can 
complete. The reconstructor should ignore these + + @contextmanager + def status_files(*auditor_types): + status_paths = [os.path.join(self.objects_1, + 'auditor_status_%s.json' % typ) + for typ in auditor_types] + for status_path in status_paths: + self.assertFalse(os.path.exists(status_path)) # sanity check + with open(status_path, 'w'): + pass + self.assertTrue(os.path.isfile(status_path)) # sanity check + try: + yield status_paths + finally: + for status_path in status_paths: + try: + os.unlink(status_path) + except OSError as e: + if e.errno != 2: + raise + + # since our collect_parts job is a generator, that yields directly + # into build_jobs and then spawns it's safe to do the remove_files + # without making reconstructor startup slow + with status_files('ALL', 'ZBF') as status_paths: + self.reconstructor._reset_stats() + for part_info in self.reconstructor.collect_parts(): + self.assertNotIn(part_info['part_path'], status_paths) + warnings = self.reconstructor.logger.get_lines_for_level('warning') + self.assertEqual(0, len(warnings)) + for status_path in status_paths: + self.assertTrue(os.path.exists(status_path)) def _make_fake_ssync(self, ssync_calls): class _fake_ssync(object): From 92274d479010f691004b1885e354224dc9c3b61c Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Mon, 28 Mar 2016 18:21:22 +0200 Subject: [PATCH 116/141] List system dependencies for running common tests Add an other-requirements.txt file containing a cross-platform list of dependencies needed for running included tox-based tests. Also include a tox environment for convenience calling the bindep[*] utility to list any missing system requirements. Document bindep and other-requirements usage. This change is self-testing. 
For other-requirements.txt see also http://docs.openstack.org/infra/manual/drivers.html#package-requirements [*] http://docs.openstack.org/infra/bindep/ Change-Id: Iea6f5fecba3b7cb9f6dac7029c0f17fc31fc0e3c --- doc/source/development_guidelines.rst | 6 ++++++ doc/source/getting_started.rst | 1 + other-requirements.txt | 15 +++++++++++++++ tox.ini | 8 ++++++++ 4 files changed, 30 insertions(+) create mode 100644 other-requirements.txt diff --git a/doc/source/development_guidelines.rst b/doc/source/development_guidelines.rst index fd3607015f..6f0012c35f 100644 --- a/doc/source/development_guidelines.rst +++ b/doc/source/development_guidelines.rst @@ -27,6 +27,12 @@ To execute the tests: pip install tox +* Generate list of distribution packages to install for testing:: + + tox -e bindep + + Now install these packages using your distribution package manager + like apt-get, dnf, yum, or zypper. * Run Tox from the root of the swift repo:: diff --git a/doc/source/getting_started.rst b/doc/source/getting_started.rst index ba8790821b..8308c130ae 100644 --- a/doc/source/getting_started.rst +++ b/doc/source/getting_started.rst @@ -15,6 +15,7 @@ Swift is written in Python and has these dependencies: * rsync 3.0 * The Python packages listed in `the requirements file `_ * Testing additionally requires `the test dependencies `_ +* Testing requires `these distribution packages `_ There is no current support for Python 3. diff --git a/other-requirements.txt b/other-requirements.txt new file mode 100644 index 0000000000..394f2b0f7a --- /dev/null +++ b/other-requirements.txt @@ -0,0 +1,15 @@ +# This is a cross-platform list tracking distribution packages needed by tests; +# see http://docs.openstack.org/infra/bindep/ for additional information. 
+ +build-essential [platform:dpkg] +gcc [platform:rpm] +gettext +liberasurecode-dev [platform:dpkg] +liberasurecode-devel [platform:rpm] +libffi-dev [platform:dpkg] +libffi-devel [platform:rpm] +memcached +python-dev [platform:dpkg] +python-devel [platform:rpm] +rsync +xfsprogs diff --git a/tox.ini b/tox.ini index f016316c49..68c1f6a0bd 100644 --- a/tox.ini +++ b/tox.ini @@ -80,3 +80,11 @@ commands = bandit -c bandit.yaml -r swift bin -n 5 -p gate ignore = F812,H101,H202,H233,H301,H306,H401,H403,H404,H405,H501,H703 exclude = .venv,.tox,dist,*egg show-source = True + +[testenv:bindep] +# Do not install any requirements. We want this to be fast and work even if +# system dependencies are missing, since it's used to tell you what system +# dependencies are missing! This also means that bindep must be installed +# separately, outside of the requirements files. +deps = bindep +commands = bindep test From 9729bc83eb6845e65f8244b4104b2c1678f8fe38 Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Mon, 11 Apr 2016 11:56:07 +0200 Subject: [PATCH 117/141] Don't delete misplaced dbs if not replicated If one uses only a single replica and a database file is placed on a wrong partition, it will be removed instead of replicated to the correct partition. There are two reasons for this: 1. The list of nodes is empty when there is only a single replica 2. all(responses) is True even if there is no response at all, and the latter is always True if there is no node to replicate to. This patch fixes this by adding a special case if used with only one replica to the node selection loop and ensures that the list of responses is not empty. Also adds a test that fails on current master and passes with this change. 
Closes-Bug: 1568591 Change-Id: I028ea8c1928e8c9a401db31fb266ff82606f8371 --- swift/common/db_replicator.py | 13 +++++++----- test/unit/common/test_db_replicator.py | 29 +++++++++++++++++++++++++- 2 files changed, 36 insertions(+), 6 deletions(-) diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py index d4abf25efe..7115a8afe4 100644 --- a/swift/common/db_replicator.py +++ b/swift/common/db_replicator.py @@ -525,10 +525,13 @@ class Replicator(Daemon): if shouldbehere: shouldbehere = bool([n for n in nodes if n['id'] == node_id]) # See Footnote [1] for an explanation of the repl_nodes assignment. - i = 0 - while i < len(nodes) and nodes[i]['id'] != node_id: - i += 1 - repl_nodes = nodes[i + 1:] + nodes[:i] + if len(nodes) > 1: + i = 0 + while i < len(nodes) and nodes[i]['id'] != node_id: + i += 1 + repl_nodes = nodes[i + 1:] + nodes[:i] + else: # Special case if using only a single replica + repl_nodes = nodes more_nodes = self.ring.get_more_nodes(int(partition)) if not local_dev: # Check further if local device is a handoff node @@ -563,7 +566,7 @@ class Replicator(Daemon): except (Exception, Timeout): self.logger.exception('UNHANDLED EXCEPTION: in post replicate ' 'hook for %s', broker.db_file) - if not shouldbehere and all(responses): + if not shouldbehere and responses and all(responses): # If the db shouldn't be on this node and has been successfully # synced to all of its peers, it can be removed. 
if not self.delete_db(broker): diff --git a/test/unit/common/test_db_replicator.py b/test/unit/common/test_db_replicator.py index be17d42bf8..29d66df99d 100644 --- a/test/unit/common/test_db_replicator.py +++ b/test/unit/common/test_db_replicator.py @@ -73,7 +73,7 @@ class FakeRingWithSingleNode(object): class Ring(object): devs = [dict( id=1, weight=10.0, zone=1, ip='1.1.1.1', port=6200, device='sdb', - meta='', replication_ip='1.1.1.1', replication_port=6200 + meta='', replication_ip='1.1.1.1', replication_port=6200, region=1 )] def __init__(self, path, reload_time=15, ring_name=None): @@ -633,6 +633,9 @@ class TestDBReplicator(unittest.TestCase): def test_replicate_object_delete_because_not_shouldbehere(self): replicator = TestReplicator({}) + replicator.ring = FakeRingWithNodes().Ring('path') + replicator.brokerclass = FakeAccountBroker + replicator._repl_to_node = lambda *args: True replicator.delete_db = self.stub_delete_db replicator._replicate_object('0', '/path/to/file', 'node_id') self.assertEqual(['/path/to/file'], self.delete_db_calls) @@ -669,6 +672,30 @@ class TestDBReplicator(unittest.TestCase): [(('Found /path/to/file for /a%20c%20t/c%20o%20n when it should ' 'be on partition 0; will replicate out and remove.',), {})]) + def test_replicate_container_out_of_place_no_node(self): + replicator = TestReplicator({}, logger=unit.FakeLogger()) + replicator.ring = FakeRingWithSingleNode().Ring('path') + replicator._repl_to_node = lambda *args: True + + replicator.delete_db = self.stub_delete_db + # Correct node_id, wrong part + part = replicator.ring.get_part( + TEST_ACCOUNT_NAME, TEST_CONTAINER_NAME) + 1 + node_id = replicator.ring.get_part_nodes(part)[0]['id'] + replicator._replicate_object(str(part), '/path/to/file', node_id) + self.assertEqual(['/path/to/file'], self.delete_db_calls) + + self.delete_db_calls = [] + + # No nodes this time + replicator.ring.get_part_nodes = lambda *args: [] + replicator.delete_db = self.stub_delete_db + # Correct node_id, 
wrong part + part = replicator.ring.get_part( + TEST_ACCOUNT_NAME, TEST_CONTAINER_NAME) + 1 + replicator._replicate_object(str(part), '/path/to/file', node_id) + self.assertEqual([], self.delete_db_calls) + def test_replicate_object_different_region(self): db_replicator.ring = FakeRingWithNodes() replicator = TestReplicator({}) From ce022f4417e973215b563412b504a8a1b9ce240c Mon Sep 17 00:00:00 2001 From: Saverio Proto Date: Thu, 12 May 2016 15:30:17 +0200 Subject: [PATCH 118/141] README: fix broken link There is a broken link to CONTRIBUTING.rst introduced in commit a829bd59770681f9d6c1ef02a6e1d5e441587a23 Change-Id: Iaedf5ff3995229cf292202793809080f9f2c7fed --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 984d160e21..2c2831dede 100644 --- a/README.rst +++ b/README.rst @@ -33,7 +33,7 @@ Getting Started Swift is part of OpenStack and follows the code contribution, review, and testing processes common to all OpenStack projects. If you would like to start contributing, check out these -`notes `__ to help you get started. +`notes `__ to help you get started. The best place to get started is the `"SAIO - Swift All In One" `__. From b430c384db38ea1153f52d0accf0561dac1378a6 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Thu, 12 May 2016 14:57:56 +0100 Subject: [PATCH 119/141] Put back missing sysmeta unit tests We lost some unit tests from test_sysmeta.py in [1]. This patch restores them. Since the proxy no longer implements post-as-copy or COPY then we need to plumb in a copy middleware to the test app in test_sysmeta. It is a little odd perhaps to have these tests under test/unit/proxy but the alternative is to duplicate the test setup and base test code that already exists in test_sysmeta.py into test_copy.py. 
[1] commit 46d61a4dcd9a5d9157625c06d6fe7d916e80c3d2 (server side copy middleware) Change-Id: Iec02387ccbddbe3841a417880389c707cd5c0346 --- test/unit/proxy/test_sysmeta.py | 109 ++++++++++++++++++++++++++++++-- 1 file changed, 103 insertions(+), 6 deletions(-) diff --git a/test/unit/proxy/test_sysmeta.py b/test/unit/proxy/test_sysmeta.py index 9548680791..1a7f82334e 100644 --- a/test/unit/proxy/test_sysmeta.py +++ b/test/unit/proxy/test_sysmeta.py @@ -19,6 +19,8 @@ import unittest import os from tempfile import mkdtemp import shutil + +from swift.common.middleware.copy import ServerSideCopyMiddleware from swift.common.storage_policy import StoragePolicy from swift.common.swob import Request from swift.common.utils import mkdirs, split_path @@ -133,6 +135,7 @@ class TestObjectSysmeta(unittest.TestCase): logger=debug_logger('proxy-ut'), account_ring=FakeRing(replicas=1), container_ring=FakeRing(replicas=1)) + self.copy_app = ServerSideCopyMiddleware(self.app, {}) monkey_patch_mimetools() self.tmpdir = mkdtemp() self.testdir = os.path.join(self.tmpdir, @@ -239,7 +242,7 @@ class TestObjectSysmeta(unittest.TestCase): self._assertInHeaders(resp, self.new_meta_headers) self._assertNotInHeaders(resp, self.original_meta_headers_2) - def _test_sysmeta_not_updated_by_POST(self): + def _test_sysmeta_not_updated_by_POST(self, app): # check sysmeta is not changed by a POST but user meta is replaced path = '/v1/a/c/o' @@ -247,7 +250,7 @@ class TestObjectSysmeta(unittest.TestCase): hdrs = dict(self.original_sysmeta_headers_1) hdrs.update(self.original_meta_headers_1) req = Request.blank(path, environ=env, headers=hdrs, body='x') - resp = req.get_response(self.app) + resp = req.get_response(app) self._assertStatus(resp, 201) env = {'REQUEST_METHOD': 'POST'} @@ -257,11 +260,11 @@ class TestObjectSysmeta(unittest.TestCase): hdrs.update(self.new_meta_headers) hdrs.update(self.bad_headers) req = Request.blank(path, environ=env, headers=hdrs) - resp = req.get_response(self.app) + resp = 
req.get_response(app) self._assertStatus(resp, 202) req = Request.blank(path, environ={}) - resp = req.get_response(self.app) + resp = req.get_response(app) self._assertStatus(resp, 200) self._assertInHeaders(resp, self.original_sysmeta_headers_1) self._assertNotInHeaders(resp, self.new_sysmeta_headers) @@ -274,12 +277,106 @@ class TestObjectSysmeta(unittest.TestCase): hdrs.update(self.new_sysmeta_headers) hdrs.update(self.bad_headers) req = Request.blank(path, environ=env, headers=hdrs, body='x') - resp = req.get_response(self.app) + resp = req.get_response(app) self._assertStatus(resp, 201) req = Request.blank(path, environ={}) - resp = req.get_response(self.app) + resp = req.get_response(app) self._assertStatus(resp, 200) self._assertInHeaders(resp, self.changed_sysmeta_headers) self._assertInHeaders(resp, self.new_sysmeta_headers) self._assertNotInHeaders(resp, self.original_sysmeta_headers_2) + + def test_sysmeta_not_updated_by_POST(self): + # test fast-post by issuing requests to the proxy app + self._test_sysmeta_not_updated_by_POST(self.app) + + def test_sysmeta_not_updated_by_POST_as_copy(self): + # test post-as-copy by issuing requests to the copy middleware app + self.copy_app.object_post_as_copy = True + self._test_sysmeta_not_updated_by_POST(self.copy_app) + + def test_sysmeta_updated_by_COPY(self): + # check sysmeta is updated by a COPY in same way as user meta by + # issuing requests to the copy middleware app + path = '/v1/a/c/o' + dest = '/c/o2' + env = {'REQUEST_METHOD': 'PUT'} + hdrs = dict(self.original_sysmeta_headers_1) + hdrs.update(self.original_sysmeta_headers_2) + hdrs.update(self.original_meta_headers_1) + hdrs.update(self.original_meta_headers_2) + req = Request.blank(path, environ=env, headers=hdrs, body='x') + resp = req.get_response(self.copy_app) + self._assertStatus(resp, 201) + + env = {'REQUEST_METHOD': 'COPY'} + hdrs = dict(self.changed_sysmeta_headers) + hdrs.update(self.new_sysmeta_headers) + 
hdrs.update(self.changed_meta_headers) + hdrs.update(self.new_meta_headers) + hdrs.update(self.bad_headers) + hdrs.update({'Destination': dest}) + req = Request.blank(path, environ=env, headers=hdrs) + resp = req.get_response(self.copy_app) + self._assertStatus(resp, 201) + self._assertInHeaders(resp, self.changed_sysmeta_headers) + self._assertInHeaders(resp, self.new_sysmeta_headers) + self._assertInHeaders(resp, self.original_sysmeta_headers_2) + self._assertInHeaders(resp, self.changed_meta_headers) + self._assertInHeaders(resp, self.new_meta_headers) + self._assertInHeaders(resp, self.original_meta_headers_2) + self._assertNotInHeaders(resp, self.bad_headers) + + req = Request.blank('/v1/a/c/o2', environ={}) + resp = req.get_response(self.copy_app) + self._assertStatus(resp, 200) + self._assertInHeaders(resp, self.changed_sysmeta_headers) + self._assertInHeaders(resp, self.new_sysmeta_headers) + self._assertInHeaders(resp, self.original_sysmeta_headers_2) + self._assertInHeaders(resp, self.changed_meta_headers) + self._assertInHeaders(resp, self.new_meta_headers) + self._assertInHeaders(resp, self.original_meta_headers_2) + self._assertNotInHeaders(resp, self.bad_headers) + + def test_sysmeta_updated_by_COPY_from(self): + # check sysmeta is updated by a PUT with x-copy-from in same way as + # user meta by issuing requests to the copy middleware app + path = '/v1/a/c/o' + env = {'REQUEST_METHOD': 'PUT'} + hdrs = dict(self.original_sysmeta_headers_1) + hdrs.update(self.original_sysmeta_headers_2) + hdrs.update(self.original_meta_headers_1) + hdrs.update(self.original_meta_headers_2) + req = Request.blank(path, environ=env, headers=hdrs, body='x') + resp = req.get_response(self.copy_app) + self._assertStatus(resp, 201) + + env = {'REQUEST_METHOD': 'PUT'} + hdrs = dict(self.changed_sysmeta_headers) + hdrs.update(self.new_sysmeta_headers) + hdrs.update(self.changed_meta_headers) + hdrs.update(self.new_meta_headers) + hdrs.update(self.bad_headers) + 
hdrs.update({'X-Copy-From': '/c/o'}) + req = Request.blank('/v1/a/c/o2', environ=env, headers=hdrs, body='') + resp = req.get_response(self.copy_app) + self._assertStatus(resp, 201) + self._assertInHeaders(resp, self.changed_sysmeta_headers) + self._assertInHeaders(resp, self.new_sysmeta_headers) + self._assertInHeaders(resp, self.original_sysmeta_headers_2) + self._assertInHeaders(resp, self.changed_meta_headers) + self._assertInHeaders(resp, self.new_meta_headers) + self._assertInHeaders(resp, self.original_meta_headers_2) + self._assertNotInHeaders(resp, self.bad_headers) + + req = Request.blank('/v1/a/c/o2', environ={}) + resp = req.get_response(self.copy_app) + self._assertStatus(resp, 200) + self._assertInHeaders(resp, self.changed_sysmeta_headers) + self._assertInHeaders(resp, self.new_sysmeta_headers) + self._assertInHeaders(resp, self.original_sysmeta_headers_2) + self._assertInHeaders(resp, self.changed_meta_headers) + self._assertInHeaders(resp, self.new_meta_headers) + self._assertInHeaders(resp, self.original_meta_headers_2) + self._assertNotInHeaders(resp, self.bad_headers) From 6834547f668a4649f5a42b8a4edc43b844fd6bbe Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Fri, 22 Apr 2016 14:55:35 -0700 Subject: [PATCH 120/141] Clean up fallocate tests a little Change-Id: I01f1ad8ef0f8910718fd2fb30c9e8285358baf84 --- test/unit/common/test_utils.py | 133 +++++++++++++++++---------------- 1 file changed, 68 insertions(+), 65 deletions(-) diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 406d156936..9b9ffe9b14 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -2618,67 +2618,68 @@ cluster_dfw1 = http://dfw1.host/v1/ utils.config_fallocate_value('1024') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 - exc = None - try: + + with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(0)) - except OSError as err: - exc = err - self.assertEqual(str(exc), - '[Errno 28] 
FALLOCATE_RESERVE fail 1024 <= 1024') - self.assertEqual(err.errno, errno.ENOSPC) + self.assertEqual( + str(catcher.exception), + '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024' + % errno.ENOSPC) + self.assertEqual(catcher.exception.errno, errno.ENOSPC) + # Want 1024 reserved, have 512 * 2 free, so fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1024') StatVFS.f_frsize = 512 StatVFS.f_bavail = 2 - exc = None - try: + with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(0)) - except OSError as err: - exc = err - self.assertEqual(str(exc), - '[Errno 28] FALLOCATE_RESERVE fail 1024 <= 1024') - self.assertEqual(err.errno, errno.ENOSPC) + self.assertEqual( + str(catcher.exception), + '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024' + % errno.ENOSPC) + self.assertEqual(catcher.exception.errno, errno.ENOSPC) + # Want 2048 reserved, have 1024 * 1 free, so fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('2048') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 - exc = None - try: + with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(0)) - except OSError as err: - exc = err - self.assertEqual(str(exc), - '[Errno 28] FALLOCATE_RESERVE fail 1024 <= 2048') - self.assertEqual(err.errno, errno.ENOSPC) + self.assertEqual( + str(catcher.exception), + '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 2048' + % errno.ENOSPC) + self.assertEqual(catcher.exception.errno, errno.ENOSPC) + # Want 2048 reserved, have 512 * 2 free, so fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('2048') StatVFS.f_frsize = 512 StatVFS.f_bavail = 2 - exc = None - try: + with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(0)) - except OSError as err: - exc = err - self.assertEqual(str(exc), - '[Errno 28] FALLOCATE_RESERVE fail 1024 <= 2048') - self.assertEqual(err.errno, errno.ENOSPC) + self.assertEqual( + 
str(catcher.exception), + '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 2048' + % errno.ENOSPC) + self.assertEqual(catcher.exception.errno, errno.ENOSPC) + # Want 1023 reserved, have 1024 * 1 free, but file size is 1, so # fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1023') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 - exc = None - try: + with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(1)) - except OSError as err: - exc = err - self.assertEqual(str(exc), - '[Errno 28] FALLOCATE_RESERVE fail 1023 <= 1023') - self.assertEqual(err.errno, errno.ENOSPC) + self.assertEqual( + str(catcher.exception), + '[Errno %d] FALLOCATE_RESERVE fail 1023 <= 1023' + % errno.ENOSPC) + self.assertEqual(catcher.exception.errno, errno.ENOSPC) + # Want 1022 reserved, have 1024 * 1 free, and file size is 1, so # succeeds utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ @@ -2686,6 +2687,7 @@ cluster_dfw1 = http://dfw1.host/v1/ StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(1)), 0) + # Want 1% reserved, have 100 bytes * 2/100 free, and file size is # 99, so succeeds utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ @@ -2694,6 +2696,7 @@ cluster_dfw1 = http://dfw1.host/v1/ StatVFS.f_bavail = 2 StatVFS.f_blocks = 100 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(99)), 0) + # Want 2% reserved, have 50 bytes * 2/50 free, and file size is 49, # so succeeds utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ @@ -2702,6 +2705,7 @@ cluster_dfw1 = http://dfw1.host/v1/ StatVFS.f_bavail = 2 StatVFS.f_blocks = 50 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(49)), 0) + # Want 100% reserved, have 100 * 100/100 free, and file size is 0, # so fails. 
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ @@ -2709,15 +2713,14 @@ cluster_dfw1 = http://dfw1.host/v1/ StatVFS.f_frsize = 100 StatVFS.f_bavail = 100 StatVFS.f_blocks = 100 - exc = None - try: + with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(0)) - except OSError as err: - exc = err - self.assertEqual(str(exc), - '[Errno 28] FALLOCATE_RESERVE fail 100.0 <= ' - '100.0') - self.assertEqual(err.errno, errno.ENOSPC) + self.assertEqual( + str(catcher.exception), + '[Errno %d] FALLOCATE_RESERVE fail 100.0 <= 100.0' + % errno.ENOSPC) + self.assertEqual(catcher.exception.errno, errno.ENOSPC) + # Want 1% reserved, have 100 * 2/100 free, and file size is 101, # so fails. utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ @@ -2725,29 +2728,28 @@ cluster_dfw1 = http://dfw1.host/v1/ StatVFS.f_frsize = 100 StatVFS.f_bavail = 2 StatVFS.f_blocks = 100 - exc = None - try: + with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(101)) - except OSError as err: - exc = err - self.assertEqual(str(exc), - '[Errno 28] FALLOCATE_RESERVE fail 0.99 <= 1.0') - self.assertEqual(err.errno, errno.ENOSPC) - # Want 98% reserved, have 100 bytes * 99/100 free, and file size + self.assertEqual( + str(catcher.exception), + '[Errno %d] FALLOCATE_RESERVE fail 0.99 <= 1.0' + % errno.ENOSPC) + self.assertEqual(catcher.exception.errno, errno.ENOSPC) + # is 100, so fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('98%') StatVFS.f_frsize = 100 StatVFS.f_bavail = 99 StatVFS.f_blocks = 100 - exc = None - try: + with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(100)) - except OSError as err: - exc = err - self.assertEqual(str(exc), - '[Errno 28] FALLOCATE_RESERVE fail 98.0 <= 98.0') - self.assertEqual(err.errno, errno.ENOSPC) + self.assertEqual( + str(catcher.exception), + '[Errno %d] FALLOCATE_RESERVE fail 98.0 <= 98.0' + % errno.ENOSPC) + 
self.assertEqual(catcher.exception.errno, errno.ENOSPC) + # Want 2% reserved, have 1000 bytes * 21/1000 free, and file size # is 999, so succeeds. utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ @@ -2756,6 +2758,7 @@ cluster_dfw1 = http://dfw1.host/v1/ StatVFS.f_bavail = 21 StatVFS.f_blocks = 1000 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(999)), 0) + # Want 2% resereved, have 1000 bytes * 21/1000 free, and file size # is 1000, so fails. utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ @@ -2763,14 +2766,14 @@ cluster_dfw1 = http://dfw1.host/v1/ StatVFS.f_frsize = 1000 StatVFS.f_bavail = 21 StatVFS.f_blocks = 1000 - exc = None - try: + with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(1000)) - except OSError as err: - exc = err - self.assertEqual(str(exc), - '[Errno 28] FALLOCATE_RESERVE fail 2.0 <= 2.0') - self.assertEqual(err.errno, errno.ENOSPC) + self.assertEqual( + str(catcher.exception), + '[Errno %d] FALLOCATE_RESERVE fail 2.0 <= 2.0' + % errno.ENOSPC) + self.assertEqual(catcher.exception.errno, errno.ENOSPC) + finally: utils.FALLOCATE_RESERVE = orig_FALLOCATE_RESERVE utils.os.fstatvfs = orig_fstatvfs From eda1b330f3cf7bf9dd643854d4a221b6f024884d Mon Sep 17 00:00:00 2001 From: Paul Dardeau Date: Thu, 12 May 2016 18:58:15 +0000 Subject: [PATCH 121/141] fix docs word usage for large obj copy Change-Id: I2b6ca3f9277b344ff5b05a85304084a7edcbcb66 --- swift/common/middleware/copy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/swift/common/middleware/copy.py b/swift/common/middleware/copy.py index e895813e8d..b446b1b7b3 100644 --- a/swift/common/middleware/copy.py +++ b/swift/common/middleware/copy.py @@ -102,7 +102,7 @@ accounts: ------------------- Large Object Copy ------------------- -The best option to copy a large option is to copy segments individually. +The best option to copy a large object is to copy segments individually. 
To copy the manifest object of a large object, add the query parameter to the copy request:: From 721c788b9a0ceb22ffac0b2c797890898096a9f4 Mon Sep 17 00:00:00 2001 From: Kota Tsuyuzaki Date: Thu, 12 May 2016 18:57:31 -0700 Subject: [PATCH 122/141] Import BrainSpliter directly in expirer probe Change-Id: Ib3cfe38d1b17c09c7248b5cbcfe4e2eadff7acd9 --- test/probe/test_object_expirer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/probe/test_object_expirer.py b/test/probe/test_object_expirer.py index 3f8f39deed..e25ddbbc08 100644 --- a/test/probe/test_object_expirer.py +++ b/test/probe/test_object_expirer.py @@ -23,7 +23,7 @@ from swift.common.manager import Manager from swift.common.utils import Timestamp from test.probe.common import ReplProbeTest, ENABLED_POLICIES -from test.probe.test_container_merge_policy_index import BrainSplitter +from test.probe.brain import BrainSplitter from swiftclient import client From 30e39cc9fa6353be37cd3b5a515f09b42ceb3b20 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Wed, 4 May 2016 12:02:07 -0700 Subject: [PATCH 123/141] Skip SLO-reconciling probe test when SLO is off The probe test in question tries to make a manifest referencing unavailable objects and expects that to fail with a 400. If the SLO middleware is enabled, then it checks the segments, can't find some, and returns the 400. If it's disabled, the PUT succeeds and makes an object whose contents are some JSON blob. In the latter case, the probe test would fail because it expected a real SLO manifest but didn't find one. Now we skip the remainder of the test when we detect that SLO is not enabled. 
Change-Id: I3e7e8e98107608e675efc24156e703bc167458bb --- .../test_container_merge_policy_index.py | 25 +++++++++++++------ 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/test/probe/test_container_merge_policy_index.py b/test/probe/test_container_merge_policy_index.py index 3472488f5f..829329a7eb 100644 --- a/test/probe/test_container_merge_policy_index.py +++ b/test/probe/test_container_merge_policy_index.py @@ -20,6 +20,7 @@ import unittest from nose import SkipTest +from six.moves.urllib.parse import urlparse from swift.common.manager import Manager from swift.common.internal_client import InternalClient from swift.common import utils, direct_client @@ -237,6 +238,14 @@ class TestContainerMergePolicyIndex(ReplProbeTest): orig_policy_index, node)) def test_reconcile_manifest(self): + info_url = "%s://%s/info" % (urlparse(self.url).scheme, + urlparse(self.url).netloc) + proxy_conn = client.http_connection(info_url) + cluster_info = client.get_capabilities(proxy_conn) + if 'slo' not in cluster_info: + raise SkipTest("SLO not enabled in proxy; " + "can't test manifest reconciliation") + # this test is not only testing a split brain scenario on # multiple policies with mis-placed objects - it even writes out # a static large object directly to the storage nodes while the @@ -278,18 +287,18 @@ class TestContainerMergePolicyIndex(ReplProbeTest): write_part(i) # write manifest - try: + with self.assertRaises(ClientException) as catcher: client.put_object(self.url, self.token, self.container_name, self.object_name, contents=utils.json.dumps(manifest_data), query_string='multipart-manifest=put') - except ClientException as err: - # so as it works out, you can't really upload a multi-part - # manifest for objects that are currently misplaced - you have to - # wait until they're all available - which is about the same as - # some other failure that causes data to be unavailable to the - # proxy at the time of upload - self.assertEqual(err.http_status, 400) + + 
# so as it works out, you can't really upload a multi-part + # manifest for objects that are currently misplaced - you have to + # wait until they're all available - which is about the same as + # some other failure that causes data to be unavailable to the + # proxy at the time of upload + self.assertEqual(catcher.exception.http_status, 400) # but what the heck, we'll sneak one in just to see what happens... direct_manifest_name = self.object_name + '-direct-test' From ce90a1e79e16472ff75d205c8d907a5991888b3a Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Thu, 21 Jan 2016 13:19:30 -0800 Subject: [PATCH 124/141] Make info caching work across subrequests Previously, if you called get_account_info, get_container_info, or get_object_info, then the results of that call would be cached in the WSGI environment as top-level keys. This is okay, except that if you, in middleware, copy the WSGI environment and then make a subrequest using the copy, information retrieved in the subrequest is cached only in the copy and not in the original. This can mean lots of extra trips to memcache for, say, SLO validation where the segments are in another container; the object HEAD ends up getting container info for the segment container, but then the next object HEAD gets it again. This commit moves the cache for get_*_info into a dictionary at environ['swift.infocache']; this way, you can shallow-copy the request environment and still get the benefits from the cache. 
Change-Id: I3481b38b41c33cd1e39e19baab56193c5f9bf6ac --- swift/common/wsgi.py | 3 +- swift/proxy/controllers/base.py | 41 ++-- .../common/middleware/test_account_quotas.py | 6 +- .../common/middleware/test_container_sync.py | 9 +- test/unit/common/middleware/test_formpost.py | 193 ++++++++++-------- .../common/middleware/test_keystoneauth.py | 22 +- test/unit/common/middleware/test_quotas.py | 10 +- test/unit/common/middleware/test_tempurl.py | 10 +- test/unit/common/test_wsgi.py | 5 + test/unit/proxy/controllers/test_account.py | 8 +- test/unit/proxy/controllers/test_base.py | 120 +++++++---- test/unit/proxy/controllers/test_container.py | 8 +- test/unit/proxy/test_server.py | 42 ++-- 13 files changed, 284 insertions(+), 193 deletions(-) diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index 986e86d640..fdd4a203ed 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -1099,7 +1099,8 @@ def make_env(env, method=None, path=None, agent='Swift', query_string=None, 'SERVER_PROTOCOL', 'swift.cache', 'swift.source', 'swift.trans_id', 'swift.authorize_override', 'swift.authorize', 'HTTP_X_USER_ID', 'HTTP_X_PROJECT_ID', - 'HTTP_REFERER', 'swift.orig_req_method', 'swift.log_info'): + 'HTTP_REFERER', 'swift.orig_req_method', 'swift.log_info', + 'swift.infocache'): if name in env: newenv[name] = env[name] if method: diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index 7dcc1ca3de..0cd209db65 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -381,6 +381,7 @@ def _set_info_cache(app, env, account, container, resp): :param container: the unquoted container name or None :param resp: the response received or None if info cache should be cleared """ + infocache = env.setdefault('swift.infocache', {}) if container: cache_time = app.recheck_container_existence @@ -399,7 +400,7 @@ def _set_info_cache(app, env, account, container, resp): # Next actually set both memcache and the env cache memcache = 
getattr(app, 'memcache', None) or env.get('swift.cache') if not cache_time: - env.pop(env_key, None) + infocache.pop(env_key, None) if memcache: memcache.delete(cache_key) return @@ -410,7 +411,7 @@ def _set_info_cache(app, env, account, container, resp): info = headers_to_account_info(resp.headers, resp.status_int) if memcache: memcache.set(cache_key, info, time=cache_time) - env[env_key] = info + infocache[env_key] = info def _set_object_info_cache(app, env, account, container, obj, resp): @@ -433,12 +434,12 @@ def _set_object_info_cache(app, env, account, container, obj, resp): env_key = get_object_env_key(account, container, obj) - if not resp: - env.pop(env_key, None) + if 'swift.infocache' in env and not resp: + env['swift.infocache'].pop(env_key, None) return info = headers_to_object_info(resp.headers, resp.status_int) - env[env_key] = info + env.setdefault('swift.infocache', {})[env_key] = info def clear_info_cache(app, env, account, container=None): @@ -464,8 +465,8 @@ def _get_info_cache(app, env, account, container=None): """ cache_key, env_key = _get_cache_key(account, container) - if env_key in env: - return env[env_key] + if 'swift.infocache' in env and env_key in env['swift.infocache']: + return env['swift.infocache'][env_key] memcache = getattr(app, 'memcache', None) or env.get('swift.cache') if memcache: info = memcache.get(cache_key) @@ -473,11 +474,11 @@ def _get_info_cache(app, env, account, container=None): for key in info: if isinstance(info[key], six.text_type): info[key] = info[key].encode("utf-8") - if isinstance(info[key], dict): + elif isinstance(info[key], dict): for subkey, value in info[key].items(): if isinstance(value, six.text_type): info[key][subkey] = value.encode("utf-8") - env[env_key] = info + env.setdefault('swift.infocache', {})[env_key] = info return info return None @@ -497,6 +498,7 @@ def _prepare_pre_auth_info_request(env, path, swift_source): # This is a sub request for container metadata- drop the Origin header from # 
the request so the it is not treated as a CORS request. newenv.pop('HTTP_ORIGIN', None) + # Note that Request.blank expects quoted path return Request.blank(quote(path), environ=newenv) @@ -513,6 +515,10 @@ def get_info(app, env, account, container=None, ret_not_found=False, :param env: the environment used by the current request :param account: The unquoted name of the account :param container: The unquoted name of the container (or None if account) + :param ret_not_found: if True, return info dictionary on 404; + if False, return None on 404 + :param swift_source: swift source logged for any subrequests made while + retrieving the account or container info :returns: the cached info or None if cannot be retrieved """ info = _get_info_cache(app, env, account, container) @@ -531,14 +537,15 @@ def get_info(app, env, account, container=None, ret_not_found=False, req = _prepare_pre_auth_info_request( env, path, (swift_source or 'GET_INFO')) - # Whenever we do a GET/HEAD, the GETorHEAD_base will set the info in - # the environment under environ[env_key] and in memcache. We will - # pick the one from environ[env_key] and use it to set the caller env + # Whenever we do a GET/HEAD, the GETorHEAD_base will set the info in the + # environment under environ['swift.infocache'][env_key] and in memcache. 
+ # We will pick the one from environ['swift.infocache'][env_key] and use + # it to set the caller env resp = req.get_response(app) cache_key, env_key = _get_cache_key(account, container) try: - info = resp.environ[env_key] - env[env_key] = info + info = resp.environ['swift.infocache'][env_key] + env.setdefault('swift.infocache', {})[env_key] = info if ret_not_found or is_success(info['status']): return info except (KeyError, AttributeError): @@ -561,7 +568,7 @@ def _get_object_info(app, env, account, container, obj, swift_source=None): :returns: the cached info or None if cannot be retrieved """ env_key = get_object_env_key(account, container, obj) - info = env.get(env_key) + info = env.get('swift.infocache', {}).get(env_key) if info: return info # Not in cached, let's try the object servers @@ -572,8 +579,8 @@ def _get_object_info(app, env, account, container, obj, swift_source=None): # pick the one from environ[env_key] and use it to set the caller env resp = req.get_response(app) try: - info = resp.environ[env_key] - env[env_key] = info + info = resp.environ['swift.infocache'][env_key] + env.setdefault('swift.infocache', {})[env_key] = info return info except (KeyError, AttributeError): pass diff --git a/test/unit/common/middleware/test_account_quotas.py b/test/unit/common/middleware/test_account_quotas.py index b443b4a28d..87574bd14f 100644 --- a/test/unit/common/middleware/test_account_quotas.py +++ b/test/unit/common/middleware/test_account_quotas.py @@ -59,7 +59,8 @@ class FakeApp(object): if env['REQUEST_METHOD'] == "HEAD" and \ env['PATH_INFO'] == '/v1/a/c2/o2': env_key = get_object_env_key('a', 'c2', 'o2') - env[env_key] = headers_to_object_info(self.headers, 200) + env.setdefault('swift.infocache', {})[env_key] = \ + headers_to_object_info(self.headers, 200) start_response('200 OK', self.headers) elif env['REQUEST_METHOD'] == "HEAD" and \ env['PATH_INFO'] == '/v1/a/c2/o3': @@ -67,7 +68,8 @@ class FakeApp(object): else: # Cache the account_info (same as 
a real application) cache_key, env_key = _get_cache_key('a', None) - env[env_key] = headers_to_account_info(self.headers, 200) + env.setdefault('swift.infocache', {})[env_key] = \ + headers_to_account_info(self.headers, 200) start_response('200 OK', self.headers) return [] diff --git a/test/unit/common/middleware/test_container_sync.py b/test/unit/common/middleware/test_container_sync.py index 61a4735f15..9d5f1dd332 100644 --- a/test/unit/common/middleware/test_container_sync.py +++ b/test/unit/common/middleware/test_container_sync.py @@ -205,7 +205,8 @@ cluster_dfw1 = http://dfw1.host/v1/ def test_invalid_sig(self): req = swob.Request.blank( '/v1/a/c', headers={'x-container-sync-auth': 'US nonce sig'}) - req.environ[_get_cache_key('a', 'c')[1]] = {'sync_key': 'abc'} + infocache = req.environ.setdefault('swift.infocache', {}) + infocache[_get_cache_key('a', 'c')[1]] = {'sync_key': 'abc'} resp = req.get_response(self.sync) self.assertEqual(resp.status, '401 Unauthorized') self.assertEqual( @@ -224,7 +225,8 @@ cluster_dfw1 = http://dfw1.host/v1/ req = swob.Request.blank('/v1/a/c', headers={ 'x-container-sync-auth': 'US nonce ' + sig, 'x-backend-inbound-x-timestamp': ts}) - req.environ[_get_cache_key('a', 'c')[1]] = {'sync_key': 'abc'} + infocache = req.environ.setdefault('swift.infocache', {}) + infocache[_get_cache_key('a', 'c')[1]] = {'sync_key': 'abc'} resp = req.get_response(self.sync) self.assertEqual(resp.status, '200 OK') self.assertEqual(resp.body, 'Response to Authorized Request') @@ -238,7 +240,8 @@ cluster_dfw1 = http://dfw1.host/v1/ self.sync.realms_conf.key2('US'), 'abc') req = swob.Request.blank( '/v1/a/c', headers={'x-container-sync-auth': 'US nonce ' + sig}) - req.environ[_get_cache_key('a', 'c')[1]] = {'sync_key': 'abc'} + infocache = req.environ.setdefault('swift.infocache', {}) + infocache[_get_cache_key('a', 'c')[1]] = {'sync_key': 'abc'} resp = req.get_response(self.sync) self.assertEqual(resp.status, '200 OK') self.assertEqual(resp.body, 
'Response to Authorized Request') diff --git a/test/unit/common/middleware/test_formpost.py b/test/unit/common/middleware/test_formpost.py index 4e6f24826f..6e9da72857 100644 --- a/test/unit/common/middleware/test_formpost.py +++ b/test/unit/common/middleware/test_formpost.py @@ -130,8 +130,9 @@ class TestFormPost(unittest.TestCase): meta[meta_name] = key _junk, account, _junk, _junk = split_path(path, 2, 4) - req.environ['swift.account/' + account] = self._fake_cache_env( - account, tempurl_keys) + req.environ.setdefault('swift.infocache', {}) + req.environ['swift.infocache']['swift.account/' + account] = \ + self._fake_cache_env(account, tempurl_keys) return req def _fake_cache_env(self, account, tempurl_keys=()): @@ -221,6 +222,7 @@ class TestFormPost(unittest.TestCase): 'SERVER_NAME': '172.16.83.128', 'SERVER_PORT': '8080', 'SERVER_PROTOCOL': 'HTTP/1.0', + 'swift.infocache': {}, 'wsgi.errors': wsgi_errors, 'wsgi.multiprocess': False, 'wsgi.multithread': True, @@ -247,8 +249,8 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() - 10), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -351,9 +353,10 @@ class TestFormPost(unittest.TestCase): 'SERVER_NAME': '172.16.83.128', 'SERVER_PORT': '8080', 'SERVER_PROTOCOL': 'HTTP/1.0', - 'swift.account/AUTH_test': self._fake_cache_env( - 'AUTH_test', [key]), - 'swift.container/AUTH_test/container': {'meta': {}}, + 'swift.infocache': { + 'swift.account/AUTH_test': self._fake_cache_env( + 'AUTH_test', [key]), + 'swift.container/AUTH_test/container': {'meta': {}}}, 'wsgi.errors': wsgi_errors, 'wsgi.input': wsgi_input, 'wsgi.multiprocess': 
False, @@ -467,9 +470,10 @@ class TestFormPost(unittest.TestCase): 'SERVER_NAME': '172.16.83.128', 'SERVER_PORT': '8080', 'SERVER_PROTOCOL': 'HTTP/1.0', - 'swift.account/AUTH_test': self._fake_cache_env( - 'AUTH_test', [key]), - 'swift.container/AUTH_test/container': {'meta': {}}, + 'swift.infocache': { + 'swift.account/AUTH_test': self._fake_cache_env( + 'AUTH_test', [key]), + 'swift.container/AUTH_test/container': {'meta': {}}}, 'wsgi.errors': wsgi_errors, 'wsgi.input': wsgi_input, 'wsgi.multiprocess': False, @@ -586,9 +590,10 @@ class TestFormPost(unittest.TestCase): 'SERVER_NAME': '172.16.83.128', 'SERVER_PORT': '8080', 'SERVER_PROTOCOL': 'HTTP/1.0', - 'swift.account/AUTH_test': self._fake_cache_env( - 'AUTH_test', [key]), - 'swift.container/AUTH_test/container': {'meta': {}}, + 'swift.infocache': { + 'swift.account/AUTH_test': self._fake_cache_env( + 'AUTH_test', [key]), + 'swift.container/AUTH_test/container': {'meta': {}}}, 'wsgi.errors': wsgi_errors, 'wsgi.input': wsgi_input, 'wsgi.multiprocess': False, @@ -701,9 +706,10 @@ class TestFormPost(unittest.TestCase): 'SERVER_NAME': '172.16.83.128', 'SERVER_PORT': '8080', 'SERVER_PROTOCOL': 'HTTP/1.0', - 'swift.account/AUTH_test': self._fake_cache_env( - 'AUTH_test', [key]), - 'swift.container/AUTH_test/container': {'meta': {}}, + 'swift.infocache': { + 'swift.account/AUTH_test': self._fake_cache_env( + 'AUTH_test', [key]), + 'swift.container/AUTH_test/container': {'meta': {}}}, 'wsgi.errors': wsgi_errors, 'wsgi.input': wsgi_input, 'wsgi.multiprocess': False, @@ -747,9 +753,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://brim.net', 5, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'XX' + b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) - env['swift.container/AUTH_test/container'] = {'meta': {}} + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) + 
env['swift.infocache']['swift.container/AUTH_test/container'] = { + 'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -783,9 +790,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://brim.net', 5, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) - env['swift.container/AUTH_test/container'] = {'meta': {}} + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) + env['swift.infocache']['swift.container/AUTH_test/container'] = { + 'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -814,9 +822,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://brim.net', 1024, 1, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) - env['swift.container/AUTH_test/container'] = {'meta': {}} + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) + env['swift.infocache']['swift.container/AUTH_test/container'] = { + 'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -855,9 +864,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) env['QUERY_STRING'] = 'this=should¬=get&passed' env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) - env['swift.container/AUTH_test/container'] = {'meta': {}} + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) + 
env['swift.infocache']['swift.container/AUTH_test/container'] = { + 'meta': {}} self.app = FakeApp( iter([('201 Created', {}, ''), ('201 Created', {}, '')]), @@ -890,9 +900,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://brim.net', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) - env['swift.container/AUTH_test/container'] = {'meta': {}} + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) + env['swift.infocache']['swift.container/AUTH_test/container'] = { + 'meta': {}} self.app = FakeApp(iter([('404 Not Found', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -976,9 +987,10 @@ class TestFormPost(unittest.TestCase): if six.PY3: wsgi_input = wsgi_input.encode('utf-8') env['wsgi.input'] = BytesIO(wsgi_input) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) - env['swift.container/AUTH_test/container'] = {'meta': {}} + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) + env['swift.infocache']['swift.container/AUTH_test/container'] = { + 'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1047,9 +1059,10 @@ class TestFormPost(unittest.TestCase): if six.PY3: wsgi_input = wsgi_input.encode('utf-8') env['wsgi.input'] = BytesIO(wsgi_input) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) - env['swift.container/AUTH_test/container'] = {'meta': {}} + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) + env['swift.infocache']['swift.container/AUTH_test/container'] = { + 'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) 
@@ -1087,9 +1100,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://redirect', 1024, 10, int(time() + 86400), key, user_agent=False) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) - env['swift.container/AUTH_test/container'] = {'meta': {}} + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) + env['swift.infocache']['swift.container/AUTH_test/container'] = { + 'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1108,9 +1122,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://redirect', 1024, 10, int(time() + 86400), key, user_agent=False) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) - env['swift.container/AUTH_test/container'] = {'meta': {}} + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) + env['swift.infocache']['swift.container/AUTH_test/container'] = { + 'meta': {}} env['HTTP_ORIGIN'] = 'http://localhost:5000' self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', @@ -1137,9 +1152,10 @@ class TestFormPost(unittest.TestCase): int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) # Stick it in X-Account-Meta-Temp-URL-Key-2 and make sure we get it - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', ['bert', key]) - env['swift.container/AUTH_test/container'] = {'meta': {}} + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', ['bert', key])) + env['swift.infocache']['swift.container/AUTH_test/container'] = \ + {'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1173,9 +1189,11 @@ class 
TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://redirect', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env('AUTH_test') + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test')) # Stick it in X-Container-Meta-Temp-URL-Key-2 and ensure we get it - env['swift.container/AUTH_test/container'] = {'meta': meta} + env['swift.infocache']['swift.container/AUTH_test/container'] = { + 'meta': meta} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1199,9 +1217,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://redirect', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) - env['swift.container/AUTH_test/container'] = {'meta': {}} + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) + env['swift.infocache']['swift.container/AUTH_test/container'] = { + 'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1237,9 +1256,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://redirect?one=two', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) - env['swift.container/AUTH_test/container'] = {'meta': {}} + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) + env['swift.infocache']['swift.container/AUTH_test/container'] = { + 'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1275,9 +1295,10 @@ class 
TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) - env['swift.container/AUTH_test/container'] = {'meta': {}} + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) + env['swift.infocache']['swift.container/AUTH_test/container'] = { + 'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1312,8 +1333,8 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() - 10), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1346,8 +1367,8 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) # Change key to invalidate sig - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key + ' is bogus now']) + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key + ' is bogus now'])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1379,8 +1400,8 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'XX' + b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 
'AUTH_test', [key]) + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1412,8 +1433,8 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v2/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1445,8 +1466,8 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '//AUTH_test/container', '', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1478,8 +1499,8 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1//container', '', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1511,8 +1532,8 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_tst/container', '', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = 
BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([ ('200 Ok', {'x-account-meta-temp-url-key': 'def'}, ''), ('201 Created', {}, ''), @@ -1546,8 +1567,8 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test', '', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1584,8 +1605,8 @@ class TestFormPost(unittest.TestCase): body[i] = 'badvalue' break env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1625,9 +1646,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) wsgi_input = b'\r\n'.join(x_delete_body_part + body) env['wsgi.input'] = BytesIO(wsgi_input) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) - env['swift.container/AUTH_test/container'] = {'meta': {}} + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) + env['swift.infocache']['swift.container/AUTH_test/container'] = { + 'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1668,8 +1690,8 @@ class 
TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) wsgi_input = b'\r\n'.join(x_delete_body_part + body) env['wsgi.input'] = BytesIO(wsgi_input) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1703,9 +1725,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) wsgi_input = b'\r\n'.join(x_delete_body_part + body) env['wsgi.input'] = BytesIO(wsgi_input) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) - env['swift.container/AUTH_test/container'] = {'meta': {}} + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) + env['swift.infocache']['swift.container/AUTH_test/container'] = { + 'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1746,8 +1769,8 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) wsgi_input = b'\r\n'.join(x_delete_body_part + body) env['wsgi.input'] = BytesIO(wsgi_input) - env['swift.account/AUTH_test'] = self._fake_cache_env( - 'AUTH_test', [key]) + env['swift.infocache']['swift.account/AUTH_test'] = ( + self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) diff --git a/test/unit/common/middleware/test_keystoneauth.py b/test/unit/common/middleware/test_keystoneauth.py index 08aa86b3de..4b82c88dc4 100644 --- a/test/unit/common/middleware/test_keystoneauth.py +++ b/test/unit/common/middleware/test_keystoneauth.py @@ -252,7 +252,7 @@ class 
SwiftAuth(unittest.TestCase): path = '/v1/' + account # fake cached account info _, info_key = _get_cache_key(account, None) - env = {info_key: {'status': 0, 'sysmeta': {}}, + env = {'swift.infocache': {info_key: {'status': 0, 'sysmeta': {}}}, 'keystone.token_info': _fake_token_info(version='3')} req = Request.blank(path, environ=env, headers=headers) req.method = 'POST' @@ -281,7 +281,7 @@ class SwiftAuth(unittest.TestCase): path = '/v1/' + account # fake cached account info _, info_key = _get_cache_key(account, None) - env = {info_key: {'status': 0, 'sysmeta': {}}, + env = {'swift.infocache': {info_key: {'status': 0, 'sysmeta': {}}}, 'keystone.token_info': _fake_token_info(version='3')} req = Request.blank(path, environ=env, headers=headers) req.method = 'POST' @@ -303,7 +303,7 @@ class SwiftAuth(unittest.TestCase): path = '/v1/' + account _, info_key = _get_cache_key(account, None) # v2 token - env = {info_key: {'status': 0, 'sysmeta': {}}, + env = {'swift.infocache': {info_key: {'status': 0, 'sysmeta': {}}}, 'keystone.token_info': _fake_token_info(version='2')} req = Request.blank(path, environ=env, headers=headers) req.method = 'POST' @@ -325,7 +325,7 @@ class SwiftAuth(unittest.TestCase): path = '/v1/' + account _, info_key = _get_cache_key(account, None) # v2 token - env = {info_key: {'status': 0, 'sysmeta': {}}, + env = {'swift.infocache': {info_key: {'status': 0, 'sysmeta': {}}}, 'keystone.token_info': _fake_token_info(version='2')} req = Request.blank(path, environ=env, headers=headers) req.method = 'POST' @@ -382,7 +382,7 @@ class ServiceTokenFunctionality(unittest.TestCase): service_role=service_role) (version, account, _junk, _junk) = split_path(path, 2, 4, True) _, info_key = _get_cache_key(account, None) - env = {info_key: {'status': 0, 'sysmeta': {}}, + env = {'swift.infocache': {info_key: {'status': 0, 'sysmeta': {}}}, 'keystone.token_info': _fake_token_info(version='2')} if environ: env.update(environ) @@ -596,8 +596,9 @@ class 
TestAuthorize(BaseTestAuthorize): path = '/v1/%s/c' % account # fake cached account info _, info_key = _get_cache_key(account, None) - default_env = {'REMOTE_USER': identity['HTTP_X_TENANT_ID'], - info_key: {'status': 200, 'sysmeta': {}}} + default_env = { + 'REMOTE_USER': identity['HTTP_X_TENANT_ID'], + 'swift.infocache': {info_key: {'status': 200, 'sysmeta': {}}}} default_env.update(identity) if env: default_env.update(env) @@ -986,7 +987,7 @@ class TestAuthorize(BaseTestAuthorize): info = {'sysmeta': sysmeta} _, info_key = _get_cache_key('AUTH_1234', None) env = {'PATH_INFO': '/v1/AUTH_1234', - info_key: info} + 'swift.infocache': {info_key: info}} # account does not exist info['status'] = 404 @@ -1029,7 +1030,8 @@ class TestIsNameAllowedInACL(BaseTestAuthorize): # pretend account exists info = {'status': 200, 'sysmeta': sysmeta} _, info_key = _get_cache_key(account, None) - req = Request.blank(path, environ={info_key: info}) + req = Request.blank(path, + environ={'swift.infocache': {info_key: info}}) if scoped == 'account': project_name = 'account_name' @@ -1215,7 +1217,7 @@ class TestSetProjectDomain(BaseTestAuthorize): sysmeta['project-domain-id'] = sysmeta_project_domain_id info = {'status': status, 'sysmeta': sysmeta} _, info_key = _get_cache_key(account, None) - env = {info_key: info} + env = {'swift.infocache': {info_key: info}} # create fake env identity env_id = self._get_env_id(tenant_id=req_project_id, diff --git a/test/unit/common/middleware/test_quotas.py b/test/unit/common/middleware/test_quotas.py index f99b8df663..f4eba5b76c 100644 --- a/test/unit/common/middleware/test_quotas.py +++ b/test/unit/common/middleware/test_quotas.py @@ -245,8 +245,9 @@ class ContainerQuotaCopyingTestCases(unittest.TestCase): 'status': 200, 'object_count': 1} req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'COPY', - 'swift.container/a/c': a_c_cache, - 'swift.container/a2/c': a2_c_cache}, + 'swift.infocache': { + 'swift.container/a/c': a_c_cache, + 
'swift.container/a2/c': a2_c_cache}}, headers={'Destination': '/c/o', 'Destination-Account': 'a2'}) res = req.get_response(self.copy_filter) @@ -261,8 +262,9 @@ class ContainerQuotaCopyingTestCases(unittest.TestCase): 'status': 200, 'object_count': 1} req = Request.blank('/v1/a2/c/o', environ={'REQUEST_METHOD': 'PUT', - 'swift.container/a/c': a_c_cache, - 'swift.container/a2/c': a2_c_cache}, + 'swift.infocache': { + 'swift.container/a/c': a_c_cache, + 'swift.container/a2/c': a2_c_cache}}, headers={'X-Copy-From': '/c2/o2', 'X-Copy-From-Account': 'a'}) res = req.get_response(self.copy_filter) diff --git a/test/unit/common/middleware/test_tempurl.py b/test/unit/common/middleware/test_tempurl.py index 0fc895f9e2..fed3cbd17d 100644 --- a/test/unit/common/middleware/test_tempurl.py +++ b/test/unit/common/middleware/test_tempurl.py @@ -96,7 +96,8 @@ class TestTempURL(unittest.TestCase): if key: meta[meta_name] = key - environ['swift.account/' + account] = { + ic = environ.setdefault('swift.infocache', {}) + ic['swift.account/' + account] = { 'status': 204, 'container_count': '0', 'total_object_count': '0', @@ -109,7 +110,7 @@ class TestTempURL(unittest.TestCase): meta[meta_name] = key container_cache_key = 'swift.container/' + account + '/c' - environ.setdefault(container_cache_key, {'meta': meta}) + ic.setdefault(container_cache_key, {'meta': meta}) def test_passthrough(self): resp = self._make_request('/v1/a/c/o').get_response(self.tempurl) @@ -159,7 +160,8 @@ class TestTempURL(unittest.TestCase): self.assert_valid_sig(expires, path, [key1, key2], sig) def test_get_valid_container_keys(self): - environ = {} + ic = {} + environ = {'swift.infocache': ic} # Add two static container keys container_keys = ['me', 'other'] meta = {} @@ -167,7 +169,7 @@ class TestTempURL(unittest.TestCase): meta_name = 'Temp-URL-key' + (("-%d" % (idx + 1) if idx else "")) if key: meta[meta_name] = key - environ['swift.container/a/c'] = {'meta': meta} + ic['swift.container/a/c'] = {'meta': meta} 
method = 'GET' expires = int(time() + 86400) diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py index 9c16daf805..cc33833714 100644 --- a/test/unit/common/test_wsgi.py +++ b/test/unit/common/test_wsgi.py @@ -839,6 +839,11 @@ class TestWSGI(unittest.TestCase): self.assertTrue('HTTP_REFERER' in newenv) self.assertEqual(newenv['HTTP_REFERER'], 'http://blah.example.com') + def test_make_env_keeps_infocache(self): + oldenv = {'swift.infocache': {}} + newenv = wsgi.make_env(oldenv) + self.assertIs(newenv.get('swift.infocache'), oldenv['swift.infocache']) + class TestServersPerPortStrategy(unittest.TestCase): def setUp(self): diff --git a/test/unit/proxy/controllers/test_account.py b/test/unit/proxy/controllers/test_account.py index d3dd9cf504..d60a017fa5 100644 --- a/test/unit/proxy/controllers/test_account.py +++ b/test/unit/proxy/controllers/test_account.py @@ -68,9 +68,11 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/v1/AUTH_bob', {'PATH_INFO': '/v1/AUTH_bob'}) resp = controller.HEAD(req) self.assertEqual(2, resp.status_int // 100) - self.assertTrue('swift.account/AUTH_bob' in resp.environ) - self.assertEqual(headers_to_account_info(resp.headers), - resp.environ['swift.account/AUTH_bob']) + self.assertTrue( + 'swift.account/AUTH_bob' in resp.environ['swift.infocache']) + self.assertEqual( + headers_to_account_info(resp.headers), + resp.environ['swift.infocache']['swift.account/AUTH_bob']) def test_swift_owner(self): owner_headers = { diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index e85ee7ba50..1ad7624386 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -120,10 +120,10 @@ class FakeApp(object): def __init__(self, response_factory=None, statuses=None): self.responses = response_factory or \ DynamicResponseFactory(*statuses or []) - self.sources = [] + self.captured_envs = [] def __call__(self, environ, 
start_response): - self.sources.append(environ.get('swift.source')) + self.captured_envs.append(environ) response = self.responses.get_response(environ) reason = RESPONSE_REASONS[response.status_int][0] start_response('%d %s' % (response.status_int, reason), @@ -167,31 +167,37 @@ class TestFuncs(unittest.TestCase): 'http_connect', fake_http_connect(200)): resp = base.GETorHEAD_base(req, 'object', iter(nodes), 'part', '/a/c/o/with/slashes') - self.assertTrue('swift.object/a/c/o/with/slashes' in resp.environ) + infocache = resp.environ['swift.infocache'] + self.assertTrue('swift.object/a/c/o/with/slashes' in infocache) self.assertEqual( - resp.environ['swift.object/a/c/o/with/slashes']['status'], 200) + infocache['swift.object/a/c/o/with/slashes']['status'], 200) + req = Request.blank('/v1/a/c/o') with patch('swift.proxy.controllers.base.' 'http_connect', fake_http_connect(200)): resp = base.GETorHEAD_base(req, 'object', iter(nodes), 'part', '/a/c/o') - self.assertTrue('swift.object/a/c/o' in resp.environ) - self.assertEqual(resp.environ['swift.object/a/c/o']['status'], 200) + infocache = resp.environ['swift.infocache'] + self.assertTrue('swift.object/a/c/o' in infocache) + self.assertEqual(infocache['swift.object/a/c/o']['status'], 200) + req = Request.blank('/v1/a/c') with patch('swift.proxy.controllers.base.' 'http_connect', fake_http_connect(200)): resp = base.GETorHEAD_base(req, 'container', iter(nodes), 'part', '/a/c') - self.assertTrue('swift.container/a/c' in resp.environ) - self.assertEqual(resp.environ['swift.container/a/c']['status'], 200) + infocache = resp.environ['swift.infocache'] + self.assertTrue('swift.container/a/c' in infocache) + self.assertEqual(infocache['swift.container/a/c']['status'], 200) req = Request.blank('/v1/a') with patch('swift.proxy.controllers.base.' 
'http_connect', fake_http_connect(200)): resp = base.GETorHEAD_base(req, 'account', iter(nodes), 'part', '/a') - self.assertTrue('swift.account/a' in resp.environ) - self.assertEqual(resp.environ['swift.account/a']['status'], 200) + infocache = resp.environ['swift.infocache'] + self.assertTrue('swift.account/a' in infocache) + self.assertEqual(infocache['swift.account/a']['status'], 200) # Run the above tests again, but this time with concurrent_reads # turned on @@ -209,26 +215,28 @@ class TestFuncs(unittest.TestCase): resp = base.GETorHEAD_base( req, 'object', iter(nodes), 'part', '/a/c/o/with/slashes', concurrency=concurrent_get_threads) - self.assertTrue('swift.object/a/c/o/with/slashes' in resp.environ) + infocache = resp.environ['swift.infocache'] + self.assertTrue('swift.object/a/c/o/with/slashes' in infocache) self.assertEqual( - resp.environ['swift.object/a/c/o/with/slashes']['status'], 200) + infocache['swift.object/a/c/o/with/slashes']['status'], 200) req = Request.blank('/v1/a/c/o') with patch('swift.proxy.controllers.base.http_connect', fake_http_connect(200, slow_connect=True)): resp = base.GETorHEAD_base( req, 'object', iter(nodes), 'part', '/a/c/o', concurrency=concurrent_get_threads) - self.assertTrue('swift.object/a/c/o' in resp.environ) - self.assertEqual(resp.environ['swift.object/a/c/o']['status'], 200) + infocache = resp.environ['swift.infocache'] + self.assertTrue('swift.object/a/c/o' in infocache) + self.assertEqual(infocache['swift.object/a/c/o']['status'], 200) req = Request.blank('/v1/a/c') with patch('swift.proxy.controllers.base.http_connect', fake_http_connect(200, slow_connect=True)): resp = base.GETorHEAD_base( req, 'container', iter(nodes), 'part', '/a/c', concurrency=concurrent_get_threads) - self.assertTrue('swift.container/a/c' in resp.environ) - self.assertEqual(resp.environ['swift.container/a/c']['status'], - 200) + infocache = resp.environ['swift.infocache'] + self.assertTrue('swift.container/a/c' in infocache) + 
self.assertEqual(infocache['swift.container/a/c']['status'], 200) req = Request.blank('/v1/a') with patch('swift.proxy.controllers.base.http_connect', @@ -236,8 +244,9 @@ class TestFuncs(unittest.TestCase): resp = base.GETorHEAD_base( req, 'account', iter(nodes), 'part', '/a', concurrency=concurrent_get_threads) - self.assertTrue('swift.account/a' in resp.environ) - self.assertEqual(resp.environ['swift.account/a']['status'], 200) + infocache = resp.environ['swift.infocache'] + self.assertTrue('swift.account/a' in infocache) + self.assertEqual(infocache['swift.account/a']['status'], 200) def test_get_info(self): app = FakeApp() @@ -249,7 +258,7 @@ class TestFuncs(unittest.TestCase): self.assertEqual(info_a['bytes'], 6666) self.assertEqual(info_a['total_object_count'], 1000) # Make sure the env cache is set - self.assertEqual(env.get('swift.account/a'), info_a) + self.assertEqual(env['swift.infocache'].get('swift.account/a'), info_a) # Make sure the app was called self.assertEqual(app.responses.stats['account'], 1) @@ -260,7 +269,7 @@ class TestFuncs(unittest.TestCase): self.assertEqual(info_a['bytes'], 6666) self.assertEqual(info_a['total_object_count'], 1000) # Make sure the env cache is set - self.assertEqual(env.get('swift.account/a'), info_a) + self.assertEqual(env['swift.infocache'].get('swift.account/a'), info_a) # Make sure the app was NOT called AGAIN self.assertEqual(app.responses.stats['account'], 1) @@ -271,8 +280,10 @@ class TestFuncs(unittest.TestCase): self.assertEqual(info_c['bytes'], 6666) self.assertEqual(info_c['object_count'], 1000) # Make sure the env cache is set - self.assertEqual(env.get('swift.account/a'), info_a) - self.assertEqual(env.get('swift.container/a/c'), info_c) + self.assertEqual( + env['swift.infocache'].get('swift.account/a'), info_a) + self.assertEqual( + env['swift.infocache'].get('swift.container/a/c'), info_c) # Make sure the app was called for container self.assertEqual(app.responses.stats['container'], 1) @@ -286,22 +297,25 
@@ class TestFuncs(unittest.TestCase): self.assertEqual(info_c['bytes'], 6666) self.assertEqual(info_c['object_count'], 1000) # Make sure the env cache is set - self.assertEqual(env.get('swift.account/a'), info_a) - self.assertEqual(env.get('swift.container/a/c'), info_c) + self.assertEqual( + env['swift.infocache'].get('swift.account/a'), info_a) + self.assertEqual( + env['swift.infocache'].get('swift.container/a/c'), info_c) # check app calls both account and container self.assertEqual(app.responses.stats['account'], 1) self.assertEqual(app.responses.stats['container'], 1) # This time do an env cached call to container while account is not # cached - del(env['swift.account/a']) + del(env['swift.infocache']['swift.account/a']) info_c = get_info(app, env, 'a', 'c') # Check that you got proper info self.assertEqual(info_a['status'], 200) self.assertEqual(info_c['bytes'], 6666) self.assertEqual(info_c['object_count'], 1000) # Make sure the env cache is set and account still not cached - self.assertEqual(env.get('swift.container/a/c'), info_c) + self.assertEqual( + env['swift.infocache'].get('swift.container/a/c'), info_c) # no additional calls were made self.assertEqual(app.responses.stats['account'], 1) self.assertEqual(app.responses.stats['container'], 1) @@ -315,7 +329,8 @@ class TestFuncs(unittest.TestCase): self.assertEqual(info_a['bytes'], None) self.assertEqual(info_a['total_object_count'], None) # Make sure the env cache is set - self.assertEqual(env.get('swift.account/a'), info_a) + self.assertEqual( + env['swift.infocache'].get('swift.account/a'), info_a) # and account was called self.assertEqual(app.responses.stats['account'], 1) @@ -326,7 +341,8 @@ class TestFuncs(unittest.TestCase): self.assertEqual(info_a['bytes'], None) self.assertEqual(info_a['total_object_count'], None) # Make sure the env cache is set - self.assertEqual(env.get('swift.account/a'), info_a) + self.assertEqual( + env['swift.infocache'].get('swift.account/a'), info_a) # add account was 
NOT called AGAIN self.assertEqual(app.responses.stats['account'], 1) @@ -336,7 +352,8 @@ class TestFuncs(unittest.TestCase): info_a = get_info(app, env, 'a') # Check that you got proper info self.assertEqual(info_a, None) - self.assertEqual(env['swift.account/a']['status'], 404) + self.assertEqual( + env['swift.infocache']['swift.account/a']['status'], 404) # and account was called self.assertEqual(app.responses.stats['account'], 1) @@ -344,7 +361,8 @@ class TestFuncs(unittest.TestCase): info_a = get_info(None, env, 'a') # Check that you got proper info self.assertEqual(info_a, None) - self.assertEqual(env['swift.account/a']['status'], 404) + self.assertEqual( + env['swift.infocache']['swift.account/a']['status'], 404) # add account was NOT called AGAIN self.assertEqual(app.responses.stats['account'], 1) @@ -352,14 +370,16 @@ class TestFuncs(unittest.TestCase): app = FakeApp() req = Request.blank("/v1/a/c", environ={'swift.cache': FakeCache()}) get_container_info(req.environ, app, swift_source='MC') - self.assertEqual(app.sources, ['GET_INFO', 'MC']) + self.assertEqual([e['swift.source'] for e in app.captured_envs], + ['GET_INFO', 'MC']) def test_get_object_info_swift_source(self): app = FakeApp() req = Request.blank("/v1/a/c/o", environ={'swift.cache': FakeCache()}) get_object_info(req.environ, app, swift_source='LU') - self.assertEqual(app.sources, ['LU']) + self.assertEqual([e['swift.source'] for e in app.captured_envs], + ['LU']) def test_get_container_info_no_cache(self): req = Request.blank("/v1/AUTH_account/cont", @@ -401,9 +421,10 @@ class TestFuncs(unittest.TestCase): def test_get_container_info_env(self): cache_key = get_container_memcache_key("account", "cont") env_key = 'swift.%s' % cache_key - req = Request.blank("/v1/account/cont", - environ={env_key: {'bytes': 3867}, - 'swift.cache': FakeCache({})}) + req = Request.blank( + "/v1/account/cont", + environ={'swift.infocache': {env_key: {'bytes': 3867}}, + 'swift.cache': FakeCache({})}) resp = 
get_container_info(req.environ, 'xxx') self.assertEqual(resp['bytes'], 3867) @@ -411,7 +432,18 @@ class TestFuncs(unittest.TestCase): app = FakeApp() req = Request.blank("/v1/a", environ={'swift.cache': FakeCache()}) get_account_info(req.environ, app, swift_source='MC') - self.assertEqual(app.sources, ['MC']) + self.assertEqual([e['swift.source'] for e in app.captured_envs], + ['MC']) + + def test_get_account_info_infocache(self): + app = FakeApp() + ic = {} + req = Request.blank("/v1/a", environ={'swift.cache': FakeCache(), + 'swift.infocache': ic}) + get_account_info(req.environ, app) + got_infocaches = [e['swift.infocache'] for e in app.captured_envs] + self.assertEqual(1, len(got_infocaches)) + self.assertIs(ic, got_infocaches[0]) def test_get_account_info_no_cache(self): app = FakeApp() @@ -451,9 +483,10 @@ class TestFuncs(unittest.TestCase): def test_get_account_info_env(self): cache_key = get_account_memcache_key("account") env_key = 'swift.%s' % cache_key - req = Request.blank("/v1/account", - environ={env_key: {'bytes': 3867}, - 'swift.cache': FakeCache({})}) + req = Request.blank( + "/v1/account", + environ={'swift.infocache': {env_key: {'bytes': 3867}}, + 'swift.cache': FakeCache({})}) resp = get_account_info(req.environ, 'xxx') self.assertEqual(resp['bytes'], 3867) @@ -463,9 +496,10 @@ class TestFuncs(unittest.TestCase): 'type': 'application/json', 'meta': {}} env_key = get_object_env_key("account", "cont", "obj") - req = Request.blank("/v1/account/cont/obj", - environ={env_key: cached, - 'swift.cache': FakeCache({})}) + req = Request.blank( + "/v1/account/cont/obj", + environ={'swift.infocache': {env_key: cached}, + 'swift.cache': FakeCache({})}) resp = get_object_info(req.environ, 'xxx') self.assertEqual(resp['length'], 3333) self.assertEqual(resp['type'], 'application/json') diff --git a/test/unit/proxy/controllers/test_container.py b/test/unit/proxy/controllers/test_container.py index a95e058452..851ecc81d9 100644 --- 
a/test/unit/proxy/controllers/test_container.py +++ b/test/unit/proxy/controllers/test_container.py @@ -102,9 +102,11 @@ class TestContainerController(TestRingBase): req = Request.blank('/v1/a/c', {'PATH_INFO': '/v1/a/c'}) resp = controller.HEAD(req) self.assertEqual(2, resp.status_int // 100) - self.assertTrue("swift.container/a/c" in resp.environ) - self.assertEqual(headers_to_container_info(resp.headers), - resp.environ['swift.container/a/c']) + self.assertTrue( + "swift.container/a/c" in resp.environ['swift.infocache']) + self.assertEqual( + headers_to_container_info(resp.headers), + resp.environ['swift.infocache']['swift.container/a/c']) def test_swift_owner(self): owner_headers = { diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 1fc021a542..f2e4986650 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -6319,22 +6319,23 @@ class TestContainerController(unittest.TestCase): res = controller.HEAD(req) self.assertEqual(res.status[:len(str(expected))], str(expected)) + infocache = res.environ.get('swift.infocache', {}) if expected < 400: - self.assertTrue('x-works' in res.headers) + self.assertIn('x-works', res.headers) self.assertEqual(res.headers['x-works'], 'yes') if c_expected: - self.assertTrue('swift.container/a/c' in res.environ) + self.assertIn('swift.container/a/c', infocache) self.assertEqual( - res.environ['swift.container/a/c']['status'], + infocache['swift.container/a/c']['status'], c_expected) else: - self.assertTrue('swift.container/a/c' not in res.environ) + self.assertNotIn('swift.container/a/c', infocache) if a_expected: - self.assertTrue('swift.account/a' in res.environ) - self.assertEqual(res.environ['swift.account/a']['status'], + self.assertIn('swift.account/a', infocache) + self.assertEqual(infocache['swift.account/a']['status'], a_expected) else: - self.assertTrue('swift.account/a' not in res.environ) + self.assertNotIn('swift.account/a', res.environ) 
set_http_connect(*statuses, **kwargs) self.app.memcache.store = {} @@ -6343,22 +6344,23 @@ class TestContainerController(unittest.TestCase): res = controller.GET(req) self.assertEqual(res.status[:len(str(expected))], str(expected)) + infocache = res.environ.get('swift.infocache', {}) if expected < 400: self.assertTrue('x-works' in res.headers) self.assertEqual(res.headers['x-works'], 'yes') if c_expected: - self.assertTrue('swift.container/a/c' in res.environ) + self.assertIn('swift.container/a/c', infocache) self.assertEqual( - res.environ['swift.container/a/c']['status'], + infocache['swift.container/a/c']['status'], c_expected) else: - self.assertTrue('swift.container/a/c' not in res.environ) + self.assertNotIn('swift.container/a/c', infocache) if a_expected: - self.assertTrue('swift.account/a' in res.environ) - self.assertEqual(res.environ['swift.account/a']['status'], + self.assertIn('swift.account/a', infocache) + self.assertEqual(infocache['swift.account/a']['status'], a_expected) else: - self.assertTrue('swift.account/a' not in res.environ) + self.assertNotIn('swift.account/a', infocache) # In all the following tests cache 200 for account # return and ache vary for container # return 200 and cache 200 for and container @@ -6970,8 +6972,8 @@ class TestContainerController(unittest.TestCase): self.app.update_request(req) res = controller.GET(req) self.assertEqual(res.status_int, 204) - self.assertEqual( - res.environ['swift.container/a/c']['status'], 204) + ic = res.environ['swift.infocache'] + self.assertEqual(ic['swift.container/a/c']['status'], 204) self.assertEqual(res.content_length, 0) self.assertTrue('transfer-encoding' not in res.headers) @@ -6989,7 +6991,9 @@ class TestContainerController(unittest.TestCase): req.environ['swift.authorize'] = authorize self.app.update_request(req) res = controller.GET(req) - self.assertEqual(res.environ['swift.container/a/c']['status'], 201) + self.assertEqual( + 
res.environ['swift.infocache']['swift.container/a/c']['status'], + 201) self.assertTrue(called[0]) def test_HEAD_calls_authorize(self): @@ -7457,16 +7461,18 @@ class TestAccountController(unittest.TestCase): self.app.update_request(req) res = method(req) self.assertEqual(res.status_int, expected) + infocache = res.environ.get('swift.infocache', {}) if env_expected: - self.assertEqual(res.environ['swift.account/a']['status'], + self.assertEqual(infocache['swift.account/a']['status'], env_expected) set_http_connect(*statuses) req = Request.blank('/v1/a/', {}) self.app.update_request(req) res = method(req) + infocache = res.environ.get('swift.infocache', {}) self.assertEqual(res.status_int, expected) if env_expected: - self.assertEqual(res.environ['swift.account/a']['status'], + self.assertEqual(infocache['swift.account/a']['status'], env_expected) def test_OPTIONS(self): From 1c88d2cb818cec07d12ac17be5820cdd769aea5d Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Thu, 11 Feb 2016 15:51:45 -0800 Subject: [PATCH 125/141] Fix up get_account_info and get_container_info get_account_info used to work like this: * make an account HEAD request * ignore the response * get the account info by digging around in the request environment, where it had been deposited by elves or something Not actually elves, but the proxy's GETorHEAD_base method would take the HEAD response and cache it in the response environment, which was the same object as the request environment, thus enabling get_account_info to find it. This was extraordinarily brittle. If a WSGI middleware were to shallow-copy the request environment, then any middlewares to its left could not use get_account_info, as the left middleware's request environment would no longer be identical to the response environment down in GETorHEAD_base. Now, get_account_info works like this: * make an account HEAD request. * if the account info is in the request environment, return it. 
This is an optimization to avoid a double-set in memcached. * else, compute the account info from the response headers, store it in caches, and return it. This is much easier to think about; get_account_info can get and cache account info all on its own; the cache check and cache set are right next to each other. All the above is true for get_container_info as well. get_info() is still around, but it's just a shim. It was trying to unify get_account_info and get_container_info to exploit the commonalities, but the number of times that "if container:" showed up in get_info and its helpers really indicated that something was wrong. I'd rather have two functions with some duplication than one function with no duplication but a bunch of "if container:" branches. Other things of note: * a HEAD request to a deleted account returns 410, but get_account_info would return 404 since the 410 came from the account controller *after* GETorHEAD_base ran. Now get_account_info returns 410 as well. * cache validity period (recheck_account_existence and recheck_container_existence) is now communicated to get_account_info via an X-Backend header. This way, get_account_info doesn't need a reference to the swift.proxy.server.Application object. * both logged swift_source values are now correct for get_container_info calls; before, on a cold cache, get_container_info would call get_account_info but not pass along swift_source, resulting in get_account_info logging "GET_INFO" as the source. Amusingly, there was a unit test asserting this bogus behavior. * callers that modify the return value of get_account_info or of get_container_info don't modify what's stored in swift.infocache. * get_account_info on an account that *can* be autocreated but has not been will return a 200, same as a HEAD request. The old behavior was a 404 from get_account_info but a 200 from HEAD. 
Callers can tell the difference by looking at info['account_really_exists'] if they need to know the difference (there is one call site that needs to know, in container PUT). Note: this is for all accounts when the proxy's "account_autocreate" setting is on. Change-Id: I5167714025ec7237f7e6dd4759c2c6eb959b3fca --- swift/common/middleware/keystoneauth.py | 3 +- swift/proxy/controllers/account.py | 26 +- swift/proxy/controllers/base.py | 339 ++++++++++++------ swift/proxy/controllers/container.py | 17 +- swift/proxy/server.py | 9 +- test/unit/common/middleware/test_copy.py | 1 + test/unit/common/middleware/test_ratelimit.py | 4 +- test/unit/proxy/controllers/test_account.py | 18 + test/unit/proxy/controllers/test_base.py | 217 +++-------- test/unit/proxy/controllers/test_container.py | 9 +- test/unit/proxy/controllers/test_obj.py | 75 ++-- test/unit/proxy/test_server.py | 77 ++-- 12 files changed, 430 insertions(+), 365 deletions(-) diff --git a/swift/common/middleware/keystoneauth.py b/swift/common/middleware/keystoneauth.py index 651aeacfbb..ccdd2a8ba9 100644 --- a/swift/common/middleware/keystoneauth.py +++ b/swift/common/middleware/keystoneauth.py @@ -287,7 +287,8 @@ class KeystoneAuth(object): def _get_project_domain_id(self, environ): info = get_account_info(environ, self.app, 'KS') domain_id = info.get('sysmeta', {}).get('project-domain-id') - exists = is_success(info.get('status', 0)) + exists = (is_success(info.get('status', 0)) + and info.get('account_really_exists', True)) return exists, domain_id def _set_project_domain_id(self, req, path_parts, env_identity): diff --git a/swift/proxy/controllers/account.py b/swift/proxy/controllers/account.py index faf4ccdee6..2abb3d1f79 100644 --- a/swift/proxy/controllers/account.py +++ b/swift/proxy/controllers/account.py @@ -24,7 +24,8 @@ from swift.common.utils import public from swift.common.constraints import check_metadata from swift.common import constraints from swift.common.http import HTTP_NOT_FOUND, 
HTTP_GONE -from swift.proxy.controllers.base import Controller, clear_info_cache +from swift.proxy.controllers.base import Controller, clear_info_cache, \ + set_info_cache from swift.common.swob import HTTPBadRequest, HTTPMethodNotAllowed from swift.common.request_helpers import get_sys_meta_prefix @@ -57,6 +58,9 @@ class AccountController(Controller): resp.body = 'Account name length of %d longer than %d' % \ (len(self.account_name), constraints.MAX_ACCOUNT_NAME_LENGTH) + # Don't cache this. We know the account doesn't exist because + # the name is bad; we don't need to cache that because it's + # really cheap to recompute. return resp partition = self.app.account_ring.get_part(self.account_name) @@ -70,8 +74,28 @@ class AccountController(Controller): if resp.headers.get('X-Account-Status', '').lower() == 'deleted': resp.status = HTTP_GONE elif self.app.account_autocreate: + # This is kind of a lie; we pretend like the account is + # there, but it's not. We'll create it as soon as something + # tries to write to it, but we don't need databases on disk + # to tell us that nothing's there. + # + # We set a header so that certain consumers can tell it's a + # fake listing. The important one is the PUT of a container + # to an autocreate account; the proxy checks to see if the + # account exists before actually performing the PUT and + # creates the account if necessary. If we feed it a perfect + # lie, it'll just try to create the container without + # creating the account, and that'll fail. resp = account_listing_response(self.account_name, req, get_listing_content_type(req)) + resp.headers['X-Backend-Fake-Account-Listing'] = 'yes' + + # Cache this. We just made a request to a storage node and got + # up-to-date information for the account. 
+ resp.headers['X-Backend-Recheck-Account-Existence'] = str( + self.app.recheck_account_existence) + set_info_cache(self.app, req.environ, self.account_name, None, resp) + if req.environ.get('swift_owner'): self.add_acls_from_sys_metadata(resp) else: diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index 0cd209db65..527b7ef81d 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -32,6 +32,7 @@ import functools import inspect import itertools import operator +from copy import deepcopy from sys import exc_info from swift import gettext_ as _ @@ -51,7 +52,7 @@ from swift.common.header_key_dict import HeaderKeyDict from swift.common.http import is_informational, is_success, is_redirection, \ is_server_error, HTTP_OK, HTTP_PARTIAL_CONTENT, HTTP_MULTIPLE_CHOICES, \ HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVICE_UNAVAILABLE, \ - HTTP_INSUFFICIENT_STORAGE, HTTP_UNAUTHORIZED, HTTP_CONTINUE + HTTP_INSUFFICIENT_STORAGE, HTTP_UNAUTHORIZED, HTTP_CONTINUE, HTTP_GONE from swift.common.swob import Request, Response, Range, \ HTTPException, HTTPRequestedRangeNotSatisfiable, HTTPServiceUnavailable, \ status_map @@ -61,6 +62,10 @@ from swift.common.request_helpers import strip_sys_meta_prefix, \ from swift.common.storage_policy import POLICIES +DEFAULT_RECHECK_ACCOUNT_EXISTENCE = 60 # seconds +DEFAULT_RECHECK_CONTAINER_EXISTENCE = 60 # seconds + + def update_headers(response, headers): """ Helper function to update headers in the response. @@ -140,7 +145,7 @@ def headers_to_account_info(headers, status_int=HTTP_OK): Construct a cacheable dict of account info based on response headers. 
""" headers, meta, sysmeta = _prep_headers_to_info(headers, 'account') - return { + account_info = { 'status': status_int, # 'container_count' anomaly: # Previous code sometimes expects an int sometimes a string @@ -150,8 +155,12 @@ def headers_to_account_info(headers, status_int=HTTP_OK): 'total_object_count': headers.get('x-account-object-count'), 'bytes': headers.get('x-account-bytes-used'), 'meta': meta, - 'sysmeta': sysmeta + 'sysmeta': sysmeta, } + if is_success(status_int): + account_info['account_really_exists'] = not config_true_value( + headers.get('x-backend-fake-account-listing')) + return account_info def headers_to_container_info(headers, status_int=HTTP_OK): @@ -174,7 +183,7 @@ def headers_to_container_info(headers, status_int=HTTP_OK): 'max_age': meta.get('access-control-max-age') }, 'meta': meta, - 'sysmeta': sysmeta + 'sysmeta': sysmeta, } @@ -188,7 +197,7 @@ def headers_to_object_info(headers, status_int=HTTP_OK): 'type': headers.get('content-type'), 'etag': headers.get('etag'), 'meta': meta, - 'sysmeta': sysmeta + 'sysmeta': sysmeta, } return info @@ -280,8 +289,17 @@ def get_object_info(env, app, path=None, swift_source=None): split_path(path or env['PATH_INFO'], 4, 4, True) info = _get_object_info(app, env, account, container, obj, swift_source=swift_source) - if not info: + if info: + info = deepcopy(info) + else: info = headers_to_object_info({}, 0) + + for field in ('length',): + if info.get(field) is None: + info[field] = 0 + else: + info[field] = int(info[field]) + return info @@ -297,11 +315,55 @@ def get_container_info(env, app, swift_source=None): """ (version, account, container, unused) = \ split_path(env['PATH_INFO'], 3, 4, True) - info = get_info(app, env, account, container, ret_not_found=True, - swift_source=swift_source) + + # Check in environment cache and in memcache (in that order) + info = _get_info_from_caches(app, env, account, container) + if not info: + # Cache miss; go HEAD the container and populate the caches + 
env.setdefault('swift.infocache', {}) + # Before checking the container, make sure the account exists. + # + # If it is an autocreateable account, just assume it exists; don't + # HEAD the account, as a GET or HEAD response for an autocreateable + # account is successful whether the account actually has .db files + # on disk or not. + is_autocreate_account = account.startswith( + getattr(app, 'auto_create_account_prefix', '.')) + if not is_autocreate_account: + account_info = get_account_info(env, app, swift_source) + if not account_info or not is_success(account_info['status']): + return headers_to_container_info({}, 0) + + req = _prepare_pre_auth_info_request( + env, ("/%s/%s/%s" % (version, account, container)), + (swift_source or 'GET_CONTAINER_INFO')) + resp = req.get_response(app) + # Check in infocache to see if the proxy (or anyone else) already + # populated the cache for us. If they did, just use what's there. + # + # See similar comment in get_account_info() for justification. + info = _get_info_from_infocache(env, account, container) + if info is None: + info = set_info_cache(app, env, account, container, resp) + + if info: + info = deepcopy(info) # avoid mutating what's in swift.infocache + else: info = headers_to_container_info({}, 0) + + # Old data format in memcache immediately after a Swift upgrade; clean + # it up so consumers of get_container_info() aren't exposed to it. info.setdefault('storage_policy', '0') + if 'object_count' not in info and 'container_size' in info: + info['object_count'] = info.pop('container_size') + + for field in ('bytes', 'object_count'): + if info.get(field) is None: + info[field] = 0 + else: + info[field] = int(info[field]) + return info @@ -315,18 +377,50 @@ def get_account_info(env, app, swift_source=None): This call bypasses auth. Success does not imply that the request has authorization to the account. 
- :raises ValueError: when path can't be split(path, 2, 4) + :raises ValueError: when path doesn't contain an account """ (version, account, _junk, _junk) = \ split_path(env['PATH_INFO'], 2, 4, True) - info = get_info(app, env, account, ret_not_found=True, - swift_source=swift_source) + + # Check in environment cache and in memcache (in that order) + info = _get_info_from_caches(app, env, account) + + # Cache miss; go HEAD the account and populate the caches if not info: - info = headers_to_account_info({}, 0) - if info.get('container_count') is None: - info['container_count'] = 0 + env.setdefault('swift.infocache', {}) + req = _prepare_pre_auth_info_request( + env, "/%s/%s" % (version, account), + (swift_source or 'GET_ACCOUNT_INFO')) + resp = req.get_response(app) + # Check in infocache to see if the proxy (or anyone else) already + # populated the cache for us. If they did, just use what's there. + # + # The point of this is to avoid setting the value in memcached + # twice. Otherwise, we're needlessly sending requests across the + # network. + # + # If the info didn't make it into the cache, we'll compute it from + # the response and populate the cache ourselves. + # + # Note that this is taking "exists in infocache" to imply "exists in + # memcache". That's because we're trying to avoid superfluous + # network traffic, and checking in memcache prior to setting in + # memcache would defeat the purpose. 
+ info = _get_info_from_infocache(env, account) + if info is None: + info = set_info_cache(app, env, account, None, resp) + + if info: + info = info.copy() # avoid mutating what's in swift.infocache else: - info['container_count'] = int(info['container_count']) + info = headers_to_account_info({}, 0) + + for field in ('container_count', 'bytes', 'total_object_count'): + if info.get(field) is None: + info[field] = 0 + else: + info[field] = int(info[field]) + return info @@ -335,7 +429,7 @@ def _get_cache_key(account, container): Get the keys for both memcache (cache_key) and env (env_key) where info about accounts and containers is cached - :param account: The name of the account + :param account: The name of the account :param container: The name of the container (or None if account) :returns: a tuple of (cache_key, env_key) """ @@ -356,7 +450,7 @@ def get_object_env_key(account, container, obj): """ Get the keys for env (env_key) where info about object is cached - :param account: The name of the account + :param account: The name of the account :param container: The name of the container :param obj: The name of the object :returns: a string env_key @@ -366,36 +460,36 @@ def get_object_env_key(account, container, obj): return env_key -def _set_info_cache(app, env, account, container, resp): +def set_info_cache(app, env, account, container, resp): """ Cache info in both memcache and env. - Caching is used to avoid unnecessary calls to account & container servers. - This is a private function that is being called by GETorHEAD_base and - by clear_info_cache. - Any attempt to GET or HEAD from the container/account server should use - the GETorHEAD_base interface which would than set the cache. 
- :param app: the application object :param account: the unquoted account name :param container: the unquoted container name or None - :param resp: the response received or None if info cache should be cleared + :param resp: the response received or None if info cache should be cleared + + :returns: the info that was placed into the cache, or None if the + request status was not in (404, 410, 2xx). """ infocache = env.setdefault('swift.infocache', {}) - if container: - cache_time = app.recheck_container_existence - else: - cache_time = app.recheck_account_existence + cache_time = None + if container and resp: + cache_time = int(resp.headers.get( + 'X-Backend-Recheck-Container-Existence', + DEFAULT_RECHECK_CONTAINER_EXISTENCE)) + elif resp: + cache_time = int(resp.headers.get( + 'X-Backend-Recheck-Account-Existence', + DEFAULT_RECHECK_ACCOUNT_EXISTENCE)) cache_key, env_key = _get_cache_key(account, container) if resp: - if resp.status_int == HTTP_NOT_FOUND: + if resp.status_int in (HTTP_NOT_FOUND, HTTP_GONE): cache_time *= 0.1 elif not is_success(resp.status_int): cache_time = None - else: - cache_time = None # Next actually set both memcache and the env cache memcache = getattr(app, 'memcache', None) or env.get('swift.cache') @@ -412,24 +506,23 @@ def _set_info_cache(app, env, account, container, resp): if memcache: memcache.set(cache_key, info, time=cache_time) infocache[env_key] = info + return info -def _set_object_info_cache(app, env, account, container, obj, resp): +def set_object_info_cache(app, env, account, container, obj, resp): """ - Cache object info env. Do not cache object information in - memcache. This is an intentional omission as it would lead - to cache pressure. This is a per-request cache. - - Caching is used to avoid unnecessary calls to object servers. - This is a private function that is being called by GETorHEAD_base. - Any attempt to GET or HEAD from the object server should use - the GETorHEAD_base interface which would then set the cache. 
+ Cache object info in the WSGI environment, but not in memcache. Caching + in memcache would lead to cache pressure and mass evictions due to the + large number of objects in a typical Swift cluster. This is a + per-request cache only. :param app: the application object :param account: the unquoted account name - :param container: the unquoted container name or None - :param object: the unquoted object name or None - :param resp: the response received or None if info cache should be cleared + :param container: the unquoted container name + :param object: the unquoted object name + :param resp: a GET or HEAD response received from an object server, or + None if info cache should be cleared + :returns: the object info """ env_key = get_object_env_key(account, container, obj) @@ -440,6 +533,7 @@ def _set_object_info_cache(app, env, account, container, obj, resp): info = headers_to_object_info(resp.headers, resp.status_int) env.setdefault('swift.infocache', {})[env_key] = info + return info def clear_info_cache(app, env, account, container=None): @@ -447,26 +541,43 @@ def clear_info_cache(app, env, account, container=None): Clear the cached info in both memcache and env :param app: the application object + :param env: the WSGI environment :param account: the account name :param container: the containr name or None if setting info for containers """ - _set_info_cache(app, env, account, container, None) + set_info_cache(app, env, account, container, None) -def _get_info_cache(app, env, account, container=None): +def _get_info_from_infocache(env, account, container=None): """ - Get the cached info from env or memcache (if used) in that order - Used for both account and container info - A private function used by get_info + Get cached account or container information from request-environment + cache (swift.infocache). 
+ + :param env: the environment used by the current request + :param account: the account name + :param container: the container name + + :returns: a dictionary of cached info on cache hit, None on miss + """ + _junk, env_key = _get_cache_key(account, container) + if 'swift.infocache' in env and env_key in env['swift.infocache']: + return env['swift.infocache'][env_key] + return None + + +def _get_info_from_memcache(app, env, account, container=None): + """ + Get cached account or container information from memcache :param app: the application object :param env: the environment used by the current request - :returns: the cached info or None if not cached - """ + :param account: the account name + :param container: the container name + :returns: a dictionary of cached info on cache hit, None on miss. Also + returns None if memcache is not in use. + """ cache_key, env_key = _get_cache_key(account, container) - if 'swift.infocache' in env and env_key in env['swift.infocache']: - return env['swift.infocache'][env_key] memcache = getattr(app, 'memcache', None) or env.get('swift.cache') if memcache: info = memcache.get(cache_key) @@ -483,6 +594,22 @@ def _get_info_cache(app, env, account, container=None): return None +def _get_info_from_caches(app, env, account, container=None): + """ + Get the cached info from env or memcache (if used) in that order. + Used for both account and container info. + + :param app: the application object + :param env: the environment used by the current request + :returns: the cached info or None if not cached + """ + + info = _get_info_from_infocache(env, account, container) + if info is None: + info = _get_info_from_memcache(app, env, account, container) + return info + + def _prepare_pre_auth_info_request(env, path, swift_source): """ Prepares a pre authed request to obtain info using a HEAD. @@ -499,14 +626,17 @@ def _prepare_pre_auth_info_request(env, path, swift_source): # the request so the it is not treated as a CORS request. 
newenv.pop('HTTP_ORIGIN', None) + # ACLs are only shown to account owners, so let's make sure this request + # looks like it came from the account owner. + newenv['swift_owner'] = True + # Note that Request.blank expects quoted path return Request.blank(quote(path), environ=newenv) -def get_info(app, env, account, container=None, ret_not_found=False, - swift_source=None): +def get_info(app, env, account, container=None, swift_source=None): """ - Get the info about accounts or containers + Get info about accounts or containers Note: This call bypasses auth. Success does not imply that the request has authorization to the info. @@ -515,42 +645,25 @@ def get_info(app, env, account, container=None, ret_not_found=False, :param env: the environment used by the current request :param account: The unquoted name of the account :param container: The unquoted name of the container (or None if account) - :param ret_not_found: if True, return info dictionary on 404; - if False, return None on 404 :param swift_source: swift source logged for any subrequests made while retrieving the account or container info - :returns: the cached info or None if cannot be retrieved + :returns: information about the specified entity in a dictionary. See + get_account_info and get_container_info for details on what's in the + dictionary. """ - info = _get_info_cache(app, env, account, container) - if info: - if ret_not_found or is_success(info['status']): - return info - return None - # Not in cache, let's try the account servers - path = '/v1/%s' % account - if container: - # Stop and check if we have an account? 
- if not get_info(app, env, account) and not account.startswith( - getattr(app, 'auto_create_account_prefix', '.')): - return None - path += '/' + container + env.setdefault('swift.infocache', {}) - req = _prepare_pre_auth_info_request( - env, path, (swift_source or 'GET_INFO')) - # Whenever we do a GET/HEAD, the GETorHEAD_base will set the info in the - # environment under environ['swift.infocache'][env_key] and in memcache. - # We will pick the one from environ['swift.infocache'][env_key] and use - # it to set the caller env - resp = req.get_response(app) - cache_key, env_key = _get_cache_key(account, container) - try: - info = resp.environ['swift.infocache'][env_key] - env.setdefault('swift.infocache', {})[env_key] = info - if ret_not_found or is_success(info['status']): - return info - except (KeyError, AttributeError): - pass - return None + if container: + path = '/v1/%s/%s' % (account, container) + path_env = env.copy() + path_env['PATH_INFO'] = path + return get_container_info(path_env, app, swift_source=swift_source) + else: + # account info + path = '/v1/%s' % (account,) + path_env = env.copy() + path_env['PATH_INFO'] = path + return get_account_info(path_env, app, swift_source=swift_source) def _get_object_info(app, env, account, container, obj, swift_source=None): @@ -571,20 +684,18 @@ def _get_object_info(app, env, account, container, obj, swift_source=None): info = env.get('swift.infocache', {}).get(env_key) if info: return info - # Not in cached, let's try the object servers + # Not in cache, let's try the object servers path = '/v1/%s/%s/%s' % (account, container, obj) req = _prepare_pre_auth_info_request(env, path, swift_source) - # Whenever we do a GET/HEAD, the GETorHEAD_base will set the info in - # the environment under environ[env_key]. 
We will - # pick the one from environ[env_key] and use it to set the caller env resp = req.get_response(app) - try: - info = resp.environ['swift.infocache'][env_key] - env.setdefault('swift.infocache', {})[env_key] = info - return info - except (KeyError, AttributeError): - pass - return None + # Unlike get_account_info() and get_container_info(), we don't save + # things in memcache, so we can store the info without network traffic, + # *and* the proxy doesn't cache object info for us, so there's no chance + # that the object info would be in the environment. Thus, we just + # compute the object info based on the response and stash it in + # swift.infocache. + info = set_object_info_cache(app, env, account, container, obj, resp) + return info def close_swift_conn(src): @@ -1355,8 +1466,14 @@ class Controller(object): env = getattr(req, 'environ', {}) else: env = {} - info = get_info(self.app, env, account) - if not info: + env.setdefault('swift.infocache', {}) + path_env = env.copy() + path_env['PATH_INFO'] = "/v1/%s" % (account,) + + info = get_account_info(path_env, self.app) + if (not info + or not is_success(info['status']) + or not info.get('account_really_exists', True)): return None, None, None if info.get('container_count') is None: container_count = 0 @@ -1383,8 +1500,11 @@ class Controller(object): env = getattr(req, 'environ', {}) else: env = {} - info = get_info(self.app, env, account, container) - if not info: + env.setdefault('swift.infocache', {}) + path_env = env.copy() + path_env['PATH_INFO'] = "/v1/%s/%s" % (account, container) + info = get_container_info(path_env, self.app) + if not info or not is_success(info.get('status')): info = headers_to_container_info({}, 0) info['partition'] = None info['nodes'] = None @@ -1672,17 +1792,7 @@ class Controller(object): req, handler.statuses, handler.reasons, handler.bodies, '%s %s' % (server_type, req.method), headers=handler.source_headers) - try: - (vrs, account, container) = req.split_path(2, 3) - 
_set_info_cache(self.app, req.environ, account, container, res) - except ValueError: - pass - try: - (vrs, account, container, obj) = req.split_path(4, 4, True) - _set_object_info_cache(self.app, req.environ, account, - container, obj, res) - except ValueError: - pass + # if a backend policy index is present in resp headers, translate it # here with the friendly policy name if 'X-Backend-Storage-Policy-Index' in res.headers and \ @@ -1697,6 +1807,7 @@ class Controller(object): 'Could not translate %s (%r) from %r to policy', 'X-Backend-Storage-Policy-Index', res.headers['X-Backend-Storage-Policy-Index'], path) + return res def is_origin_allowed(self, cors_info, origin): diff --git a/swift/proxy/controllers/container.py b/swift/proxy/controllers/container.py index 08a51f10d6..729f957aa9 100644 --- a/swift/proxy/controllers/container.py +++ b/swift/proxy/controllers/container.py @@ -22,7 +22,7 @@ from swift.common.constraints import check_metadata from swift.common import constraints from swift.common.http import HTTP_ACCEPTED, is_success from swift.proxy.controllers.base import Controller, delay_denial, \ - cors_validation, clear_info_cache + cors_validation, set_info_cache, clear_info_cache from swift.common.storage_policy import POLICIES from swift.common.swob import HTTPBadRequest, HTTPForbidden, \ HTTPNotFound @@ -85,11 +85,16 @@ class ContainerController(Controller): def GETorHEAD(self, req): """Handler for HTTP GET/HEAD requests.""" - if not self.account_info(self.account_name, req)[1]: + ai = self.account_info(self.account_name, req) + if not ai[1]: if 'swift.authorize' in req.environ: aresp = req.environ['swift.authorize'](req) if aresp: + # Don't cache this. It doesn't reflect the state of the + # container, just that the user can't access it. return aresp + # Don't cache this. The lack of account will be cached, and that + # is sufficient. 
return HTTPNotFound(request=req) part = self.app.container_ring.get_part( self.account_name, self.container_name) @@ -99,10 +104,18 @@ class ContainerController(Controller): resp = self.GETorHEAD_base( req, _('Container'), node_iter, part, req.swift_entity_path, concurrency) + # Cache this. We just made a request to a storage node and got + # up-to-date information for the container. + resp.headers['X-Backend-Recheck-Container-Existence'] = str( + self.app.recheck_container_existence) + set_info_cache(self.app, req.environ, self.account_name, + self.container_name, resp) if 'swift.authorize' in req.environ: req.acl = resp.headers.get('x-container-read') aresp = req.environ['swift.authorize'](req) if aresp: + # Don't cache this. It doesn't reflect the state of the + # container, just that the user can't access it. return aresp if not req.environ.get('swift_owner', False): for key in self.app.swift_owner_headers: diff --git a/swift/proxy/server.py b/swift/proxy/server.py index 963bf34f0e..4993c90735 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -36,7 +36,8 @@ from swift.common.utils import cache_from_env, get_logger, \ from swift.common.constraints import check_utf8, valid_api_version from swift.proxy.controllers import AccountController, ContainerController, \ ObjectControllerRouter, InfoController -from swift.proxy.controllers.base import get_container_info, NodeIter +from swift.proxy.controllers.base import get_container_info, NodeIter, \ + DEFAULT_RECHECK_CONTAINER_EXISTENCE, DEFAULT_RECHECK_ACCOUNT_EXISTENCE from swift.common.swob import HTTPBadRequest, HTTPForbidden, \ HTTPMethodNotAllowed, HTTPNotFound, HTTPPreconditionFailed, \ HTTPServerError, HTTPException, Request, HTTPServiceUnavailable @@ -106,9 +107,11 @@ class Application(object): self.error_suppression_limit = \ int(conf.get('error_suppression_limit', 10)) self.recheck_container_existence = \ - int(conf.get('recheck_container_existence', 60)) + 
int(conf.get('recheck_container_existence', + DEFAULT_RECHECK_CONTAINER_EXISTENCE)) self.recheck_account_existence = \ - int(conf.get('recheck_account_existence', 60)) + int(conf.get('recheck_account_existence', + DEFAULT_RECHECK_ACCOUNT_EXISTENCE)) self.allow_account_management = \ config_true_value(conf.get('allow_account_management', 'no')) self.container_ring = container_ring or Ring(swift_dir, diff --git a/test/unit/common/middleware/test_copy.py b/test/unit/common/middleware/test_copy.py index 190d7c9084..254203e630 100644 --- a/test/unit/common/middleware/test_copy.py +++ b/test/unit/common/middleware/test_copy.py @@ -1073,6 +1073,7 @@ class TestServerSideCopyConfiguration(unittest.TestCase): @patch_policies(with_ec_default=True) class TestServerSideCopyMiddlewareWithEC(unittest.TestCase): container_info = { + 'status': 200, 'write_acl': None, 'read_acl': None, 'storage_policy': None, diff --git a/test/unit/common/middleware/test_ratelimit.py b/test/unit/common/middleware/test_ratelimit.py index 44136d2801..66ca43d033 100644 --- a/test/unit/common/middleware/test_ratelimit.py +++ b/test/unit/common/middleware/test_ratelimit.py @@ -432,7 +432,7 @@ class TestRateLimit(unittest.TestCase): req.environ['swift.cache'] = FakeMemcache() req.environ['swift.cache'].set( get_container_memcache_key('a', 'c'), - {'container_size': 1}) + {'object_count': 1}) time_override = [0, 0, 0, 0, None] # simulates 4 requests coming in at same time, then sleeping @@ -466,7 +466,7 @@ class TestRateLimit(unittest.TestCase): req.environ['swift.cache'] = FakeMemcache() req.environ['swift.cache'].set( get_container_memcache_key('a', 'c'), - {'container_size': 1}) + {'object_count': 1}) with mock.patch('swift.common.middleware.ratelimit.get_account_info', lambda *args, **kwargs: {}): diff --git a/test/unit/proxy/controllers/test_account.py b/test/unit/proxy/controllers/test_account.py index d60a017fa5..4ef77f3dab 100644 --- a/test/unit/proxy/controllers/test_account.py +++ 
b/test/unit/proxy/controllers/test_account.py @@ -25,6 +25,7 @@ from test.unit import fake_http_connect, FakeRing, FakeMemcache from swift.common.storage_policy import StoragePolicy from swift.common.request_helpers import get_sys_meta_prefix import swift.proxy.controllers.base +from swift.proxy.controllers.base import get_account_info from test.unit import patch_policies @@ -378,5 +379,22 @@ class TestAccountController4Replicas(TestAccountController): self._assert_responses('POST', POST_TEST_CASES) +@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())]) +class TestGetAccountInfo(unittest.TestCase): + def setUp(self): + self.app = proxy_server.Application( + None, FakeMemcache(), + account_ring=FakeRing(), container_ring=FakeRing()) + + def test_get_deleted_account_410(self): + resp_headers = {'x-account-status': 'deleted'} + + req = Request.blank('/v1/a') + with mock.patch('swift.proxy.controllers.base.http_connect', + fake_http_connect(404, headers=resp_headers)): + info = get_account_info(req.environ, self.app) + self.assertEqual(410, info.get('status')) + + if __name__ == '__main__': unittest.main() diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index 1ad7624386..044a3b5cf0 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -21,14 +21,13 @@ from swift.proxy.controllers.base import headers_to_container_info, \ headers_to_account_info, headers_to_object_info, get_container_info, \ get_container_memcache_key, get_account_info, get_account_memcache_key, \ get_object_env_key, get_info, get_object_info, \ - Controller, GetOrHeadHandler, _set_info_cache, _set_object_info_cache, \ - bytes_to_skip + Controller, GetOrHeadHandler, bytes_to_skip from swift.common.swob import Request, HTTPException, RESPONSE_REASONS from swift.common import exceptions from swift.common.utils import split_path from swift.common.header_key_dict import HeaderKeyDict from 
swift.common.http import is_success -from swift.common.storage_policy import StoragePolicy, POLICIES +from swift.common.storage_policy import StoragePolicy from test.unit import fake_http_connect, FakeRing, FakeMemcache from swift.proxy import server as proxy_server from swift.common.request_helpers import get_sys_meta_prefix @@ -128,15 +127,6 @@ class FakeApp(object): reason = RESPONSE_REASONS[response.status_int][0] start_response('%d %s' % (response.status_int, reason), [(k, v) for k, v in response.headers.items()]) - # It's a bit strange, but the get_info cache stuff relies on the - # app setting some keys in the environment as it makes requests - # (in particular GETorHEAD_base) - so our fake does the same - _set_info_cache(self, environ, response.account, - response.container, response) - if response.obj: - _set_object_info_cache(self, environ, response.account, - response.container, response.obj, - response) return iter(response.body) @@ -158,96 +148,6 @@ class TestFuncs(unittest.TestCase): account_ring=FakeRing(), container_ring=FakeRing()) - def test_GETorHEAD_base(self): - base = Controller(self.app) - req = Request.blank('/v1/a/c/o/with/slashes') - ring = FakeRing() - nodes = list(ring.get_part_nodes(0)) + list(ring.get_more_nodes(0)) - with patch('swift.proxy.controllers.base.' - 'http_connect', fake_http_connect(200)): - resp = base.GETorHEAD_base(req, 'object', iter(nodes), 'part', - '/a/c/o/with/slashes') - infocache = resp.environ['swift.infocache'] - self.assertTrue('swift.object/a/c/o/with/slashes' in infocache) - self.assertEqual( - infocache['swift.object/a/c/o/with/slashes']['status'], 200) - - req = Request.blank('/v1/a/c/o') - with patch('swift.proxy.controllers.base.' 
- 'http_connect', fake_http_connect(200)): - resp = base.GETorHEAD_base(req, 'object', iter(nodes), 'part', - '/a/c/o') - infocache = resp.environ['swift.infocache'] - self.assertTrue('swift.object/a/c/o' in infocache) - self.assertEqual(infocache['swift.object/a/c/o']['status'], 200) - - req = Request.blank('/v1/a/c') - with patch('swift.proxy.controllers.base.' - 'http_connect', fake_http_connect(200)): - resp = base.GETorHEAD_base(req, 'container', iter(nodes), 'part', - '/a/c') - infocache = resp.environ['swift.infocache'] - self.assertTrue('swift.container/a/c' in infocache) - self.assertEqual(infocache['swift.container/a/c']['status'], 200) - - req = Request.blank('/v1/a') - with patch('swift.proxy.controllers.base.' - 'http_connect', fake_http_connect(200)): - resp = base.GETorHEAD_base(req, 'account', iter(nodes), 'part', - '/a') - infocache = resp.environ['swift.infocache'] - self.assertTrue('swift.account/a' in infocache) - self.assertEqual(infocache['swift.account/a']['status'], 200) - - # Run the above tests again, but this time with concurrent_reads - # turned on - policy = next(iter(POLICIES)) - concurrent_get_threads = policy.object_ring.replica_count - for concurrency_timeout in (0, 2): - self.app.concurrency_timeout = concurrency_timeout - req = Request.blank('/v1/a/c/o/with/slashes') - # NOTE: We are using slow_connect of fake_http_connect as using - # a concurrency of 0 when mocking the connection is a little too - # fast for eventlet. Network i/o will make this fine, but mocking - # it seems is too instantaneous. 
- with patch('swift.proxy.controllers.base.http_connect', - fake_http_connect(200, slow_connect=True)): - resp = base.GETorHEAD_base( - req, 'object', iter(nodes), 'part', '/a/c/o/with/slashes', - concurrency=concurrent_get_threads) - infocache = resp.environ['swift.infocache'] - self.assertTrue('swift.object/a/c/o/with/slashes' in infocache) - self.assertEqual( - infocache['swift.object/a/c/o/with/slashes']['status'], 200) - req = Request.blank('/v1/a/c/o') - with patch('swift.proxy.controllers.base.http_connect', - fake_http_connect(200, slow_connect=True)): - resp = base.GETorHEAD_base( - req, 'object', iter(nodes), 'part', '/a/c/o', - concurrency=concurrent_get_threads) - infocache = resp.environ['swift.infocache'] - self.assertTrue('swift.object/a/c/o' in infocache) - self.assertEqual(infocache['swift.object/a/c/o']['status'], 200) - req = Request.blank('/v1/a/c') - with patch('swift.proxy.controllers.base.http_connect', - fake_http_connect(200, slow_connect=True)): - resp = base.GETorHEAD_base( - req, 'container', iter(nodes), 'part', '/a/c', - concurrency=concurrent_get_threads) - infocache = resp.environ['swift.infocache'] - self.assertTrue('swift.container/a/c' in infocache) - self.assertEqual(infocache['swift.container/a/c']['status'], 200) - - req = Request.blank('/v1/a') - with patch('swift.proxy.controllers.base.http_connect', - fake_http_connect(200, slow_connect=True)): - resp = base.GETorHEAD_base( - req, 'account', iter(nodes), 'part', '/a', - concurrency=concurrent_get_threads) - infocache = resp.environ['swift.infocache'] - self.assertTrue('swift.account/a' in infocache) - self.assertEqual(infocache['swift.account/a']['status'], 200) - def test_get_info(self): app = FakeApp() # Do a non cached call to account @@ -257,38 +157,44 @@ class TestFuncs(unittest.TestCase): self.assertEqual(info_a['status'], 200) self.assertEqual(info_a['bytes'], 6666) self.assertEqual(info_a['total_object_count'], 1000) - # Make sure the env cache is set - 
self.assertEqual(env['swift.infocache'].get('swift.account/a'), info_a) + # Make sure the app was called self.assertEqual(app.responses.stats['account'], 1) + # Make sure the return value matches get_account_info + account_info = get_account_info({'PATH_INFO': '/v1/a'}, app) + self.assertEqual(info_a, account_info) + # Do an env cached call to account + app.responses.stats['account'] = 0 + app.responses.stats['container'] = 0 + info_a = get_info(app, env, 'a') # Check that you got proper info self.assertEqual(info_a['status'], 200) self.assertEqual(info_a['bytes'], 6666) self.assertEqual(info_a['total_object_count'], 1000) - # Make sure the env cache is set - self.assertEqual(env['swift.infocache'].get('swift.account/a'), info_a) + # Make sure the app was NOT called AGAIN - self.assertEqual(app.responses.stats['account'], 1) + self.assertEqual(app.responses.stats['account'], 0) # This time do env cached call to account and non cached to container + app.responses.stats['account'] = 0 + app.responses.stats['container'] = 0 + info_c = get_info(app, env, 'a', 'c') # Check that you got proper info self.assertEqual(info_c['status'], 200) self.assertEqual(info_c['bytes'], 6666) self.assertEqual(info_c['object_count'], 1000) - # Make sure the env cache is set - self.assertEqual( - env['swift.infocache'].get('swift.account/a'), info_a) - self.assertEqual( - env['swift.infocache'].get('swift.container/a/c'), info_c) - # Make sure the app was called for container + # Make sure the app was called for container but not account + self.assertEqual(app.responses.stats['account'], 0) self.assertEqual(app.responses.stats['container'], 1) - # This time do a non cached call to account than non cached to + # This time do a non-cached call to account then non-cached to # container + app.responses.stats['account'] = 0 + app.responses.stats['container'] = 0 app = FakeApp() env = {} # abandon previous call to env info_c = get_info(app, env, 'a', 'c') @@ -296,82 +202,31 @@ class 
TestFuncs(unittest.TestCase): self.assertEqual(info_c['status'], 200) self.assertEqual(info_c['bytes'], 6666) self.assertEqual(info_c['object_count'], 1000) - # Make sure the env cache is set - self.assertEqual( - env['swift.infocache'].get('swift.account/a'), info_a) - self.assertEqual( - env['swift.infocache'].get('swift.container/a/c'), info_c) # check app calls both account and container self.assertEqual(app.responses.stats['account'], 1) self.assertEqual(app.responses.stats['container'], 1) - # This time do an env cached call to container while account is not + # This time do an env-cached call to container while account is not # cached + app.responses.stats['account'] = 0 + app.responses.stats['container'] = 0 del(env['swift.infocache']['swift.account/a']) info_c = get_info(app, env, 'a', 'c') # Check that you got proper info self.assertEqual(info_a['status'], 200) self.assertEqual(info_c['bytes'], 6666) self.assertEqual(info_c['object_count'], 1000) - # Make sure the env cache is set and account still not cached - self.assertEqual( - env['swift.infocache'].get('swift.container/a/c'), info_c) + # no additional calls were made - self.assertEqual(app.responses.stats['account'], 1) - self.assertEqual(app.responses.stats['container'], 1) - - # Do a non cached call to account not found with ret_not_found - app = FakeApp(statuses=(404,)) - env = {} - info_a = get_info(app, env, 'a', ret_not_found=True) - # Check that you got proper info - self.assertEqual(info_a['status'], 404) - self.assertEqual(info_a['bytes'], None) - self.assertEqual(info_a['total_object_count'], None) - # Make sure the env cache is set - self.assertEqual( - env['swift.infocache'].get('swift.account/a'), info_a) - # and account was called - self.assertEqual(app.responses.stats['account'], 1) - - # Do a cached call to account not found with ret_not_found - info_a = get_info(app, env, 'a', ret_not_found=True) - # Check that you got proper info - self.assertEqual(info_a['status'], 404) - 
self.assertEqual(info_a['bytes'], None) - self.assertEqual(info_a['total_object_count'], None) - # Make sure the env cache is set - self.assertEqual( - env['swift.infocache'].get('swift.account/a'), info_a) - # add account was NOT called AGAIN - self.assertEqual(app.responses.stats['account'], 1) - - # Do a non cached call to account not found without ret_not_found - app = FakeApp(statuses=(404,)) - env = {} - info_a = get_info(app, env, 'a') - # Check that you got proper info - self.assertEqual(info_a, None) - self.assertEqual( - env['swift.infocache']['swift.account/a']['status'], 404) - # and account was called - self.assertEqual(app.responses.stats['account'], 1) - - # Do a cached call to account not found without ret_not_found - info_a = get_info(None, env, 'a') - # Check that you got proper info - self.assertEqual(info_a, None) - self.assertEqual( - env['swift.infocache']['swift.account/a']['status'], 404) - # add account was NOT called AGAIN - self.assertEqual(app.responses.stats['account'], 1) + self.assertEqual(app.responses.stats['account'], 0) + self.assertEqual(app.responses.stats['container'], 0) def test_get_container_info_swift_source(self): app = FakeApp() req = Request.blank("/v1/a/c", environ={'swift.cache': FakeCache()}) get_container_info(req.environ, app, swift_source='MC') self.assertEqual([e['swift.source'] for e in app.captured_envs], - ['GET_INFO', 'MC']) + ['MC', 'MC']) def test_get_object_info_swift_source(self): app = FakeApp() @@ -397,7 +252,7 @@ class TestFuncs(unittest.TestCase): self.assertEqual(info['status'], 0) def test_get_container_info_no_auto_account(self): - responses = DynamicResponseFactory(404, 200) + responses = DynamicResponseFactory(200) app = FakeApp(responses) req = Request.blank("/v1/.system_account/cont") info = get_container_info(req.environ, app) @@ -435,6 +290,13 @@ class TestFuncs(unittest.TestCase): self.assertEqual([e['swift.source'] for e in app.captured_envs], ['MC']) + def 
test_get_account_info_swift_owner(self): + app = FakeApp() + req = Request.blank("/v1/a", environ={'swift.cache': FakeCache()}) + get_account_info(req.environ, app) + self.assertEqual([e['swift_owner'] for e in app.captured_envs], + [True]) + def test_get_account_info_infocache(self): app = FakeApp() ic = {} @@ -454,7 +316,7 @@ class TestFuncs(unittest.TestCase): self.assertEqual(resp['total_object_count'], 1000) def test_get_account_info_cache(self): - # The original test that we prefer to preserve + # Works with fake apps that return ints in the headers cached = {'status': 404, 'bytes': 3333, 'total_object_count': 10} @@ -465,7 +327,8 @@ class TestFuncs(unittest.TestCase): self.assertEqual(resp['total_object_count'], 10) self.assertEqual(resp['status'], 404) - # Here is a more realistic test + # Works with strings too, like you get when parsing HTTP headers + # that came in through a socket from the account server cached = {'status': 404, 'bytes': '3333', 'container_count': '234', @@ -475,10 +338,10 @@ class TestFuncs(unittest.TestCase): environ={'swift.cache': FakeCache(cached)}) resp = get_account_info(req.environ, FakeApp()) self.assertEqual(resp['status'], 404) - self.assertEqual(resp['bytes'], '3333') + self.assertEqual(resp['bytes'], 3333) self.assertEqual(resp['container_count'], 234) self.assertEqual(resp['meta'], {}) - self.assertEqual(resp['total_object_count'], '10') + self.assertEqual(resp['total_object_count'], 10) def test_get_account_info_env(self): cache_key = get_account_memcache_key("account") diff --git a/test/unit/proxy/controllers/test_container.py b/test/unit/proxy/controllers/test_container.py index 851ecc81d9..fc5692d8c5 100644 --- a/test/unit/proxy/controllers/test_container.py +++ b/test/unit/proxy/controllers/test_container.py @@ -58,7 +58,7 @@ class TestContainerController(TestRingBase): proxy_server.ContainerController): def account_info(controller, *args, **kwargs): - patch_path = 'swift.proxy.controllers.base.get_info' + patch_path 
= 'swift.proxy.controllers.base.get_account_info' with mock.patch(patch_path) as mock_get_info: mock_get_info.return_value = dict(self.account_info) return super(FakeAccountInfoContainerController, @@ -95,18 +95,21 @@ class TestContainerController(TestRingBase): 'Expected %s but got %s. Failed case: %s' % (expected, resp.status_int, str(responses))) - def test_container_info_in_response_env(self): + def test_container_info_got_cached(self): controller = proxy_server.ContainerController(self.app, 'a', 'c') with mock.patch('swift.proxy.controllers.base.http_connect', fake_http_connect(200, 200, body='')): req = Request.blank('/v1/a/c', {'PATH_INFO': '/v1/a/c'}) resp = controller.HEAD(req) self.assertEqual(2, resp.status_int // 100) + # Make sure it's in both swift.infocache and memcache self.assertTrue( - "swift.container/a/c" in resp.environ['swift.infocache']) + "swift.container/a/c" in req.environ['swift.infocache']) self.assertEqual( headers_to_container_info(resp.headers), resp.environ['swift.infocache']['swift.container/a/c']) + from_memcache = self.app.memcache.get('container/a/c') + self.assertTrue(from_memcache) def test_swift_owner(self): owner_headers = { diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index 95b92b298a..a34fee34a4 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -34,7 +34,8 @@ from swift.common import utils, swob, exceptions from swift.common.header_key_dict import HeaderKeyDict from swift.proxy import server as proxy_server from swift.proxy.controllers import obj -from swift.proxy.controllers.base import get_info as _real_get_info +from swift.proxy.controllers.base import \ + get_container_info as _real_get_container_info from swift.common.storage_policy import POLICIES, ECDriverError, StoragePolicy from test.unit import FakeRing, FakeMemcache, fake_http_connect, \ @@ -76,7 +77,7 @@ def set_http_connect(*args, **kwargs): class 
PatchedObjControllerApp(proxy_server.Application): """ This patch is just a hook over the proxy server's __call__ to ensure - that calls to get_info will return the stubbed value for + that calls to get_container_info will return the stubbed value for container_info if it's a container info call. """ @@ -85,22 +86,45 @@ class PatchedObjControllerApp(proxy_server.Application): def __call__(self, *args, **kwargs): - def _fake_get_info(app, env, account, container=None, **kwargs): - if container: - if container in self.per_container_info: - return self.per_container_info[container] - return self.container_info - else: - return _real_get_info(app, env, account, container, **kwargs) + def _fake_get_container_info(env, app, swift_source=None): + _vrs, account, container, _junk = utils.split_path( + env['PATH_INFO'], 3, 4) - mock_path = 'swift.proxy.controllers.base.get_info' - with mock.patch(mock_path, new=_fake_get_info): + # Seed the cache with our container info so that the real + # get_container_info finds it. + ic = env.setdefault('swift.infocache', {}) + cache_key = "swift.container/%s/%s" % (account, container) + + old_value = ic.get(cache_key) + + # Copy the container info so we don't hand out a reference to a + # mutable thing that's set up only once at compile time. Nothing + # *should* mutate it, but it's better to be paranoid than wrong. 
+ if container in self.per_container_info: + ic[cache_key] = self.per_container_info[container].copy() + else: + ic[cache_key] = self.container_info.copy() + + real_info = _real_get_container_info(env, app, swift_source) + + if old_value is None: + del ic[cache_key] + else: + ic[cache_key] = old_value + + return real_info + + with mock.patch('swift.proxy.server.get_container_info', + new=_fake_get_container_info), \ + mock.patch('swift.proxy.controllers.base.get_container_info', + new=_fake_get_container_info): return super( PatchedObjControllerApp, self).__call__(*args, **kwargs) class BaseObjectControllerMixin(object): container_info = { + 'status': 200, 'write_acl': None, 'read_acl': None, 'storage_policy': None, @@ -121,8 +145,11 @@ class BaseObjectControllerMixin(object): self.app = PatchedObjControllerApp( None, FakeMemcache(), account_ring=FakeRing(), container_ring=FakeRing(), logger=self.logger) + # you can over-ride the container_info just by setting it on the app + # (see PatchedObjControllerApp for details) self.app.container_info = dict(self.container_info) + # default policy and ring references self.policy = POLICIES.default self.obj_ring = self.policy.object_ring @@ -957,31 +984,6 @@ class TestReplicatedObjControllerVariousReplicas(BaseObjectControllerMixin, controller_cls = obj.ReplicatedObjectController -@patch_policies(legacy_only=True) -class TestObjControllerLegacyCache(TestReplicatedObjController): - """ - This test pretends like memcache returned a stored value that should - resemble whatever "old" format. It catches KeyErrors you'd get if your - code was expecting some new format during a rolling upgrade. 
- """ - - # in this case policy_index is missing - container_info = { - 'read_acl': None, - 'write_acl': None, - 'sync_key': None, - 'versions': None, - } - - def test_invalid_storage_policy_cache(self): - self.app.container_info['storage_policy'] = 1 - for method in ('GET', 'HEAD', 'POST', 'PUT', 'COPY'): - req = swob.Request.blank('/v1/a/c/o', method=method) - with set_http_connect(): - resp = req.get_response(self.app) - self.assertEqual(resp.status_int, 503) - - class StubResponse(object): def __init__(self, status, body='', headers=None): @@ -1055,6 +1057,7 @@ def capture_http_requests(get_response): @patch_policies(with_ec_default=True) class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): container_info = { + 'status': 200, 'read_acl': None, 'write_acl': None, 'sync_key': None, diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index f2e4986650..a2757f472e 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -74,7 +74,8 @@ from swift.common.utils import mkdirs, normalize_timestamp, NullLogger from swift.common.wsgi import monkey_patch_mimetools, loadapp from swift.proxy.controllers import base as proxy_base from swift.proxy.controllers.base import get_container_memcache_key, \ - get_account_memcache_key, cors_validation, _get_info_cache + get_account_memcache_key, cors_validation, get_account_info, \ + get_container_info import swift.proxy.controllers import swift.proxy.controllers.obj from swift.common.header_key_dict import HeaderKeyDict @@ -518,6 +519,7 @@ class TestController(unittest.TestCase): # 'container_count' changed from int to str cache_key = get_account_memcache_key(self.account) container_info = {'status': 200, + 'account_really_exists': True, 'container_count': '12345', 'total_object_count': None, 'bytes': None, @@ -686,7 +688,32 @@ class TestController(unittest.TestCase): test(404, 507, 503) test(503, 503, 503) - def 
test_get_info_cache_returns_values_as_strings(self): + def test_get_account_info_returns_values_as_strings(self): + app = mock.MagicMock() + app.memcache = mock.MagicMock() + app.memcache.get = mock.MagicMock() + app.memcache.get.return_value = { + u'foo': u'\u2603', + u'meta': {u'bar': u'\u2603'}, + u'sysmeta': {u'baz': u'\u2603'}} + env = {'PATH_INFO': '/v1/a'} + ai = get_account_info(env, app) + + # Test info is returned as strings + self.assertEqual(ai.get('foo'), '\xe2\x98\x83') + self.assertTrue(isinstance(ai.get('foo'), str)) + + # Test info['meta'] is returned as strings + m = ai.get('meta', {}) + self.assertEqual(m.get('bar'), '\xe2\x98\x83') + self.assertTrue(isinstance(m.get('bar'), str)) + + # Test info['sysmeta'] is returned as strings + m = ai.get('sysmeta', {}) + self.assertEqual(m.get('baz'), '\xe2\x98\x83') + self.assertTrue(isinstance(m.get('baz'), str)) + + def test_get_container_info_returns_values_as_strings(self): app = mock.MagicMock() app.memcache = mock.MagicMock() app.memcache.get = mock.MagicMock() @@ -695,25 +722,25 @@ class TestController(unittest.TestCase): u'meta': {u'bar': u'\u2603'}, u'sysmeta': {u'baz': u'\u2603'}, u'cors': {u'expose_headers': u'\u2603'}} - env = {} - r = _get_info_cache(app, env, 'account', 'container') + env = {'PATH_INFO': '/v1/a/c'} + ci = get_container_info(env, app) # Test info is returned as strings - self.assertEqual(r.get('foo'), '\xe2\x98\x83') - self.assertTrue(isinstance(r.get('foo'), str)) + self.assertEqual(ci.get('foo'), '\xe2\x98\x83') + self.assertTrue(isinstance(ci.get('foo'), str)) # Test info['meta'] is returned as strings - m = r.get('meta', {}) + m = ci.get('meta', {}) self.assertEqual(m.get('bar'), '\xe2\x98\x83') self.assertTrue(isinstance(m.get('bar'), str)) # Test info['sysmeta'] is returned as strings - m = r.get('sysmeta', {}) + m = ci.get('sysmeta', {}) self.assertEqual(m.get('baz'), '\xe2\x98\x83') self.assertTrue(isinstance(m.get('baz'), str)) # Test info['cors'] is returned as 
strings - m = r.get('cors', {}) + m = ci.get('cors', {}) self.assertEqual(m.get('expose_headers'), '\xe2\x98\x83') self.assertTrue(isinstance(m.get('expose_headers'), str)) @@ -6362,8 +6389,8 @@ class TestContainerController(unittest.TestCase): else: self.assertNotIn('swift.account/a', infocache) # In all the following tests cache 200 for account - # return and ache vary for container - # return 200 and cache 200 for and container + # return and cache vary for container + # return 200 and cache 200 for account and container test_status_map((200, 200, 404, 404), 200, 200, 200) test_status_map((200, 200, 500, 404), 200, 200, 200) # return 304 don't cache container @@ -6375,12 +6402,13 @@ class TestContainerController(unittest.TestCase): test_status_map((200, 500, 500, 500), 503, None, 200) self.assertFalse(self.app.account_autocreate) - # In all the following tests cache 404 for account # return 404 (as account is not found) and don't cache container test_status_map((404, 404, 404), 404, None, 404) - # This should make no difference + + # cache a 204 for the account because it's sort of like it + # exists self.app.account_autocreate = True - test_status_map((404, 404, 404), 404, None, 404) + test_status_map((404, 404, 404), 404, None, 204) def test_PUT_policy_headers(self): backend_requests = [] @@ -6966,8 +6994,7 @@ class TestContainerController(unittest.TestCase): def test_GET_no_content(self): with save_globals(): set_http_connect(200, 204, 204, 204) - controller = proxy_server.ContainerController(self.app, 'account', - 'container') + controller = proxy_server.ContainerController(self.app, 'a', 'c') req = Request.blank('/v1/a/c') self.app.update_request(req) res = controller.GET(req) @@ -6985,8 +7012,7 @@ class TestContainerController(unittest.TestCase): return HTTPUnauthorized(request=req) with save_globals(): set_http_connect(200, 201, 201, 201) - controller = proxy_server.ContainerController(self.app, 'account', - 'container') + controller = 
proxy_server.ContainerController(self.app, 'a', 'c') req = Request.blank('/v1/a/c') req.environ['swift.authorize'] = authorize self.app.update_request(req) @@ -7004,8 +7030,7 @@ class TestContainerController(unittest.TestCase): return HTTPUnauthorized(request=req) with save_globals(): set_http_connect(200, 201, 201, 201) - controller = proxy_server.ContainerController(self.app, 'account', - 'container') + controller = proxy_server.ContainerController(self.app, 'a', 'c') req = Request.blank('/v1/a/c', {'REQUEST_METHOD': 'HEAD'}) req.environ['swift.authorize'] = authorize self.app.update_request(req) @@ -7517,7 +7542,7 @@ class TestAccountController(unittest.TestCase): def test_GET(self): with save_globals(): - controller = proxy_server.AccountController(self.app, 'account') + controller = proxy_server.AccountController(self.app, 'a') # GET returns after the first successful call to an Account Server self.assert_status_map(controller.GET, (200,), 200, 200) self.assert_status_map(controller.GET, (503, 200), 200, 200) @@ -7539,7 +7564,7 @@ class TestAccountController(unittest.TestCase): def test_GET_autocreate(self): with save_globals(): - controller = proxy_server.AccountController(self.app, 'account') + controller = proxy_server.AccountController(self.app, 'a') self.app.memcache = FakeMemcacheReturnsNone() self.assertFalse(self.app.account_autocreate) # Repeat the test for autocreate = False and 404 by all @@ -7564,7 +7589,7 @@ class TestAccountController(unittest.TestCase): def test_HEAD(self): # Same behaviour as GET with save_globals(): - controller = proxy_server.AccountController(self.app, 'account') + controller = proxy_server.AccountController(self.app, 'a') self.assert_status_map(controller.HEAD, (200,), 200, 200) self.assert_status_map(controller.HEAD, (503, 200), 200, 200) self.assert_status_map(controller.HEAD, (503, 503, 200), 200, 200) @@ -7582,7 +7607,7 @@ class TestAccountController(unittest.TestCase): def test_HEAD_autocreate(self): # Same behaviour 
as GET with save_globals(): - controller = proxy_server.AccountController(self.app, 'account') + controller = proxy_server.AccountController(self.app, 'a') self.app.memcache = FakeMemcacheReturnsNone() self.assertFalse(self.app.account_autocreate) self.assert_status_map(controller.HEAD, @@ -7598,7 +7623,7 @@ class TestAccountController(unittest.TestCase): def test_POST_autocreate(self): with save_globals(): - controller = proxy_server.AccountController(self.app, 'account') + controller = proxy_server.AccountController(self.app, 'a') self.app.memcache = FakeMemcacheReturnsNone() # first test with autocreate being False self.assertFalse(self.app.account_autocreate) @@ -7620,7 +7645,7 @@ class TestAccountController(unittest.TestCase): def test_POST_autocreate_with_sysmeta(self): with save_globals(): - controller = proxy_server.AccountController(self.app, 'account') + controller = proxy_server.AccountController(self.app, 'a') self.app.memcache = FakeMemcacheReturnsNone() # first test with autocreate being False self.assertFalse(self.app.account_autocreate) From 1adc6047f0dc56caffb3ab552de948c720b50682 Mon Sep 17 00:00:00 2001 From: Brian Ober Date: Fri, 13 May 2016 16:43:50 -0500 Subject: [PATCH 126/141] Bypass Account Mgmt Functional Cases When Disabled The testPUT case is failing when keystone was enabled and allow_account_management is set to True. There were a few issues needing to be addressed. First the case was renamed to call out what it was actually doing which is verifying an error scenario for which a PUT on a storage account was not allowed. Second the case was running even when allow_account_management is enabled, which is incorrect. It "accidentally" works with TempAuth because it requires a reseller permission, so the Keystone failure here has more to do with not requiring a reseller permission to do a PUT on a storage account on which a user has an operator role. 
The common sense fix here is to not execute this test case when allow_account_management is enabled. Change-Id: Id29f5ca48f92cd139535be7064107b8a61b02856 --- test/functional/tests.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/functional/tests.py b/test/functional/tests.py index fa5b0188bf..9c81c8fc86 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -173,7 +173,9 @@ class TestAccount(Base): finally: self.env.account.conn.storage_url = was_url - def testPUT(self): + def testPUTError(self): + if load_constraint('allow_account_management'): + raise SkipTest("Allow account management is enabled") self.env.account.conn.make_request('PUT') self.assert_status([403, 405]) From 4248123169f88bf62dbc5413ae2c15d53010482e Mon Sep 17 00:00:00 2001 From: venkatamahesh Date: Sun, 15 May 2016 21:27:46 +0530 Subject: [PATCH 127/141] Remove the invalid project link swiftsync project is no longer maintained and so is removed from the list Change-Id: Ieae67c728da16e5516babd33d2e5937b14738d6a Closes-Bug: #1581975 --- doc/source/associated_projects.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/associated_projects.rst b/doc/source/associated_projects.rst index b92dc4ed21..46e0238564 100644 --- a/doc/source/associated_projects.rst +++ b/doc/source/associated_projects.rst @@ -107,7 +107,6 @@ Other * `Glance `_ - Provides services for discovering, registering, and retrieving virtual machine images (for OpenStack Compute [Nova], for example). * `Better Staticweb `_ - Makes swift containers accessible by default. -* `Swiftsync `_ - A massive syncer between two swift clusters. * `Django Swiftbrowser `_ - Simple Django web app to access OpenStack Swift. * `Swift-account-stats `_ - Swift-account-stats is a tool to report statistics on Swift usage at tenant and global levels. 
* `PyECLib `_ - High Level Erasure Code library used by Swift From 2744492f30c758a4f076e2b30eaaf1e2e7fa586c Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 27 Apr 2016 13:31:11 -0500 Subject: [PATCH 128/141] Use the same key for memcache and env['swift.infocache'] When we were caching directly to the WSGI environment, it made sense to have different keys for the different caches. Now that we have a separate data structure for the per-request cache, however, we ought to be consistent. Change-Id: I199cba6e5fc9ab4205bba369e6a2f34fc5ce22d4 --- swift/proxy/controllers/base.py | 72 ++++----- .../common/middleware/test_account_quotas.py | 13 +- .../common/middleware/test_container_sync.py | 8 +- test/unit/common/middleware/test_formpost.py | 145 +++++++++--------- .../common/middleware/test_keystoneauth.py | 20 +-- test/unit/common/middleware/test_quotas.py | 8 +- test/unit/common/middleware/test_ratelimit.py | 10 +- test/unit/common/middleware/test_tempurl.py | 6 +- test/unit/proxy/controllers/test_account.py | 22 ++- test/unit/proxy/controllers/test_base.py | 18 +-- test/unit/proxy/controllers/test_container.py | 5 +- test/unit/proxy/controllers/test_obj.py | 2 +- test/unit/proxy/test_server.py | 54 +++---- 13 files changed, 182 insertions(+), 201 deletions(-) diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index 72d6f61539..407a7aed93 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -107,18 +107,6 @@ def delay_denial(func): return func -def get_account_memcache_key(account): - cache_key, env_key = _get_cache_key(account, None) - return cache_key - - -def get_container_memcache_key(account, container): - if not container: - raise ValueError("container not provided") - cache_key, env_key = _get_cache_key(account, container) - return cache_key - - def _prep_headers_to_info(headers, server_type): """ Helper method that iterates once over a dict of headers, @@ -424,17 +412,24 @@ def 
get_account_info(env, app, swift_source=None): return info -def _get_cache_key(account, container): +def get_cache_key(account, container=None, obj=None): """ - Get the keys for both memcache (cache_key) and env (env_key) - where info about accounts and containers is cached + Get the keys for both memcache and env['swift.infocache'] (cache_key) + where info about accounts, containers, and objects is cached :param account: The name of the account :param container: The name of the container (or None if account) - :returns: a tuple of (cache_key, env_key) + :param obj: The name of the object (or None if account or container) + :returns: a string cache_key """ - if container: + if obj: + if not (account and container): + raise ValueError('Object cache key requires account and container') + cache_key = 'object/%s/%s/%s' % (account, container, obj) + elif container: + if not account: + raise ValueError('Container cache key requires account') cache_key = 'container/%s/%s' % (account, container) else: cache_key = 'account/%s' % account @@ -442,22 +437,7 @@ def _get_cache_key(account, container): # This allows caching both account and container and ensures that when we # copy this env to form a new request, it won't accidentally reuse the # old container or account info - env_key = 'swift.%s' % cache_key - return cache_key, env_key - - -def get_object_env_key(account, container, obj): - """ - Get the keys for env (env_key) where info about object is cached - - :param account: The name of the account - :param container: The name of the container - :param obj: The name of the object - :returns: a string env_key - """ - env_key = 'swift.object/%s/%s/%s' % (account, - container, obj) - return env_key + return cache_key def set_info_cache(app, env, account, container, resp): @@ -483,7 +463,7 @@ def set_info_cache(app, env, account, container, resp): cache_time = int(resp.headers.get( 'X-Backend-Recheck-Account-Existence', DEFAULT_RECHECK_ACCOUNT_EXISTENCE)) - cache_key, env_key 
= _get_cache_key(account, container) + cache_key = get_cache_key(account, container) if resp: if resp.status_int in (HTTP_NOT_FOUND, HTTP_GONE): @@ -494,7 +474,7 @@ def set_info_cache(app, env, account, container, resp): # Next actually set both memcache and the env cache memcache = getattr(app, 'memcache', None) or env.get('swift.cache') if not cache_time: - infocache.pop(env_key, None) + infocache.pop(cache_key, None) if memcache: memcache.delete(cache_key) return @@ -505,7 +485,7 @@ def set_info_cache(app, env, account, container, resp): info = headers_to_account_info(resp.headers, resp.status_int) if memcache: memcache.set(cache_key, info, time=cache_time) - infocache[env_key] = info + infocache[cache_key] = info return info @@ -525,14 +505,14 @@ def set_object_info_cache(app, env, account, container, obj, resp): :returns: the object info """ - env_key = get_object_env_key(account, container, obj) + cache_key = get_cache_key(account, container, obj) if 'swift.infocache' in env and not resp: - env['swift.infocache'].pop(env_key, None) + env['swift.infocache'].pop(cache_key, None) return info = headers_to_object_info(resp.headers, resp.status_int) - env.setdefault('swift.infocache', {})[env_key] = info + env.setdefault('swift.infocache', {})[cache_key] = info return info @@ -559,9 +539,9 @@ def _get_info_from_infocache(env, account, container=None): :returns: a dictionary of cached info on cache hit, None on miss """ - _junk, env_key = _get_cache_key(account, container) - if 'swift.infocache' in env and env_key in env['swift.infocache']: - return env['swift.infocache'][env_key] + cache_key = get_cache_key(account, container) + if 'swift.infocache' in env and cache_key in env['swift.infocache']: + return env['swift.infocache'][cache_key] return None @@ -577,7 +557,7 @@ def _get_info_from_memcache(app, env, account, container=None): :returns: a dictionary of cached info on cache hit, None on miss. Also returns None if memcache is not in use. 
""" - cache_key, env_key = _get_cache_key(account, container) + cache_key = get_cache_key(account, container) memcache = getattr(app, 'memcache', None) or env.get('swift.cache') if memcache: info = memcache.get(cache_key) @@ -589,7 +569,7 @@ def _get_info_from_memcache(app, env, account, container=None): for subkey, value in info[key].items(): if isinstance(value, six.text_type): info[key][subkey] = value.encode("utf-8") - env.setdefault('swift.infocache', {})[env_key] = info + env.setdefault('swift.infocache', {})[cache_key] = info return info return None @@ -680,8 +660,8 @@ def _get_object_info(app, env, account, container, obj, swift_source=None): :param obj: The unquoted name of the object :returns: the cached info or None if cannot be retrieved """ - env_key = get_object_env_key(account, container, obj) - info = env.get('swift.infocache', {}).get(env_key) + cache_key = get_cache_key(account, container, obj) + info = env.get('swift.infocache', {}).get(cache_key) if info: return info # Not in cache, let's try the object servers diff --git a/test/unit/common/middleware/test_account_quotas.py b/test/unit/common/middleware/test_account_quotas.py index 87574bd14f..3ebbe37bd7 100644 --- a/test/unit/common/middleware/test_account_quotas.py +++ b/test/unit/common/middleware/test_account_quotas.py @@ -18,9 +18,8 @@ from swift.common.swob import Request, wsgify, HTTPForbidden, \ from swift.common.middleware import account_quotas, copy -from swift.proxy.controllers.base import _get_cache_key, \ - headers_to_account_info, get_object_env_key, \ - headers_to_object_info +from swift.proxy.controllers.base import get_cache_key, \ + headers_to_account_info, headers_to_object_info class FakeCache(object): @@ -58,8 +57,8 @@ class FakeApp(object): return aresp(env, start_response) if env['REQUEST_METHOD'] == "HEAD" and \ env['PATH_INFO'] == '/v1/a/c2/o2': - env_key = get_object_env_key('a', 'c2', 'o2') - env.setdefault('swift.infocache', {})[env_key] = \ + cache_key = 
get_cache_key('a', 'c2', 'o2') + env.setdefault('swift.infocache', {})[cache_key] = \ headers_to_object_info(self.headers, 200) start_response('200 OK', self.headers) elif env['REQUEST_METHOD'] == "HEAD" and \ @@ -67,8 +66,8 @@ class FakeApp(object): start_response('404 Not Found', []) else: # Cache the account_info (same as a real application) - cache_key, env_key = _get_cache_key('a', None) - env.setdefault('swift.infocache', {})[env_key] = \ + cache_key = get_cache_key('a') + env.setdefault('swift.infocache', {})[cache_key] = \ headers_to_account_info(self.headers, 200) start_response('200 OK', self.headers) return [] diff --git a/test/unit/common/middleware/test_container_sync.py b/test/unit/common/middleware/test_container_sync.py index 9d5f1dd332..6d30eeb6b1 100644 --- a/test/unit/common/middleware/test_container_sync.py +++ b/test/unit/common/middleware/test_container_sync.py @@ -23,7 +23,7 @@ import mock from swift.common import swob from swift.common.middleware import container_sync -from swift.proxy.controllers.base import _get_cache_key +from swift.proxy.controllers.base import get_cache_key from swift.proxy.controllers.info import InfoController from test.unit import FakeLogger @@ -206,7 +206,7 @@ cluster_dfw1 = http://dfw1.host/v1/ req = swob.Request.blank( '/v1/a/c', headers={'x-container-sync-auth': 'US nonce sig'}) infocache = req.environ.setdefault('swift.infocache', {}) - infocache[_get_cache_key('a', 'c')[1]] = {'sync_key': 'abc'} + infocache[get_cache_key('a', 'c')] = {'sync_key': 'abc'} resp = req.get_response(self.sync) self.assertEqual(resp.status, '401 Unauthorized') self.assertEqual( @@ -226,7 +226,7 @@ cluster_dfw1 = http://dfw1.host/v1/ 'x-container-sync-auth': 'US nonce ' + sig, 'x-backend-inbound-x-timestamp': ts}) infocache = req.environ.setdefault('swift.infocache', {}) - infocache[_get_cache_key('a', 'c')[1]] = {'sync_key': 'abc'} + infocache[get_cache_key('a', 'c')] = {'sync_key': 'abc'} resp = req.get_response(self.sync) 
self.assertEqual(resp.status, '200 OK') self.assertEqual(resp.body, 'Response to Authorized Request') @@ -241,7 +241,7 @@ cluster_dfw1 = http://dfw1.host/v1/ req = swob.Request.blank( '/v1/a/c', headers={'x-container-sync-auth': 'US nonce ' + sig}) infocache = req.environ.setdefault('swift.infocache', {}) - infocache[_get_cache_key('a', 'c')[1]] = {'sync_key': 'abc'} + infocache[get_cache_key('a', 'c')] = {'sync_key': 'abc'} resp = req.get_response(self.sync) self.assertEqual(resp.status, '200 OK') self.assertEqual(resp.body, 'Response to Authorized Request') diff --git a/test/unit/common/middleware/test_formpost.py b/test/unit/common/middleware/test_formpost.py index 6e9da72857..fabfe1931f 100644 --- a/test/unit/common/middleware/test_formpost.py +++ b/test/unit/common/middleware/test_formpost.py @@ -24,6 +24,7 @@ from six import BytesIO from swift.common.swob import Request, Response from swift.common.middleware import tempauth, formpost from swift.common.utils import split_path +from swift.proxy.controllers.base import get_cache_key class FakeApp(object): @@ -131,7 +132,7 @@ class TestFormPost(unittest.TestCase): _junk, account, _junk, _junk = split_path(path, 2, 4) req.environ.setdefault('swift.infocache', {}) - req.environ['swift.infocache']['swift.account/' + account] = \ + req.environ['swift.infocache'][get_cache_key(account)] = \ self._fake_cache_env(account, tempurl_keys) return req @@ -249,7 +250,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() - 10), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) @@ -354,9 +355,10 @@ class TestFormPost(unittest.TestCase): 'SERVER_PORT': '8080', 'SERVER_PROTOCOL': 'HTTP/1.0', 'swift.infocache': { - 
'swift.account/AUTH_test': self._fake_cache_env( + get_cache_key('AUTH_test'): self._fake_cache_env( 'AUTH_test', [key]), - 'swift.container/AUTH_test/container': {'meta': {}}}, + get_cache_key('AUTH_test', 'container'): { + 'meta': {}}}, 'wsgi.errors': wsgi_errors, 'wsgi.input': wsgi_input, 'wsgi.multiprocess': False, @@ -471,9 +473,10 @@ class TestFormPost(unittest.TestCase): 'SERVER_PORT': '8080', 'SERVER_PROTOCOL': 'HTTP/1.0', 'swift.infocache': { - 'swift.account/AUTH_test': self._fake_cache_env( + get_cache_key('AUTH_test'): self._fake_cache_env( 'AUTH_test', [key]), - 'swift.container/AUTH_test/container': {'meta': {}}}, + get_cache_key('AUTH_test', 'container'): { + 'meta': {}}}, 'wsgi.errors': wsgi_errors, 'wsgi.input': wsgi_input, 'wsgi.multiprocess': False, @@ -591,9 +594,10 @@ class TestFormPost(unittest.TestCase): 'SERVER_PORT': '8080', 'SERVER_PROTOCOL': 'HTTP/1.0', 'swift.infocache': { - 'swift.account/AUTH_test': self._fake_cache_env( + get_cache_key('AUTH_test'): self._fake_cache_env( 'AUTH_test', [key]), - 'swift.container/AUTH_test/container': {'meta': {}}}, + get_cache_key('AUTH_test', 'container'): { + 'meta': {}}}, 'wsgi.errors': wsgi_errors, 'wsgi.input': wsgi_input, 'wsgi.multiprocess': False, @@ -707,9 +711,10 @@ class TestFormPost(unittest.TestCase): 'SERVER_PORT': '8080', 'SERVER_PROTOCOL': 'HTTP/1.0', 'swift.infocache': { - 'swift.account/AUTH_test': self._fake_cache_env( + get_cache_key('AUTH_test'): self._fake_cache_env( 'AUTH_test', [key]), - 'swift.container/AUTH_test/container': {'meta': {}}}, + get_cache_key('AUTH_test', 'container'): { + 'meta': {}}}, 'wsgi.errors': wsgi_errors, 'wsgi.input': wsgi_input, 'wsgi.multiprocess': False, @@ -753,10 +758,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://brim.net', 5, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'XX' + b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] 
= ( self._fake_cache_env('AUTH_test', [key])) - env['swift.infocache']['swift.container/AUTH_test/container'] = { - 'meta': {}} + env['swift.infocache'][get_cache_key( + 'AUTH_test', 'container')] = {'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -790,10 +795,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://brim.net', 5, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) - env['swift.infocache']['swift.container/AUTH_test/container'] = { - 'meta': {}} + env['swift.infocache'][get_cache_key( + 'AUTH_test', 'container')] = {'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -822,10 +827,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://brim.net', 1024, 1, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) - env['swift.infocache']['swift.container/AUTH_test/container'] = { - 'meta': {}} + env['swift.infocache'][get_cache_key( + 'AUTH_test', 'container')] = {'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -864,10 +869,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) env['QUERY_STRING'] = 'this=should¬=get&passed' env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) - 
env['swift.infocache']['swift.container/AUTH_test/container'] = { - 'meta': {}} + env['swift.infocache'][get_cache_key( + 'AUTH_test', 'container')] = {'meta': {}} self.app = FakeApp( iter([('201 Created', {}, ''), ('201 Created', {}, '')]), @@ -900,10 +905,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://brim.net', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) - env['swift.infocache']['swift.container/AUTH_test/container'] = { - 'meta': {}} + env['swift.infocache'][get_cache_key( + 'AUTH_test', 'container')] = {'meta': {}} self.app = FakeApp(iter([('404 Not Found', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -987,10 +992,10 @@ class TestFormPost(unittest.TestCase): if six.PY3: wsgi_input = wsgi_input.encode('utf-8') env['wsgi.input'] = BytesIO(wsgi_input) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) - env['swift.infocache']['swift.container/AUTH_test/container'] = { - 'meta': {}} + env['swift.infocache'][get_cache_key( + 'AUTH_test', 'container')] = {'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1059,10 +1064,10 @@ class TestFormPost(unittest.TestCase): if six.PY3: wsgi_input = wsgi_input.encode('utf-8') env['wsgi.input'] = BytesIO(wsgi_input) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) - env['swift.infocache']['swift.container/AUTH_test/container'] = { - 'meta': {}} + env['swift.infocache'][get_cache_key( + 'AUTH_test', 'container')] = {'meta': {}} self.app = FakeApp(iter([('201 
Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1100,10 +1105,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://redirect', 1024, 10, int(time() + 86400), key, user_agent=False) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) - env['swift.infocache']['swift.container/AUTH_test/container'] = { - 'meta': {}} + env['swift.infocache'][get_cache_key( + 'AUTH_test', 'container')] = {'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1122,10 +1127,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://redirect', 1024, 10, int(time() + 86400), key, user_agent=False) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) - env['swift.infocache']['swift.container/AUTH_test/container'] = { - 'meta': {}} + env['swift.infocache'][get_cache_key( + 'AUTH_test', 'container')] = {'meta': {}} env['HTTP_ORIGIN'] = 'http://localhost:5000' self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', @@ -1152,10 +1157,10 @@ class TestFormPost(unittest.TestCase): int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) # Stick it in X-Account-Meta-Temp-URL-Key-2 and make sure we get it - env['swift.infocache']['swift.account/AUTH_test'] = ( - self._fake_cache_env('AUTH_test', ['bert', key])) - env['swift.infocache']['swift.container/AUTH_test/container'] = \ - {'meta': {}} + env['swift.infocache'][get_cache_key('AUTH_test')] = ( + self._fake_cache_env('AUTH_test', [key])) + env['swift.infocache'][get_cache_key( + 'AUTH_test', 'container')] = {'meta': {}} self.app = 
FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1189,11 +1194,11 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://redirect', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test')) # Stick it in X-Container-Meta-Temp-URL-Key-2 and ensure we get it - env['swift.infocache']['swift.container/AUTH_test/container'] = { - 'meta': meta} + env['swift.infocache'][get_cache_key( + 'AUTH_test', 'container')] = {'meta': meta} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1217,10 +1222,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://redirect', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) - env['swift.infocache']['swift.container/AUTH_test/container'] = { - 'meta': {}} + env['swift.infocache'][get_cache_key( + 'AUTH_test', 'container')] = {'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1256,10 +1261,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', 'http://redirect?one=two', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) - env['swift.infocache']['swift.container/AUTH_test/container'] = { - 'meta': {}} + env['swift.infocache'][get_cache_key( + 'AUTH_test', 'container')] = {'meta': {}} self.app = 
FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1295,10 +1300,10 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) - env['swift.infocache']['swift.container/AUTH_test/container'] = { - 'meta': {}} + env['swift.infocache'][get_cache_key( + 'AUTH_test', 'container')] = {'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1333,7 +1338,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() - 10), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) @@ -1367,7 +1372,7 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) # Change key to invalidate sig - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key + ' is bogus now'])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) @@ -1400,7 +1405,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'XX' + b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + 
env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) @@ -1433,7 +1438,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v2/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) @@ -1466,7 +1471,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '//AUTH_test/container', '', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) @@ -1499,7 +1504,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1//container', '', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) @@ -1532,7 +1537,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_tst/container', '', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([ ('200 Ok', {'x-account-meta-temp-url-key': 'def'}, ''), @@ -1567,7 +1572,7 @@ class 
TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test', '', 1024, 10, int(time() + 86400), key) env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) @@ -1605,7 +1610,7 @@ class TestFormPost(unittest.TestCase): body[i] = 'badvalue' break env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) @@ -1646,10 +1651,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) wsgi_input = b'\r\n'.join(x_delete_body_part + body) env['wsgi.input'] = BytesIO(wsgi_input) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) - env['swift.infocache']['swift.container/AUTH_test/container'] = { - 'meta': {}} + env['swift.infocache'][get_cache_key( + 'AUTH_test', 'container')] = {'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1690,7 +1695,7 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) wsgi_input = b'\r\n'.join(x_delete_body_part + body) env['wsgi.input'] = BytesIO(wsgi_input) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) @@ -1725,10 +1730,10 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', '', 
1024, 10, int(time() + 86400), key) wsgi_input = b'\r\n'.join(x_delete_body_part + body) env['wsgi.input'] = BytesIO(wsgi_input) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) - env['swift.infocache']['swift.container/AUTH_test/container'] = { - 'meta': {}} + env['swift.infocache'][get_cache_key( + 'AUTH_test', 'container')] = {'meta': {}} self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) self.auth = tempauth.filter_factory({})(self.app) @@ -1769,7 +1774,7 @@ class TestFormPost(unittest.TestCase): '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) wsgi_input = b'\r\n'.join(x_delete_body_part + body) env['wsgi.input'] = BytesIO(wsgi_input) - env['swift.infocache']['swift.account/AUTH_test'] = ( + env['swift.infocache'][get_cache_key('AUTH_test')] = ( self._fake_cache_env('AUTH_test', [key])) self.app = FakeApp(iter([('201 Created', {}, ''), ('201 Created', {}, '')])) diff --git a/test/unit/common/middleware/test_keystoneauth.py b/test/unit/common/middleware/test_keystoneauth.py index 4b82c88dc4..96d0ed4902 100644 --- a/test/unit/common/middleware/test_keystoneauth.py +++ b/test/unit/common/middleware/test_keystoneauth.py @@ -19,7 +19,7 @@ from swift.common.middleware import keystoneauth from swift.common.swob import Request, Response from swift.common.http import HTTP_FORBIDDEN from swift.common.utils import split_path -from swift.proxy.controllers.base import _get_cache_key +from swift.proxy.controllers.base import get_cache_key from test.unit import FakeLogger UNKNOWN_ID = keystoneauth.UNKNOWN_ID @@ -251,7 +251,7 @@ class SwiftAuth(unittest.TestCase): account = get_account_for_tenant(self.test_auth, proj_id) path = '/v1/' + account # fake cached account info - _, info_key = _get_cache_key(account, None) + info_key = get_cache_key(account) env = {'swift.infocache': {info_key: {'status': 0, 'sysmeta': {}}}, 
'keystone.token_info': _fake_token_info(version='3')} req = Request.blank(path, environ=env, headers=headers) @@ -280,7 +280,7 @@ class SwiftAuth(unittest.TestCase): account = get_account_for_tenant(self.test_auth, proj_id) path = '/v1/' + account # fake cached account info - _, info_key = _get_cache_key(account, None) + info_key = get_cache_key(account) env = {'swift.infocache': {info_key: {'status': 0, 'sysmeta': {}}}, 'keystone.token_info': _fake_token_info(version='3')} req = Request.blank(path, environ=env, headers=headers) @@ -301,7 +301,7 @@ class SwiftAuth(unittest.TestCase): headers = get_identity_headers(tenant_id=proj_id, role='admin') account = get_account_for_tenant(self.test_auth, proj_id) path = '/v1/' + account - _, info_key = _get_cache_key(account, None) + info_key = get_cache_key(account) # v2 token env = {'swift.infocache': {info_key: {'status': 0, 'sysmeta': {}}}, 'keystone.token_info': _fake_token_info(version='2')} @@ -323,7 +323,7 @@ class SwiftAuth(unittest.TestCase): role='reselleradmin') account = get_account_for_tenant(self.test_auth, proj_id) path = '/v1/' + account - _, info_key = _get_cache_key(account, None) + info_key = get_cache_key(account) # v2 token env = {'swift.infocache': {info_key: {'status': 0, 'sysmeta': {}}}, 'keystone.token_info': _fake_token_info(version='2')} @@ -381,7 +381,7 @@ class ServiceTokenFunctionality(unittest.TestCase): role=user_role, service_role=service_role) (version, account, _junk, _junk) = split_path(path, 2, 4, True) - _, info_key = _get_cache_key(account, None) + info_key = get_cache_key(account) env = {'swift.infocache': {info_key: {'status': 0, 'sysmeta': {}}}, 'keystone.token_info': _fake_token_info(version='2')} if environ: @@ -595,7 +595,7 @@ class TestAuthorize(BaseTestAuthorize): if not path: path = '/v1/%s/c' % account # fake cached account info - _, info_key = _get_cache_key(account, None) + info_key = get_cache_key(account) default_env = { 'REMOTE_USER': identity['HTTP_X_TENANT_ID'], 
'swift.infocache': {info_key: {'status': 200, 'sysmeta': {}}}} @@ -985,7 +985,7 @@ class TestAuthorize(BaseTestAuthorize): def test_get_project_domain_id(self): sysmeta = {} info = {'sysmeta': sysmeta} - _, info_key = _get_cache_key('AUTH_1234', None) + info_key = get_cache_key('AUTH_1234') env = {'PATH_INFO': '/v1/AUTH_1234', 'swift.infocache': {info_key: info}} @@ -1029,7 +1029,7 @@ class TestIsNameAllowedInACL(BaseTestAuthorize): # pretend account exists info = {'status': 200, 'sysmeta': sysmeta} - _, info_key = _get_cache_key(account, None) + info_key = get_cache_key(account) req = Request.blank(path, environ={'swift.infocache': {info_key: info}}) @@ -1216,7 +1216,7 @@ class TestSetProjectDomain(BaseTestAuthorize): if sysmeta_project_domain_id: sysmeta['project-domain-id'] = sysmeta_project_domain_id info = {'status': status, 'sysmeta': sysmeta} - _, info_key = _get_cache_key(account, None) + info_key = get_cache_key(account) env = {'swift.infocache': {info_key: info}} # create fake env identity diff --git a/test/unit/common/middleware/test_quotas.py b/test/unit/common/middleware/test_quotas.py index f4eba5b76c..0bec5cad51 100644 --- a/test/unit/common/middleware/test_quotas.py +++ b/test/unit/common/middleware/test_quotas.py @@ -246,8 +246,8 @@ class ContainerQuotaCopyingTestCases(unittest.TestCase): req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'COPY', 'swift.infocache': { - 'swift.container/a/c': a_c_cache, - 'swift.container/a2/c': a2_c_cache}}, + 'container/a/c': a_c_cache, + 'container/a2/c': a2_c_cache}}, headers={'Destination': '/c/o', 'Destination-Account': 'a2'}) res = req.get_response(self.copy_filter) @@ -263,8 +263,8 @@ class ContainerQuotaCopyingTestCases(unittest.TestCase): req = Request.blank('/v1/a2/c/o', environ={'REQUEST_METHOD': 'PUT', 'swift.infocache': { - 'swift.container/a/c': a_c_cache, - 'swift.container/a2/c': a2_c_cache}}, + 'container/a/c': a_c_cache, + 'container/a2/c': a2_c_cache}}, headers={'X-Copy-From': 
'/c2/o2', 'X-Copy-From-Account': 'a'}) res = req.get_response(self.copy_filter) diff --git a/test/unit/common/middleware/test_ratelimit.py b/test/unit/common/middleware/test_ratelimit.py index 66ca43d033..0fc6a61a11 100644 --- a/test/unit/common/middleware/test_ratelimit.py +++ b/test/unit/common/middleware/test_ratelimit.py @@ -21,7 +21,7 @@ from contextlib import contextmanager from test.unit import FakeLogger from swift.common.middleware import ratelimit -from swift.proxy.controllers.base import get_container_memcache_key, \ +from swift.proxy.controllers.base import get_cache_key, \ headers_to_container_info from swift.common.memcached import MemcacheConnectionError from swift.common.swob import Request @@ -185,7 +185,7 @@ class TestRateLimit(unittest.TestCase): conf_dict = {'account_ratelimit': current_rate, 'container_ratelimit_3': 200} fake_memcache = FakeMemcache() - fake_memcache.store[get_container_memcache_key('a', 'c')] = \ + fake_memcache.store[get_cache_key('a', 'c')] = \ {'object_count': '5'} the_app = ratelimit.filter_factory(conf_dict)(FakeApp()) the_app.memcache_client = fake_memcache @@ -229,7 +229,7 @@ class TestRateLimit(unittest.TestCase): conf_dict = {'account_ratelimit': current_rate, 'container_ratelimit_3': 200} fake_memcache = FakeMemcache() - fake_memcache.store[get_container_memcache_key('a', 'c')] = \ + fake_memcache.store[get_cache_key('a', 'c')] = \ {'container_size': 5} the_app = ratelimit.filter_factory(conf_dict)(FakeApp()) the_app.memcache_client = fake_memcache @@ -431,7 +431,7 @@ class TestRateLimit(unittest.TestCase): req.method = 'PUT' req.environ['swift.cache'] = FakeMemcache() req.environ['swift.cache'].set( - get_container_memcache_key('a', 'c'), + get_cache_key('a', 'c'), {'object_count': 1}) time_override = [0, 0, 0, 0, None] @@ -465,7 +465,7 @@ class TestRateLimit(unittest.TestCase): req.method = 'GET' req.environ['swift.cache'] = FakeMemcache() req.environ['swift.cache'].set( - get_container_memcache_key('a', 'c'), + 
get_cache_key('a', 'c'), {'object_count': 1}) with mock.patch('swift.common.middleware.ratelimit.get_account_info', diff --git a/test/unit/common/middleware/test_tempurl.py b/test/unit/common/middleware/test_tempurl.py index fed3cbd17d..0d5ed07111 100644 --- a/test/unit/common/middleware/test_tempurl.py +++ b/test/unit/common/middleware/test_tempurl.py @@ -97,7 +97,7 @@ class TestTempURL(unittest.TestCase): meta[meta_name] = key ic = environ.setdefault('swift.infocache', {}) - ic['swift.account/' + account] = { + ic['account/' + account] = { 'status': 204, 'container_count': '0', 'total_object_count': '0', @@ -109,7 +109,7 @@ class TestTempURL(unittest.TestCase): meta_name = 'Temp-URL-key' + (("-%d" % (i + 1) if i else "")) meta[meta_name] = key - container_cache_key = 'swift.container/' + account + '/c' + container_cache_key = 'container/' + account + '/c' ic.setdefault(container_cache_key, {'meta': meta}) def test_passthrough(self): @@ -169,7 +169,7 @@ class TestTempURL(unittest.TestCase): meta_name = 'Temp-URL-key' + (("-%d" % (idx + 1) if idx else "")) if key: meta[meta_name] = key - ic['swift.container/a/c'] = {'meta': meta} + ic['container/a/c'] = {'meta': meta} method = 'GET' expires = int(time() + 86400) diff --git a/test/unit/proxy/controllers/test_account.py b/test/unit/proxy/controllers/test_account.py index 4ef77f3dab..86206f02a1 100644 --- a/test/unit/proxy/controllers/test_account.py +++ b/test/unit/proxy/controllers/test_account.py @@ -69,11 +69,10 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/v1/AUTH_bob', {'PATH_INFO': '/v1/AUTH_bob'}) resp = controller.HEAD(req) self.assertEqual(2, resp.status_int // 100) - self.assertTrue( - 'swift.account/AUTH_bob' in resp.environ['swift.infocache']) + self.assertIn('account/AUTH_bob', resp.environ['swift.infocache']) self.assertEqual( headers_to_account_info(resp.headers), - resp.environ['swift.infocache']['swift.account/AUTH_bob']) + 
resp.environ['swift.infocache']['account/AUTH_bob']) def test_swift_owner(self): owner_headers = { @@ -228,13 +227,20 @@ class TestAccountController(unittest.TestCase): self.assertEqual(1, len(resp.headers)) # we always get Content-Type self.assertEqual(2, len(resp.environ)) - def test_memcache_key_impossible_cases(self): + def test_cache_key_impossible_cases(self): # For test coverage: verify that defensive coding does defend, in cases # that shouldn't arise naturally - self.assertRaises( - ValueError, - lambda: swift.proxy.controllers.base.get_container_memcache_key( - '/a', None)) + with self.assertRaises(ValueError): + # Container needs account + swift.proxy.controllers.base.get_cache_key(None, 'c') + + with self.assertRaises(ValueError): + # Object needs account + swift.proxy.controllers.base.get_cache_key(None, 'c', 'o') + + with self.assertRaises(ValueError): + # Object needs container + swift.proxy.controllers.base.get_cache_key('a', None, 'o') def test_stripping_swift_admin_headers(self): # Verify that a GET/HEAD which receives privileged headers from the diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index 07c237b599..689c6c88a8 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -19,8 +19,7 @@ import unittest from mock import patch from swift.proxy.controllers.base import headers_to_container_info, \ headers_to_account_info, headers_to_object_info, get_container_info, \ - get_container_memcache_key, get_account_info, get_account_memcache_key, \ - get_object_env_key, get_info, get_object_info, \ + get_cache_key, get_account_info, get_info, get_object_info, \ Controller, GetOrHeadHandler, bytes_to_skip from swift.common.swob import Request, HTTPException, RESPONSE_REASONS from swift.common import exceptions @@ -210,7 +209,6 @@ class TestFuncs(unittest.TestCase): # cached app.responses.stats['account'] = 0 app.responses.stats['container'] = 0 - 
del(env['swift.infocache']['swift.account/a']) info_c = get_info(app, env, 'a', 'c') # Check that you got proper info self.assertEqual(info_a['status'], 200) @@ -274,11 +272,10 @@ class TestFuncs(unittest.TestCase): self.assertEqual(resp['versions'], "\xe1\xbd\x8a\x39") def test_get_container_info_env(self): - cache_key = get_container_memcache_key("account", "cont") - env_key = 'swift.%s' % cache_key + cache_key = get_cache_key("account", "cont") req = Request.blank( "/v1/account/cont", - environ={'swift.infocache': {env_key: {'bytes': 3867}}, + environ={'swift.infocache': {cache_key: {'bytes': 3867}}, 'swift.cache': FakeCache({})}) resp = get_container_info(req.environ, 'xxx') self.assertEqual(resp['bytes'], 3867) @@ -344,11 +341,10 @@ class TestFuncs(unittest.TestCase): self.assertEqual(resp['total_object_count'], 10) def test_get_account_info_env(self): - cache_key = get_account_memcache_key("account") - env_key = 'swift.%s' % cache_key + cache_key = get_cache_key("account") req = Request.blank( "/v1/account", - environ={'swift.infocache': {env_key: {'bytes': 3867}}, + environ={'swift.infocache': {cache_key: {'bytes': 3867}}, 'swift.cache': FakeCache({})}) resp = get_account_info(req.environ, 'xxx') self.assertEqual(resp['bytes'], 3867) @@ -358,10 +354,10 @@ class TestFuncs(unittest.TestCase): 'length': 3333, 'type': 'application/json', 'meta': {}} - env_key = get_object_env_key("account", "cont", "obj") + cache_key = get_cache_key("account", "cont", "obj") req = Request.blank( "/v1/account/cont/obj", - environ={'swift.infocache': {env_key: cached}, + environ={'swift.infocache': {cache_key: cached}, 'swift.cache': FakeCache({})}) resp = get_object_info(req.environ, 'xxx') self.assertEqual(resp['length'], 3333) diff --git a/test/unit/proxy/controllers/test_container.py b/test/unit/proxy/controllers/test_container.py index fc5692d8c5..f08a90dfb1 100644 --- a/test/unit/proxy/controllers/test_container.py +++ b/test/unit/proxy/controllers/test_container.py @@ 
-103,11 +103,10 @@ class TestContainerController(TestRingBase): resp = controller.HEAD(req) self.assertEqual(2, resp.status_int // 100) # Make sure it's in both swift.infocache and memcache - self.assertTrue( - "swift.container/a/c" in req.environ['swift.infocache']) + self.assertIn("container/a/c", resp.environ['swift.infocache']) self.assertEqual( headers_to_container_info(resp.headers), - resp.environ['swift.infocache']['swift.container/a/c']) + resp.environ['swift.infocache']['container/a/c']) from_memcache = self.app.memcache.get('container/a/c') self.assertTrue(from_memcache) diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index a34fee34a4..35a2178c1c 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -93,7 +93,7 @@ class PatchedObjControllerApp(proxy_server.Application): # Seed the cache with our container info so that the real # get_container_info finds it. ic = env.setdefault('swift.infocache', {}) - cache_key = "swift.container/%s/%s" % (account, container) + cache_key = "container/%s/%s" % (account, container) old_value = ic.get(cache_key) diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index a2757f472e..7aac742c19 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -73,9 +73,8 @@ from swift.common.ring import RingData from swift.common.utils import mkdirs, normalize_timestamp, NullLogger from swift.common.wsgi import monkey_patch_mimetools, loadapp from swift.proxy.controllers import base as proxy_base -from swift.proxy.controllers.base import get_container_memcache_key, \ - get_account_memcache_key, cors_validation, get_account_info, \ - get_container_info +from swift.proxy.controllers.base import get_cache_key, cors_validation, \ + get_account_info, get_container_info import swift.proxy.controllers import swift.proxy.controllers.obj from swift.common.header_key_dict import HeaderKeyDict @@ 
-482,14 +481,14 @@ class TestController(unittest.TestCase): self.controller.account_info(self.account) self.assertEqual(count, 123) with save_globals(): - cache_key = get_account_memcache_key(self.account) + cache_key = get_cache_key(self.account) account_info = {'status': 200, 'container_count': 1234} self.memcache.set(cache_key, account_info) partition, nodes, count = \ self.controller.account_info(self.account) self.assertEqual(count, 1234) with save_globals(): - cache_key = get_account_memcache_key(self.account) + cache_key = get_cache_key(self.account) account_info = {'status': 200, 'container_count': '1234'} self.memcache.set(cache_key, account_info) partition, nodes, count = \ @@ -517,7 +516,7 @@ class TestController(unittest.TestCase): # Test the internal representation in memcache # 'container_count' changed from int to str - cache_key = get_account_memcache_key(self.account) + cache_key = get_cache_key(self.account) container_info = {'status': 200, 'account_really_exists': True, 'container_count': '12345', @@ -545,7 +544,7 @@ class TestController(unittest.TestCase): # Test the internal representation in memcache # 'container_count' changed from 0 to None - cache_key = get_account_memcache_key(self.account) + cache_key = get_cache_key(self.account) account_info = {'status': 404, 'container_count': None, # internally keep None 'total_object_count': None, @@ -622,8 +621,7 @@ class TestController(unittest.TestCase): self.account, self.container, self.request) self.check_container_info_return(ret) - cache_key = get_container_memcache_key(self.account, - self.container) + cache_key = get_cache_key(self.account, self.container) cache_value = self.memcache.get(cache_key) self.assertTrue(isinstance(cache_value, dict)) self.assertEqual(200, cache_value.get('status')) @@ -645,8 +643,7 @@ class TestController(unittest.TestCase): self.account, self.container, self.request) self.check_container_info_return(ret, True) - cache_key = 
get_container_memcache_key(self.account, - self.container) + cache_key = get_cache_key(self.account, self.container) cache_value = self.memcache.get(cache_key) self.assertTrue(isinstance(cache_value, dict)) self.assertEqual(404, cache_value.get('status')) @@ -661,8 +658,7 @@ class TestController(unittest.TestCase): self.account, self.container, self.request) self.check_container_info_return(ret, True) - cache_key = get_container_memcache_key(self.account, - self.container) + cache_key = get_cache_key(self.account, self.container) cache_value = self.memcache.get(cache_key) self.assertTrue(isinstance(cache_value, dict)) self.assertEqual(404, cache_value.get('status')) @@ -6351,18 +6347,18 @@ class TestContainerController(unittest.TestCase): self.assertIn('x-works', res.headers) self.assertEqual(res.headers['x-works'], 'yes') if c_expected: - self.assertIn('swift.container/a/c', infocache) + self.assertIn('container/a/c', infocache) self.assertEqual( - infocache['swift.container/a/c']['status'], + infocache['container/a/c']['status'], c_expected) else: - self.assertNotIn('swift.container/a/c', infocache) + self.assertNotIn('container/a/c', infocache) if a_expected: - self.assertIn('swift.account/a', infocache) - self.assertEqual(infocache['swift.account/a']['status'], + self.assertIn('account/a', infocache) + self.assertEqual(infocache['account/a']['status'], a_expected) else: - self.assertNotIn('swift.account/a', res.environ) + self.assertNotIn('account/a', res.environ) set_http_connect(*statuses, **kwargs) self.app.memcache.store = {} @@ -6376,18 +6372,18 @@ class TestContainerController(unittest.TestCase): self.assertTrue('x-works' in res.headers) self.assertEqual(res.headers['x-works'], 'yes') if c_expected: - self.assertIn('swift.container/a/c', infocache) + self.assertIn('container/a/c', infocache) self.assertEqual( - infocache['swift.container/a/c']['status'], + infocache['container/a/c']['status'], c_expected) else: - self.assertNotIn('swift.container/a/c', 
infocache) + self.assertNotIn('container/a/c', infocache) if a_expected: - self.assertIn('swift.account/a', infocache) - self.assertEqual(infocache['swift.account/a']['status'], + self.assertIn('account/a', infocache) + self.assertEqual(infocache['account/a']['status'], a_expected) else: - self.assertNotIn('swift.account/a', infocache) + self.assertNotIn('account/a', infocache) # In all the following tests cache 200 for account # return and cache vary for container # return 200 and cache 200 for account and container @@ -7000,7 +6996,7 @@ class TestContainerController(unittest.TestCase): res = controller.GET(req) self.assertEqual(res.status_int, 204) ic = res.environ['swift.infocache'] - self.assertEqual(ic['swift.container/a/c']['status'], 204) + self.assertEqual(ic['container/a/c']['status'], 204) self.assertEqual(res.content_length, 0) self.assertTrue('transfer-encoding' not in res.headers) @@ -7018,7 +7014,7 @@ class TestContainerController(unittest.TestCase): self.app.update_request(req) res = controller.GET(req) self.assertEqual( - res.environ['swift.infocache']['swift.container/a/c']['status'], + res.environ['swift.infocache']['container/a/c']['status'], 201) self.assertTrue(called[0]) @@ -7488,7 +7484,7 @@ class TestAccountController(unittest.TestCase): self.assertEqual(res.status_int, expected) infocache = res.environ.get('swift.infocache', {}) if env_expected: - self.assertEqual(infocache['swift.account/a']['status'], + self.assertEqual(infocache['account/a']['status'], env_expected) set_http_connect(*statuses) req = Request.blank('/v1/a/', {}) @@ -7497,7 +7493,7 @@ class TestAccountController(unittest.TestCase): infocache = res.environ.get('swift.infocache', {}) self.assertEqual(res.status_int, expected) if env_expected: - self.assertEqual(infocache['swift.account/a']['status'], + self.assertEqual(infocache['account/a']['status'], env_expected) def test_OPTIONS(self): From 876df35f847e607901e2ed8a344c71c607495705 Mon Sep 17 00:00:00 2001 From: Matthew 
Oliver Date: Tue, 17 May 2016 10:48:25 +1000 Subject: [PATCH 129/141] disable_fallocate also disables fallocate_reserve Currently when disable_fallocate is true it disables calling the fallocate syscall, but it doesn't disable fallocate_reserve. This patch fixes this. This problem has caused functional tests to fail in our SAIOs, since SAIOs have disable_fallocate set but the fallocate_reserve space free checking was still being run creating 507 responses. This is thanks to the change in fallocate_reserve default changing from 0 to 1%. Because fallocate_reserve and disable_fallocate causes SAIO functional tests to fail a section called 'Known Issues' has been added to the SAIO developer documentation which includes a warning about using fallocate_reserve on SAIOs. Change-Id: I727bfb0861ea26fe2f16ad55f4d36ae088864d8f --- doc/source/development_saio.rst | 14 ++++++++++++++ swift/common/utils.py | 25 ++++++++++++++----------- test/unit/common/test_utils.py | 13 +++++++++++++ 3 files changed, 41 insertions(+), 11 deletions(-) diff --git a/doc/source/development_saio.rst b/doc/source/development_saio.rst index bca218cad5..1d497a526f 100644 --- a/doc/source/development_saio.rst +++ b/doc/source/development_saio.rst @@ -591,3 +591,17 @@ doesn't work, here are some good starting places to look for issues: you check that you can ``GET`` account, use ``sudo service memcached status`` and check if memcache is running. If memcache is not running, start it using ``sudo service memcached start``. Once memcache is running, rerun ``GET`` account. + +------------ +Known Issues +------------ + +Listed here are some "gotcha's" that you may run into when using or testing your SAIO: + +#. fallocate_reserve - in most cases a SAIO doesn't have a very large XFS partition + so having fallocate enabled and fallocate_reserve set can cause issues, specifically + when trying to run the functional tests. For this reason fallocate has been turned + off on the object-servers in the SAIO. 
If you want to play with the fallocate_reserve + settings then know that functional tests will fail unless you change the max_file_size + constraint to something more reasonable than the default (5G). Ideally you'd make + it 1/4 of your XFS file system size so the tests can pass. diff --git a/swift/common/utils.py b/swift/common/utils.py index a33df51ed8..4694f5951e 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -596,7 +596,8 @@ class FileLikeIter(object): class FallocateWrapper(object): def __init__(self, noop=False): - if noop: + self.noop = noop + if self.noop: self.func_name = 'posix_fallocate' self.fallocate = noop_libc_function return @@ -614,16 +615,18 @@ class FallocateWrapper(object): def __call__(self, fd, mode, offset, length): """The length parameter must be a ctypes.c_uint64.""" - if FALLOCATE_RESERVE > 0: - st = os.fstatvfs(fd) - free = st.f_frsize * st.f_bavail - length.value - if FALLOCATE_IS_PERCENT: - free = (float(free) / float(st.f_frsize * st.f_blocks)) * 100 - if float(free) <= float(FALLOCATE_RESERVE): - raise OSError( - errno.ENOSPC, - 'FALLOCATE_RESERVE fail %s <= %s' % (free, - FALLOCATE_RESERVE)) + if not self.noop: + if FALLOCATE_RESERVE > 0: + st = os.fstatvfs(fd) + free = st.f_frsize * st.f_bavail - length.value + if FALLOCATE_IS_PERCENT: + free = \ + (float(free) / float(st.f_frsize * st.f_blocks)) * 100 + if float(free) <= float(FALLOCATE_RESERVE): + raise OSError( + errno.ENOSPC, + 'FALLOCATE_RESERVE fail %s <= %s' % + (free, FALLOCATE_RESERVE)) args = { 'fallocate': (fd, mode, offset, length), 'posix_fallocate': (fd, offset, length) diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 9b9ffe9b14..14e826c908 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -2601,6 +2601,19 @@ cluster_dfw1 = http://dfw1.host/v1/ try: fallocate = utils.FallocateWrapper(noop=True) utils.os.fstatvfs = fstatvfs + + # Make sure setting noop, which disables fallocate, also
stops the + # fallocate_reserve check. + # Set the fallocate_reserve to 99% and request an object that is + # about 50% the size. With fallocate_reserve off this will succeed. + utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ + utils.config_fallocate_value('99%') + self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(500)), 0) + + # Setting noop to False after the constructor allows us to use + # a noop fallocate syscall and still test fallocate_reserve. + fallocate.noop = False + # Want 1023 reserved, have 1024 * 1 free, so succeeds utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1023') From 9ae3ee5a8b6aa169e1f4179cee748c721f480229 Mon Sep 17 00:00:00 2001 From: Alfredo Moralejo Date: Thu, 19 May 2016 15:56:15 +0200 Subject: [PATCH 130/141] Fix locale directory in MANIFEST.in Actual location is swift/locale, not locale. This makes build to not include .mo files if they have been created previously with compile_catalog. Change-Id: I907e9bd6dde57cb9ed718656e56a313e82bad128 Closes-Bug: 1583618 --- MANIFEST.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index 87eb0c9c96..4a65073dba 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -6,7 +6,7 @@ include tox.ini include requirements.txt test-requirements.txt graft doc graft etc -graft locale +graft swift/locale graft test/functional graft test/probe graft test/unit From 14eb1803e931c5c501d99e612a6d59a57eccf464 Mon Sep 17 00:00:00 2001 From: Kota Tsuyuzaki Date: Thu, 12 May 2016 23:57:49 -0700 Subject: [PATCH 131/141] Fix probe failure and small things This is follow up for https://review.openstack.org/#/c/283351/. Probe fix: - The probe in the patch now fails (sometimes success luckily) because inbound X-Timestamp is deprecated at the change, f581fccf71034818d19062593eeb52a4347bb174, so we can not use X-Timestamp to make an object with arbitrary timestamp anymore from outside of Swift. 
This patch makes the probe to use internal client to put the objects to make the inconsistent situation. Small things: - Enable expirer split brain test even if we have just one policy. - FAIL rather than ERROR if the object was expired incorrectly - ObjectBrainSplitter now uses the policy set at instance variable in default instead of random choice of ENABLED_POLICIES. Co-Authored-By: Alistair Coles Change-Id: I757dbb0f1906932ef5d508b48b4120f2794b3d07 --- test/probe/brain.py | 7 +- test/probe/test_object_expirer.py | 105 +++++++++++++++++++++++------- 2 files changed, 86 insertions(+), 26 deletions(-) diff --git a/test/probe/brain.py b/test/probe/brain.py index ea5c2cc5ee..9f90ed8d8b 100644 --- a/test/probe/brain.py +++ b/test/probe/brain.py @@ -142,11 +142,16 @@ class BrainSplitter(object): """ put container with next storage policy """ - policy = next(self.policies) + if policy_index is not None: policy = POLICIES.get_by_index(int(policy_index)) if not policy: raise ValueError('Unknown policy with index %s' % policy) + elif not self.policy: + policy = next(self.policies) + else: + policy = self.policy + headers = {'X-Storage-Policy': policy.name} client.put_container(self.url, self.token, self.container_name, headers=headers) diff --git a/test/probe/test_object_expirer.py b/test/probe/test_object_expirer.py index 3cfee3656b..17b4092671 100644 --- a/test/probe/test_object_expirer.py +++ b/test/probe/test_object_expirer.py @@ -19,7 +19,7 @@ import unittest from nose import SkipTest -from swift.common.internal_client import InternalClient +from swift.common.internal_client import InternalClient, UnexpectedResponse from swift.common.manager import Manager from swift.common.utils import Timestamp @@ -32,9 +32,6 @@ from swiftclient import client class TestObjectExpirer(ReplProbeTest): def setUp(self): - if len(ENABLED_POLICIES) < 2: - raise SkipTest('Need more than one policy') - self.expirer = Manager(['object-expirer']) self.expirer.start() err = self.expirer.stop() 
@@ -54,6 +51,9 @@ class TestObjectExpirer(ReplProbeTest): self.object_name) def test_expirer_object_split_brain(self): + if len(ENABLED_POLICIES) < 2: + raise SkipTest('Need more than one policy') + old_policy = random.choice(ENABLED_POLICIES) wrong_policy = random.choice([p for p in ENABLED_POLICIES if p != old_policy]) @@ -128,6 +128,32 @@ class TestObjectExpirer(ReplProbeTest): create_timestamp) def test_expirer_object_should_not_be_expired(self): + + # Current object-expirer checks the correctness via x-if-delete-at + # header that it can be deleted by expirer. If there are objects + # either which doesn't have x-delete-at header as metadata or which + # has different x-delete-at value from x-if-delete-at value, + # object-expirer's delete will fail as 412 PreconditionFailed. + # However, if some of the objects are in handoff nodes, the expirer + # can put the tombstone with the timestamp as same as x-delete-at and + # the object consistency will be resolved as the newer timestamp will + # be winner (in particular, overwritten case w/o x-delete-at). This + # test asserts such a situation that, at least, the overwritten object + # which has a larger timestamp than the original expired date should + # be safe.
+ + def put_object(headers): + # use internal client to PUT objects so that X-Timestamp in headers + # is effective + headers['Content-Length'] = '0' + path = self.client.make_path( + self.account, self.container_name, self.object_name) + try: + self.client.make_request('PUT', path, headers, (2,)) + except UnexpectedResponse as e: + self.fail( + 'Expected 201 for PUT object but got %s' % e.resp.status) + obj_brain = BrainSplitter(self.url, self.token, self.container_name, self.object_name, 'object', self.policy) @@ -135,40 +161,69 @@ class TestObjectExpirer(ReplProbeTest): # < T(expirer_executed) # Recreated obj should be appeared in any split brain case - # T(obj_created) - first_created_at = time.time() + obj_brain.put_container() + # T(obj_deleted with x-delete-at) # object-server accepts req only if X-Delete-At is later than 'now' - delete_at = int(time.time() + 1.5) - # T(obj_recreated) - recreated_at = time.time() + 2.0 - # T(expirer_executed) - 'now' - sleep_for_expirer = 2.01 + # so here, T(obj_created) < T(obj_deleted with x-delete-at) + now = time.time() + delete_at = int(now + 2.0) + recreate_at = delete_at + 1.0 + put_object(headers={'X-Delete-At': delete_at, + 'X-Timestamp': Timestamp(now).normal}) - obj_brain.put_container(int(self.policy)) - obj_brain.put_object( - headers={'X-Delete-At': delete_at, - 'X-Timestamp': Timestamp(first_created_at).internal}) - - # some object servers stopped + # some object servers stopped to make a situation that the + # object-expirer can put tombstone in the primary nodes. 
obj_brain.stop_primary_half() - obj_brain.put_object( - headers={'X-Timestamp': Timestamp(recreated_at).internal, - 'X-Object-Meta-Expired': 'False'}) + + # increment the X-Timestamp explicitly + # (will be T(obj_deleted with x-delete-at) < T(obj_recreated)) + put_object(headers={'X-Object-Meta-Expired': 'False', + 'X-Timestamp': Timestamp(recreate_at).normal}) # make sure auto-created containers get in the account listing Manager(['container-updater']).once() + # sanity, the newer object is still there + try: + metadata = self.client.get_object_metadata( + self.account, self.container_name, self.object_name) + except UnexpectedResponse as e: + self.fail( + 'Expected 200 for HEAD object but got %s' % e.resp.status) + + self.assertIn('x-object-meta-expired', metadata) + # some object servers recovered obj_brain.start_primary_half() - # sleep to make sure expirer runs at the time after obj is recreated - time.sleep(sleep_for_expirer) + + # sleep until after recreated_at + while time.time() <= recreate_at: + time.sleep(0.1) + # Now, expirer runs at the time after obj is recreated self.expirer.once() - # inconsistent state of objects is recovered + + # verify that original object was deleted by expirer + obj_brain.stop_handoff_half() + try: + metadata = self.client.get_object_metadata( + self.account, self.container_name, self.object_name, + acceptable_statuses=(4,)) + except UnexpectedResponse as e: + self.fail( + 'Expected 404 for HEAD object but got %s' % e.resp.status) + obj_brain.start_handoff_half() + + # and inconsistent state of objects is recovered by replicator Manager(['object-replicator']).once() # check if you can get recreated object - metadata = self.client.get_object_metadata( - self.account, self.container_name, self.object_name) + try: + metadata = self.client.get_object_metadata( + self.account, self.container_name, self.object_name) + except UnexpectedResponse as e: + self.fail( + 'Expected 200 for HEAD object but got %s' % e.resp.status) + 
self.assertIn('x-object-meta-expired', metadata) From 1b7001adeb935893017ff2428291d7edf9470b6b Mon Sep 17 00:00:00 2001 From: Brian Cline Date: Sat, 21 May 2016 03:54:33 -0500 Subject: [PATCH 132/141] Improve linting for third-party middleware Currently any third-party CI doing linting of their own middlewares against the Swift codebase don't do so hot -- if they use swob's Response.status_int in any way, the linter will fail because it only gets set when status does. This patch defaults it to None in the class definition, seeing as a linter can't truly know that status_int will only be created lazily at runtime, and further, that its existence is tied to setting Response.status (via __init__ or setting the property directly). I couldn't actually find a code path within Swift where we could end up with a None when we access it; at init time, we default the status code to 200, and there don't seem to be any code paths that init with a None status code. (You might try, but you can't do that even today.) As a result, this seems to have no ill effect on anything, but is a huge help for linters. This doesn't seem to break any unit tests, func tests via v1, v2, and v3 auth, nor tempest tests via v2 or v3. 
Change-Id: Id28fb56bc7133b9eb59df7989a71b892fd919643 --- swift/common/swob.py | 1 + 1 file changed, 1 insertion(+) diff --git a/swift/common/swob.py b/swift/common/swob.py index f895c44f74..2ba5d5e6a4 100644 --- a/swift/common/swob.py +++ b/swift/common/swob.py @@ -1086,6 +1086,7 @@ class Response(object): content_range = _header_property('content-range') etag = _resp_etag_property() status = _resp_status_property() + status_int = None body = _resp_body_property() host_url = _host_url_property() last_modified = _datetime_property('last-modified') From d0ec1adb78b26f5c24312090796c69912e9e3da9 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Thu, 19 May 2016 19:58:56 +0100 Subject: [PATCH 133/141] Make SLO manifest copies retain correct content-type When copying an SLO manifest with multipart-manifest=get the actual manifest content-type should get copied to the destination, rather than the application/json value that is synthesised by SLO in a GET response. That way the result of a HEAD on the copied manifest is the same as a HEAD to the source, and the container listings for the two are consistent. This patch also un-skips a functional test and adds functional tests that verify this patch and also verify that etags and size also get correctly copied and updated in destination container (bug #1260446). 
Closes-Bug: #1260446 Closes-Bug: #1583756 Change-Id: Ie7fa82f70b3ec3ef568f5355c69f6bce460ba25d --- swift/common/middleware/slo.py | 17 +-- test/functional/tests.py | 143 ++++++++++++++++++++++-- test/unit/common/middleware/test_slo.py | 16 +-- 3 files changed, 148 insertions(+), 28 deletions(-) diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py index b87c8f2984..180cd3f306 100644 --- a/swift/common/middleware/slo.py +++ b/swift/common/middleware/slo.py @@ -582,14 +582,15 @@ class SloGetContext(WSGIContext): if req.params.get('format') == 'raw': resp_iter = self.convert_segment_listing( self._response_headers, resp_iter) - new_headers = [] - for header, value in self._response_headers: - if header.lower() == 'content-type': - new_headers.append(('Content-Type', - 'application/json; charset=utf-8')) - else: - new_headers.append((header, value)) - self._response_headers = new_headers + else: + new_headers = [] + for header, value in self._response_headers: + if header.lower() == 'content-type': + new_headers.append(('Content-Type', + 'application/json; charset=utf-8')) + else: + new_headers.append((header, value)) + self._response_headers = new_headers start_response(self._response_status, self._response_headers, self._response_exc_info) diff --git a/test/functional/tests.py b/test/functional/tests.py index 2274743423..d083aa10c2 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -2875,18 +2875,26 @@ class TestSlo(Base): def test_slo_container_listing(self): # the listing object size should equal the sum of the size of the # segments, not the size of the manifest body - raise SkipTest('Only passes with object_post_as_copy=False') file_item = self.env.container.file(Utils.create_name) file_item.write( json.dumps([self.env.seg_info['seg_a']]), parms={'multipart-manifest': 'put'}) + # The container listing has the etag of the actual manifest object + # contents which we get using multipart-manifest=get. 
Arguably this + # should be the etag that we get when NOT using multipart-manifest=get, + # to be consistent with size and content-type. But here we at least + # verify that it remains consistent when the object is updated with a + # POST. + file_item.initialize(parms={'multipart-manifest': 'get'}) + expected_etag = file_item.etag - files = self.env.container.files(parms={'format': 'json'}) - for f_dict in files: + listing = self.env.container.files(parms={'format': 'json'}) + for f_dict in listing: if f_dict['name'] == file_item.name: self.assertEqual(1024 * 1024, f_dict['bytes']) self.assertEqual('application/octet-stream', f_dict['content_type']) + self.assertEqual(expected_etag, f_dict['hash']) break else: self.fail('Failed to find manifest file in container listing') @@ -2898,12 +2906,31 @@ class TestSlo(Base): self.assertEqual('image/jpeg', file_item.content_type) # sanity # verify that the container listing is consistent with the file - files = self.env.container.files(parms={'format': 'json'}) - for f_dict in files: + listing = self.env.container.files(parms={'format': 'json'}) + for f_dict in listing: if f_dict['name'] == file_item.name: self.assertEqual(1024 * 1024, f_dict['bytes']) self.assertEqual(file_item.content_type, f_dict['content_type']) + self.assertEqual(expected_etag, f_dict['hash']) + break + else: + self.fail('Failed to find manifest file in container listing') + + # now POST with no change to content-type + file_item.sync_metadata({'X-Object-Meta-Test': 'blah'}, + cfg={'no_content_type': True}) + file_item.initialize() + self.assertEqual('image/jpeg', file_item.content_type) # sanity + + # verify that the container listing is consistent with the file + listing = self.env.container.files(parms={'format': 'json'}) + for f_dict in listing: + if f_dict['name'] == file_item.name: + self.assertEqual(1024 * 1024, f_dict['bytes']) + self.assertEqual(file_item.content_type, + f_dict['content_type']) + self.assertEqual(expected_etag, f_dict['hash']) 
break else: self.fail('Failed to find manifest file in container listing') @@ -3127,17 +3154,109 @@ class TestSlo(Base): self.assertEqual(4 * 1024 * 1024 + 1, len(copied_contents)) def test_slo_copy_the_manifest(self): - file_item = self.env.container.file("manifest-abcde") - self.assertTrue(file_item.copy(self.env.container.name, - "copied-abcde-manifest-only", - parms={'multipart-manifest': 'get'})) + source = self.env.container.file("manifest-abcde") + source_contents = source.read(parms={'multipart-manifest': 'get'}) + source_json = json.loads(source_contents) + source.initialize() + self.assertEqual('application/octet-stream', source.content_type) + source.initialize(parms={'multipart-manifest': 'get'}) + source_hash = hashlib.md5() + source_hash.update(source_contents) + self.assertEqual(source_hash.hexdigest(), source.etag) + + self.assertTrue(source.copy(self.env.container.name, + "copied-abcde-manifest-only", + parms={'multipart-manifest': 'get'})) copied = self.env.container.file("copied-abcde-manifest-only") copied_contents = copied.read(parms={'multipart-manifest': 'get'}) try: - json.loads(copied_contents) + copied_json = json.loads(copied_contents) except ValueError: self.fail("COPY didn't copy the manifest (invalid json on GET)") + self.assertEqual(source_json, copied_json) + copied.initialize() + self.assertEqual('application/octet-stream', copied.content_type) + copied.initialize(parms={'multipart-manifest': 'get'}) + copied_hash = hashlib.md5() + copied_hash.update(copied_contents) + self.assertEqual(copied_hash.hexdigest(), copied.etag) + + # verify the listing metadata + listing = self.env.container.files(parms={'format': 'json'}) + names = {} + for f_dict in listing: + if f_dict['name'] in ('manifest-abcde', + 'copied-abcde-manifest-only'): + names[f_dict['name']] = f_dict + + self.assertIn('manifest-abcde', names) + actual = names['manifest-abcde'] + self.assertEqual(4 * 1024 * 1024 + 1, actual['bytes']) + 
self.assertEqual('application/octet-stream', actual['content_type']) + self.assertEqual(source.etag, actual['hash']) + + self.assertIn('copied-abcde-manifest-only', names) + actual = names['copied-abcde-manifest-only'] + self.assertEqual(4 * 1024 * 1024 + 1, actual['bytes']) + self.assertEqual('application/octet-stream', actual['content_type']) + self.assertEqual(copied.etag, actual['hash']) + + def test_slo_copy_the_manifest_updating_metadata(self): + source = self.env.container.file("manifest-abcde") + source.content_type = 'application/octet-stream' + source.sync_metadata({'test': 'original'}) + source_contents = source.read(parms={'multipart-manifest': 'get'}) + source_json = json.loads(source_contents) + source.initialize() + self.assertEqual('application/octet-stream', source.content_type) + source.initialize(parms={'multipart-manifest': 'get'}) + source_hash = hashlib.md5() + source_hash.update(source_contents) + self.assertEqual(source_hash.hexdigest(), source.etag) + self.assertEqual(source.metadata['test'], 'original') + + self.assertTrue( + source.copy(self.env.container.name, "copied-abcde-manifest-only", + parms={'multipart-manifest': 'get'}, + hdrs={'Content-Type': 'image/jpeg', + 'X-Object-Meta-Test': 'updated'})) + + copied = self.env.container.file("copied-abcde-manifest-only") + copied_contents = copied.read(parms={'multipart-manifest': 'get'}) + try: + copied_json = json.loads(copied_contents) + except ValueError: + self.fail("COPY didn't copy the manifest (invalid json on GET)") + self.assertEqual(source_json, copied_json) + copied.initialize() + self.assertEqual('image/jpeg', copied.content_type) + copied.initialize(parms={'multipart-manifest': 'get'}) + copied_hash = hashlib.md5() + copied_hash.update(copied_contents) + self.assertEqual(copied_hash.hexdigest(), copied.etag) + self.assertEqual(copied.metadata['test'], 'updated') + + # verify the listing metadata + listing = self.env.container.files(parms={'format': 'json'}) + names = {} + for 
f_dict in listing: + if f_dict['name'] in ('manifest-abcde', + 'copied-abcde-manifest-only'): + names[f_dict['name']] = f_dict + + self.assertIn('manifest-abcde', names) + actual = names['manifest-abcde'] + self.assertEqual(4 * 1024 * 1024 + 1, actual['bytes']) + self.assertEqual('application/octet-stream', actual['content_type']) + # the container listing should have the etag of the manifest contents + self.assertEqual(source.etag, actual['hash']) + + self.assertIn('copied-abcde-manifest-only', names) + actual = names['copied-abcde-manifest-only'] + self.assertEqual(4 * 1024 * 1024 + 1, actual['bytes']) + self.assertEqual('image/jpeg', actual['content_type']) + self.assertEqual(copied.etag, actual['hash']) def test_slo_copy_the_manifest_account(self): acct = self.env.conn.account_name @@ -3295,8 +3414,8 @@ class TestSlo(Base): got_body = manifest.read(parms={'multipart-manifest': 'get', 'format': 'raw'}) - self.assertEqual('application/json; charset=utf-8', - manifest.content_type) + # raw format should have the actual manifest object content-type + self.assertEqual('application/octet-stream', manifest.content_type) try: value = json.loads(got_body) except ValueError: diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py index 79eaddcbf3..2a27d1c315 100644 --- a/test/unit/common/middleware/test_slo.py +++ b/test/unit/common/middleware/test_slo.py @@ -1112,7 +1112,8 @@ class TestSloGetRawManifest(SloTestCase): self.bc_etag = md5hex(_bc_manifest_json) self.app.register( 'GET', '/v1/AUTH_test/gettest/manifest-bc', - swob.HTTPOk, {'Content-Type': 'application/json;swift_bytes=35', + # proxy obj controller removes swift_bytes from content-type + swob.HTTPOk, {'Content-Type': 'text/plain', 'X-Static-Large-Object': 'true', 'X-Object-Meta-Plant': 'Ficus', 'Etag': md5hex(_bc_manifest_json)}, @@ -1127,7 +1128,8 @@ class TestSloGetRawManifest(SloTestCase): 'content_type': 'text/plain', 'range': '100-200'}]) self.app.register( 'GET', 
'/v1/AUTH_test/gettest/manifest-bc-r', - swob.HTTPOk, {'Content-Type': 'application/json;swift_bytes=25', + # proxy obj controller removes swift_bytes from content-type + swob.HTTPOk, {'Content-Type': 'text/plain', 'X-Static-Large-Object': 'true', 'X-Object-Meta-Plant': 'Ficus', 'Etag': md5hex(_bc_manifest_json_ranges)}, @@ -1144,9 +1146,8 @@ class TestSloGetRawManifest(SloTestCase): self.assertEqual(status, '200 OK') self.assertTrue(('Etag', self.bc_etag) in headers, headers) self.assertTrue(('X-Static-Large-Object', 'true') in headers, headers) - self.assertTrue( - ('Content-Type', 'application/json; charset=utf-8') in headers, - headers) + # raw format should return the actual manifest object content-type + self.assertIn(('Content-Type', 'text/plain'), headers) try: resp_data = json.loads(body) @@ -1172,9 +1173,8 @@ class TestSloGetRawManifest(SloTestCase): status, headers, body = self.call_slo(req) self.assertEqual(status, '200 OK') - self.assertTrue( - ('Content-Type', 'application/json; charset=utf-8') in headers, - headers) + # raw format should return the actual manifest object content-type + self.assertIn(('Content-Type', 'text/plain'), headers) try: resp_data = json.loads(body) except ValueError: From 7b706926a8ed5bbcec3a678e868e301c9a6ed8f1 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Mon, 23 May 2016 15:20:06 +0100 Subject: [PATCH 134/141] Fix setup of manifest responses in SLO tests The swift_bytes param is removed from the content-type in the proxy object controller, so the SLO unit tests should not be registering GET responses with FakeSwift that have swift_bytes appended to the content-type. Nor should submanifest segment dicts have swift_bytes appended to their content-type values. Also adds a test for the object controller and container server handling of SLO swift_bytes. 
Change-Id: Icf9bd87eee25002c8d9728b16e60c8347060f320 --- test/unit/common/middleware/test_slo.py | 79 +++++++++++++++++++------ test/unit/container/test_server.py | 42 +++++++++++++ test/unit/proxy/controllers/test_obj.py | 14 +++++ 3 files changed, 116 insertions(+), 19 deletions(-) diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py index 2a27d1c315..0435ae5544 100644 --- a/test/unit/common/middleware/test_slo.py +++ b/test/unit/common/middleware/test_slo.py @@ -349,7 +349,7 @@ class TestSloPutManifest(SloTestCase): 'GET', '/v1/AUTH_test/checktest/slob', swob.HTTPOk, {'X-Static-Large-Object': 'true', 'Etag': 'slob-etag', - 'Content-Type': 'cat/picture;swift_bytes=12345', + 'Content-Type': 'cat/picture', 'Content-Length': len(_manifest_json)}, _manifest_json) @@ -1106,7 +1106,7 @@ class TestSloGetRawManifest(SloTestCase): 'last_modified': '1970-01-01T00:00:00.000000'}, {'name': '/gettest/d_10', 'hash': md5hex(md5hex("e" * 5) + md5hex("f" * 5)), 'bytes': '10', - 'content_type': 'application/json;swift_bytes=10', + 'content_type': 'application/json', 'sub_slo': True, 'last_modified': '1970-01-01T00:00:00.000000'}]) self.bc_etag = md5hex(_bc_manifest_json) @@ -1262,7 +1262,7 @@ class TestSloGetManifest(SloTestCase): 'content_type': 'text/plain'}]) self.app.register( 'GET', '/v1/AUTH_test/gettest/manifest-bc', - swob.HTTPOk, {'Content-Type': 'application/json;swift_bytes=25', + swob.HTTPOk, {'Content-Type': 'application/json', 'X-Static-Large-Object': 'true', 'X-Object-Meta-Plant': 'Ficus', 'Etag': md5hex(_bc_manifest_json)}, @@ -1272,9 +1272,9 @@ class TestSloGetManifest(SloTestCase): [{'name': '/gettest/a_5', 'hash': md5hex("a" * 5), 'content_type': 'text/plain', 'bytes': '5'}, {'name': '/gettest/manifest-bc', 'sub_slo': True, - 'content_type': 'application/json;swift_bytes=25', + 'content_type': 'application/json', 'hash': md5hex(md5hex("b" * 10) + md5hex("c" * 15)), - 'bytes': len(_bc_manifest_json)}, + 'bytes': 25}, 
{'name': '/gettest/d_20', 'hash': md5hex("d" * 20), 'content_type': 'text/plain', 'bytes': '20'}]) self.app.register( @@ -1284,6 +1284,34 @@ class TestSloGetManifest(SloTestCase): 'Etag': md5(_abcd_manifest_json).hexdigest()}, _abcd_manifest_json) + # A submanifest segment is created using the response headers from a + # HEAD on the submanifest. That HEAD is passed through SLO which will + # modify the response content-length to be equal to the size of the + # submanifest's large object. The swift_bytes value appended to the + # submanifest's content-type will have been removed. So the sub-slo + # segment dict that is written to the parent manifest should have the + # correct bytes and content-type values. However, if somehow the + # submanifest HEAD response wasn't modified by SLO (maybe + # historically?) and we ended up with the parent manifest sub-slo entry + # having swift_bytes appended to it's content-type and the actual + # submanifest size in its bytes field, then SLO can cope, so we create + # a deviant manifest to verify that SLO can deal with it. 
+ _abcd_manifest_json_alt = json.dumps( + [{'name': '/gettest/a_5', 'hash': md5hex("a" * 5), + 'content_type': 'text/plain', 'bytes': '5'}, + {'name': '/gettest/manifest-bc', 'sub_slo': True, + 'content_type': 'application/json; swift_bytes=25', + 'hash': md5hex(md5hex("b" * 10) + md5hex("c" * 15)), + 'bytes': len(_bc_manifest_json)}, + {'name': '/gettest/d_20', 'hash': md5hex("d" * 20), + 'content_type': 'text/plain', 'bytes': '20'}]) + self.app.register( + 'GET', '/v1/AUTH_test/gettest/manifest-abcd-alt', + swob.HTTPOk, {'Content-Type': 'application/json', + 'X-Static-Large-Object': 'true', + 'Etag': md5(_abcd_manifest_json_alt).hexdigest()}, + _abcd_manifest_json_alt) + _abcdefghijkl_manifest_json = json.dumps( [{'name': '/gettest/a_5', 'hash': md5hex("a" * 5), 'content_type': 'text/plain', 'bytes': '5'}, @@ -1337,7 +1365,7 @@ class TestSloGetManifest(SloTestCase): self.bc_ranges_etag = md5hex(_bc_ranges_manifest_json) self.app.register( 'GET', '/v1/AUTH_test/gettest/manifest-bc-ranges', - swob.HTTPOk, {'Content-Type': 'application/json;swift_bytes=16', + swob.HTTPOk, {'Content-Type': 'application/json', 'X-Static-Large-Object': 'true', 'X-Object-Meta-Plant': 'Ficus', 'Etag': self.bc_ranges_etag}, @@ -1351,12 +1379,12 @@ class TestSloGetManifest(SloTestCase): 'content_type': 'text/plain', 'bytes': '5', 'range': '1-4'}, {'name': '/gettest/manifest-bc-ranges', 'sub_slo': True, - 'content_type': 'application/json;swift_bytes=16', + 'content_type': 'application/json', 'hash': self.bc_ranges_etag, - 'bytes': len(_bc_ranges_manifest_json), + 'bytes': 16, 'range': '8-15'}, {'name': '/gettest/manifest-bc-ranges', 'sub_slo': True, - 'content_type': 'application/json;swift_bytes=16', + 'content_type': 'application/json', 'hash': self.bc_ranges_etag, 'bytes': len(_bc_ranges_manifest_json), 'range': '0-7'}, @@ -1655,6 +1683,22 @@ class TestSloGetManifest(SloTestCase): self.assertEqual( body, 'aaaaabbbbbbbbbbcccccccccccccccdddddddddddddddddddd') + def 
test_get_manifest_with_submanifest_bytes_in_content_type(self): + # verify correct content-length when the sub-slo segment in the + # manifest has its actual object content-length appended as swift_bytes + # to the content-type, and the submanifest length in the bytes field. + req = Request.blank( + '/v1/AUTH_test/gettest/manifest-abcd-alt', + environ={'REQUEST_METHOD': 'GET'}) + status, headers, body = self.call_slo(req) + headers = HeaderKeyDict(headers) + + self.assertEqual(status, '200 OK') + self.assertEqual(headers['Content-Length'], '50') + self.assertEqual(headers['Etag'], '"%s"' % self.manifest_abcd_etag) + self.assertEqual( + body, 'aaaaabbbbbbbbbbcccccccccccccccdddddddddddddddddddd') + def test_range_get_manifest(self): req = Request.blank( '/v1/AUTH_test/gettest/manifest-abcd', @@ -2274,8 +2318,7 @@ class TestSloGetManifest(SloTestCase): 'hash': 'man%d' % (i + 1), 'sub_slo': True, 'bytes': len(manifest_json), - 'content_type': - 'application/json;swift_bytes=%d' % ((21 - i) * 6)}] + 'content_type': 'application/json'}] manifest_json = json.dumps(manifest_data) self.app.register( @@ -2330,9 +2373,8 @@ class TestSloGetManifest(SloTestCase): {'name': '/gettest/man%d' % (i + 1), 'hash': 'man%d' % (i + 1), 'sub_slo': True, - 'bytes': len(manifest_json), - 'content_type': - 'application/json;swift_bytes=%d' % ((10 - i) * 6)}, + 'bytes': (10 - i) * 6, + 'content_type': 'application/json'}, {'name': '/gettest/obj%d' % i, 'hash': md5hex('body%02d' % i), 'bytes': '6', @@ -2387,9 +2429,8 @@ class TestSloGetManifest(SloTestCase): {'name': '/gettest/man%d' % (i + 1), 'hash': 'man%d' % (i + 1), 'sub_slo': True, - 'bytes': len(manifest_json), - 'content_type': - 'application/json;swift_bytes=%d' % ((12 - i) * 6)}, + 'bytes': (12 - i) * 6, + 'content_type': 'application/json'}, {'name': '/gettest/obj%d' % i, 'hash': md5hex('body%02d' % i), 'bytes': '6', @@ -2479,7 +2520,7 @@ class TestSloGetManifest(SloTestCase): swob.HTTPOk, {'Content-Type': 'application/json', 
'X-Static-Large-Object': 'true'}, json.dumps([{'name': '/gettest/manifest-a', 'sub_slo': True, - 'content_type': 'application/json;swift_bytes=5', + 'content_type': 'application/json', 'hash': 'manifest-a', 'bytes': '12345'}])) @@ -2497,7 +2538,7 @@ class TestSloGetManifest(SloTestCase): def test_invalid_json_submanifest(self): self.app.register( 'GET', '/v1/AUTH_test/gettest/manifest-bc', - swob.HTTPOk, {'Content-Type': 'application/json;swift_bytes=25', + swob.HTTPOk, {'Content-Type': 'application/json', 'X-Static-Large-Object': 'true', 'X-Object-Meta-Plant': 'Ficus'}, "[this {isn't (JSON") diff --git a/test/unit/container/test_server.py b/test/unit/container/test_server.py index 642c12e7b6..706f3c3366 100644 --- a/test/unit/container/test_server.py +++ b/test/unit/container/test_server.py @@ -2299,6 +2299,48 @@ class TestContainerController(unittest.TestCase): result = [x['content_type'] for x in json.loads(resp.body)] self.assertEqual(result, [u'\u2603', 'text/plain;charset="utf-8"']) + def test_swift_bytes_in_content_type(self): + # create container + req = Request.blank( + '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', + 'HTTP_X_TIMESTAMP': '0'}) + req.get_response(self.controller) + + # regular object update + ctype = 'text/plain; charset="utf-8"' + req = Request.blank( + '/sda1/p/a/c/o1', environ={ + 'REQUEST_METHOD': 'PUT', + 'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': ctype, + 'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 99}) + self._update_object_put_headers(req) + resp = req.get_response(self.controller) + self.assertEqual(resp.status_int, 201) + + # slo object update + ctype = 'text/plain; charset="utf-8"; swift_bytes=12345678' + req = Request.blank( + '/sda1/p/a/c/o2', environ={ + 'REQUEST_METHOD': 'PUT', + 'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': ctype, + 'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 99}) + self._update_object_put_headers(req) + resp = req.get_response(self.controller) + self.assertEqual(resp.status_int, 201) + + # verify listing + req = 
Request.blank('/sda1/p/a/c?format=json', + environ={'REQUEST_METHOD': 'GET'}) + resp = req.get_response(self.controller) + listing = json.loads(resp.body) + self.assertEqual(2, len(listing)) + self.assertEqual('text/plain;charset="utf-8"', + listing[0]['content_type']) + self.assertEqual(99, listing[0]['bytes']) + self.assertEqual('text/plain;charset="utf-8"', + listing[1]['content_type']) + self.assertEqual(12345678, listing[1]['bytes']) + def test_GET_accept_not_valid(self): req = Request.blank('/sda1/p/a/c', method='PUT', headers={ 'X-Timestamp': Timestamp(0).internal}) diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index 35a2178c1c..be0893dbb2 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -747,6 +747,20 @@ class TestReplicatedObjController(BaseObjectControllerMixin, self.assertEqual(resp.status_int, 200) self.assertEqual(resp.headers['Transfer-Encoding'], 'chunked') + def _test_removes_swift_bytes(self, method): + req = swift.common.swob.Request.blank('/v1/a/c/o', method=method) + with set_http_connect( + 200, headers={'content-type': 'image/jpeg; swift_bytes=99'}): + resp = req.get_response(self.app) + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.headers['Content-Type'], 'image/jpeg') + + def test_GET_removes_swift_bytes(self): + self._test_removes_swift_bytes('GET') + + def test_HEAD_removes_swift_bytes(self): + self._test_removes_swift_bytes('HEAD') + def test_GET_error(self): req = swift.common.swob.Request.blank('/v1/a/c/o') self.app.logger.txn_id = req.environ['swift.trans_id'] = 'my-txn-id' From 226557afc42c245e050d84162497f46341407ef7 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Thu, 19 May 2016 18:55:40 -0700 Subject: [PATCH 135/141] Turn on H703, so our translators don't punch us Change-Id: I4ce3068f79563e4d4296c6e1078bc12f0cf84c96 Related-Bug: 1559431 --- swift/common/container_sync_realms.py | 11 +++++++---- 
swift/common/utils.py | 3 ++- swift/common/wsgi.py | 7 ++++--- swift/container/server.py | 11 ++++++----- swift/container/sync.py | 5 +++-- swift/obj/diskfile.py | 5 +++-- swift/obj/expirer.py | 16 ++++++++++------ swift/obj/server.py | 11 ++++++----- swift/obj/updater.py | 6 +++--- tox.ini | 3 +-- 10 files changed, 45 insertions(+), 33 deletions(-) diff --git a/swift/common/container_sync_realms.py b/swift/common/container_sync_realms.py index 7b441da9de..2c4c944add 100644 --- a/swift/common/container_sync_realms.py +++ b/swift/common/container_sync_realms.py @@ -57,7 +57,8 @@ class ContainerSyncRealms(object): log_func = self.logger.debug else: log_func = self.logger.error - log_func(_('Could not load %r: %s'), self.conf_path, err) + log_func(_('Could not load %(conf)r: %(error)s') % { + 'conf': self.conf_path, 'error': err}) else: if mtime != self.conf_path_mtime: self.conf_path_mtime = mtime @@ -66,7 +67,8 @@ class ContainerSyncRealms(object): conf.read(self.conf_path) except configparser.ParsingError as err: self.logger.error( - _('Could not load %r: %s'), self.conf_path, err) + _('Could not load %(conf)r: %(error)s') + % {'conf': self.conf_path, 'error': err}) else: try: self.mtime_check_interval = conf.getint( @@ -79,8 +81,9 @@ class ContainerSyncRealms(object): now + self.mtime_check_interval except (configparser.ParsingError, ValueError) as err: self.logger.error( - _('Error in %r with mtime_check_interval: %s'), - self.conf_path, err) + _('Error in %(conf)r with ' + 'mtime_check_interval: %(error)s') + % {'conf': self.conf_path, 'error': err}) realms = {} for section in conf.sections(): realm = {} diff --git a/swift/common/utils.py b/swift/common/utils.py index 4694f5951e..1d12b96495 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -2674,7 +2674,8 @@ def validate_sync_to(value, allowed_sync_hosts, realms_conf): endpoint = realms_conf.endpoint(realm, cluster) if not endpoint: return ( - _('No cluster endpoint for %r %r') % (realm, 
cluster), + _('No cluster endpoint for %(realm)r %(cluster)r') + % {'realm': realm, 'cluster': cluster}, None, None, None) return ( None, diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index fdd4a203ed..6676d38358 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -196,9 +196,10 @@ def get_socket(conf): raise sleep(0.1) if not sock: - raise Exception(_('Could not bind to %s:%s ' - 'after trying for %s seconds') % ( - bind_addr[0], bind_addr[1], bind_timeout)) + raise Exception(_('Could not bind to %(addr)s:%(port)s ' + 'after trying for %(timeout)s seconds') % { + 'addr': bind_addr[0], 'port': bind_addr[1], + 'timeout': bind_timeout}) # in my experience, sockets can hang around forever without keepalive sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) diff --git a/swift/container/server.py b/swift/container/server.py index 92bb595e8f..a77dadcd22 100644 --- a/swift/container/server.py +++ b/swift/container/server.py @@ -183,11 +183,12 @@ class ContainerController(BaseStorageServer): if len(account_hosts) != len(account_devices): # This shouldn't happen unless there's a bug in the proxy, # but if there is, we want to know about it. 
- self.logger.error(_('ERROR Account update failed: different ' - 'numbers of hosts and devices in request: ' - '"%s" vs "%s"') % - (req.headers.get('X-Account-Host', ''), - req.headers.get('X-Account-Device', ''))) + self.logger.error(_( + 'ERROR Account update failed: different ' + 'numbers of hosts and devices in request: ' + '"%(hosts)s" vs "%(devices)s"') % { + 'hosts': req.headers.get('X-Account-Host', ''), + 'devices': req.headers.get('X-Account-Device', '')}) return HTTPBadRequest(req=req) if account_partition: diff --git a/swift/container/sync.py b/swift/container/sync.py index 8fbfe9dba3..1c94d9a679 100644 --- a/swift/container/sync.py +++ b/swift/container/sync.py @@ -237,8 +237,9 @@ class ContainerSync(Daemon): if err.errno != errno.ENOENT: raise raise SystemExit( - _('Unable to load internal client from config: %r (%s)') % - (internal_client_conf_path, err)) + _('Unable to load internal client from config: ' + '%(conf)r (%(error)s)') + % {'conf': internal_client_conf_path, 'error': err}) def run_forever(self, *args, **kwargs): """ diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index f50ca7030e..49deb36f67 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -378,8 +378,9 @@ def object_audit_location_generator(devices, mount_check=True, logger=None, base, policy = split_policy_string(dir_) except PolicyError as e: if logger: - logger.warning(_('Directory %r does not map ' - 'to a valid policy (%s)') % (dir_, e)) + logger.warning(_('Directory %(directory)r does not map ' + 'to a valid policy (%(error)s)') % { + 'directory': dir_, 'error': e}) continue datadir_path = os.path.join(devices, device, dir_) diff --git a/swift/obj/expirer.py b/swift/obj/expirer.py index 42855eee77..115920dd6d 100644 --- a/swift/obj/expirer.py +++ b/swift/obj/expirer.py @@ -77,15 +77,17 @@ class ObjectExpirer(Daemon): """ if final: elapsed = time() - self.report_first_time - self.logger.info(_('Pass completed in %ds; %d objects expired') % - (elapsed, 
self.report_objects)) + self.logger.info(_('Pass completed in %(time)ds; ' + '%(objects)d objects expired') % { + 'time': elapsed, 'objects': self.report_objects}) dump_recon_cache({'object_expiration_pass': elapsed, 'expired_last_pass': self.report_objects}, self.rcache, self.logger) elif time() - self.report_last_time >= self.report_interval: elapsed = time() - self.report_first_time - self.logger.info(_('Pass so far %ds; %d objects expired') % - (elapsed, self.report_objects)) + self.logger.info(_('Pass so far %(time)ds; ' + '%(objects)d objects expired') % { + 'time': elapsed, 'objects': self.report_objects}) self.report_last_time = time() def iter_cont_objs_to_expire(self): @@ -168,8 +170,10 @@ class ObjectExpirer(Daemon): self.logger.debug('Run begin') containers, objects = \ self.swift.get_account_info(self.expiring_objects_account) - self.logger.info(_('Pass beginning; %s possible containers; %s ' - 'possible objects') % (containers, objects)) + self.logger.info(_('Pass beginning; ' + '%(containers)s possible containers; ' + '%(objects)s possible objects') % { + 'containers': containers, 'objects': objects}) for container, obj in self.iter_cont_objs_to_expire(): containers_to_delete.add(container) diff --git a/swift/obj/server.py b/swift/obj/server.py index e59c9fbc38..4a90c6d677 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -281,11 +281,12 @@ class ObjectController(BaseStorageServer): if len(conthosts) != len(contdevices): # This shouldn't happen unless there's a bug in the proxy, # but if there is, we want to know about it. 
- self.logger.error(_('ERROR Container update failed: different ' - 'numbers of hosts and devices in request: ' - '"%s" vs "%s"') % - (headers_in.get('X-Container-Host', ''), - headers_in.get('X-Container-Device', ''))) + self.logger.error(_( + 'ERROR Container update failed: different ' + 'numbers of hosts and devices in request: ' + '"%(hosts)s" vs "%(devices)s"') % { + 'hosts': headers_in.get('X-Container-Host', ''), + 'devices': headers_in.get('X-Container-Device', '')}) return if contpartition: diff --git a/swift/obj/updater.py b/swift/obj/updater.py index 9bf4ef19a3..743cf850dc 100644 --- a/swift/obj/updater.py +++ b/swift/obj/updater.py @@ -160,9 +160,9 @@ class ObjectUpdater(Daemon): try: base, policy = split_policy_string(asyncdir) except PolicyError as e: - self.logger.warning(_('Directory %r does not map ' - 'to a valid policy (%s)') % - (asyncdir, e)) + self.logger.warning(_('Directory %(directory)r does not map ' + 'to a valid policy (%(error)s)') % { + 'directory': asyncdir, 'error': e}) continue for prefix in self._listdir(async_pending): prefix_path = os.path.join(async_pending, prefix) diff --git a/tox.ini b/tox.ini index 68c1f6a0bd..1e79f67b88 100644 --- a/tox.ini +++ b/tox.ini @@ -76,8 +76,7 @@ commands = bandit -c bandit.yaml -r swift bin -n 5 -p gate # H404: multi line docstring should start without a leading new line # H405: multi line docstring summary not separated with an empty line # H501: Do not use self.__dict__ for string formatting -# H703: Multiple positional placeholders -ignore = F812,H101,H202,H233,H301,H306,H401,H403,H404,H405,H501,H703 +ignore = F812,H101,H202,H233,H301,H306,H401,H403,H404,H405,H501 exclude = .venv,.tox,dist,*egg show-source = True From e09c4ee7800e82aa09ca2f6ae375420b766182a4 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Fri, 29 Apr 2016 12:12:00 -0500 Subject: [PATCH 136/141] Allow concurrent bulk deletes Before, server-side deletes of static large objects could take a long time to complete since the proxy 
would wait for a response to each segment DELETE before starting the next DELETE request. Now, operators can configure a concurrency factor for the slo and bulk middlewares to allow up to N concurrent DELETE requests. By default, two DELETE requests will be allowed at a time. Note that objects and containers are now deleted in separate passes, to reduce the likelihood of 409 Conflict responses when deleting containers. Upgrade Consideration ===================== If operators have enabled the bulk or slo middlewares and would like to preserve the prior (single-threaded) DELETE behavior, they must add the following line to their [filter:slo] and [filter:bulk] proxy config sections: delete_concurrency = 1 This may be done prior to upgrading Swift. UpgradeImpact Closes-Bug: 1524454 Change-Id: I128374d74a4cef7a479b221fd15eec785cc4694a --- etc/proxy-server.conf-sample | 16 +++- swift/common/middleware/bulk.py | 114 +++++++++++++++------- swift/common/middleware/slo.py | 4 +- swift/common/utils.py | 42 ++++++++ test/unit/common/middleware/test_bulk.py | 116 +++++++++++++++++++---- test/unit/common/middleware/test_slo.py | 24 +++-- 6 files changed, 247 insertions(+), 69 deletions(-) diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index b5cfbf873b..6a4962ff9c 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -618,19 +618,23 @@ use = egg:swift#bulk # max_failed_extractions = 1000 # max_deletes_per_request = 10000 # max_failed_deletes = 1000 - +# # In order to keep a connection active during a potentially long bulk request, # Swift may return whitespace prepended to the actual response body. This # whitespace will be yielded no more than every yield_frequency seconds. # yield_frequency = 10 - +# # Note: The following parameter is used during a bulk delete of objects and # their container. 
This would frequently fail because it is very likely # that all replicated objects have not been deleted by the time the middleware got a # successful response. It can be configured the number of retries. And the # number of seconds to wait between each retry will be 1.5**retry - # delete_container_retry_count = 0 +# +# To speed up the bulk delete process, multiple deletes may be executed in +# parallel. Avoid setting this too high, as it gives clients a force multiplier +# which may be used in DoS attacks. The suggested range is between 2 and 10. +# delete_concurrency = 2 # Note: Put after auth and staticweb in the pipeline. [filter:slo] @@ -651,6 +655,12 @@ use = egg:swift#slo # # Time limit on GET requests (seconds) # max_get_time = 86400 +# +# When deleting with ?multipart-manifest=delete, multiple deletes may be +# executed in parallel. Avoid setting this too high, as it gives clients a +# force multiplier which may be used in DoS attacks. The suggested range is +# between 2 and 10. +# delete_concurrency = 2 # Note: Put after auth and staticweb in the pipeline. # If you don't put it in the pipeline, it will be inserted for you. 
diff --git a/swift/common/middleware/bulk.py b/swift/common/middleware/bulk.py index 0dd4aa12b2..3c394d2f8c 100644 --- a/swift/common/middleware/bulk.py +++ b/swift/common/middleware/bulk.py @@ -201,7 +201,8 @@ from swift.common.swob import Request, HTTPBadGateway, \ HTTPCreated, HTTPBadRequest, HTTPNotFound, HTTPUnauthorized, HTTPOk, \ HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPNotAcceptable, \ HTTPLengthRequired, HTTPException, HTTPServerError, wsgify -from swift.common.utils import get_logger, register_swift_info +from swift.common.utils import get_logger, register_swift_info, \ + StreamingPile from swift.common import constraints from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND, HTTP_CONFLICT @@ -274,8 +275,9 @@ class Bulk(object): def __init__(self, app, conf, max_containers_per_extraction=10000, max_failed_extractions=1000, max_deletes_per_request=10000, - max_failed_deletes=1000, yield_frequency=10, retry_count=0, - retry_interval=1.5, logger=None): + max_failed_deletes=1000, yield_frequency=10, + delete_concurrency=2, retry_count=0, retry_interval=1.5, + logger=None): self.app = app self.logger = logger or get_logger(conf, log_route='bulk') self.max_containers = max_containers_per_extraction @@ -283,6 +285,7 @@ class Bulk(object): self.max_failed_deletes = max_failed_deletes self.max_deletes_per_request = max_deletes_per_request self.yield_frequency = yield_frequency + self.delete_concurrency = min(1000, max(1, delete_concurrency)) self.retry_count = retry_count self.retry_interval = retry_interval self.max_path_length = constraints.MAX_OBJECT_NAME_LENGTH \ @@ -397,39 +400,74 @@ class Bulk(object): objs_to_delete = self.get_objs_to_delete(req) failed_file_response = {'type': HTTPBadRequest} req.environ['eventlet.minimum_write_chunk_size'] = 0 - for obj_to_delete in objs_to_delete: - if last_yield + self.yield_frequency < time(): - separator = '\r\n\r\n' - last_yield = time() - yield ' ' - obj_name = obj_to_delete['name'] - if not 
obj_name: - continue - if len(failed_files) >= self.max_failed_deletes: - raise HTTPBadRequest('Max delete failures exceeded') - if obj_to_delete.get('error'): - if obj_to_delete['error']['code'] == HTTP_NOT_FOUND: - resp_dict['Number Not Found'] += 1 - else: + + def delete_filter(predicate, objs_to_delete): + for obj_to_delete in objs_to_delete: + obj_name = obj_to_delete['name'] + if not obj_name: + continue + if not predicate(obj_name): + continue + if obj_to_delete.get('error'): + if obj_to_delete['error']['code'] == HTTP_NOT_FOUND: + resp_dict['Number Not Found'] += 1 + else: + failed_files.append([ + quote(obj_name), + obj_to_delete['error']['message']]) + continue + delete_path = '/'.join(['', vrs, account, + obj_name.lstrip('/')]) + if not constraints.check_utf8(delete_path): failed_files.append([quote(obj_name), - obj_to_delete['error']['message']]) - continue - delete_path = '/'.join(['', vrs, account, - obj_name.lstrip('/')]) - if not constraints.check_utf8(delete_path): - failed_files.append([quote(obj_name), - HTTPPreconditionFailed().status]) - continue + HTTPPreconditionFailed().status]) + continue + yield (obj_name, delete_path) + + def objs_then_containers(objs_to_delete): + # process all objects first + yield delete_filter(lambda name: '/' in name.strip('/'), + objs_to_delete) + # followed by containers + yield delete_filter(lambda name: '/' not in name.strip('/'), + objs_to_delete) + + def do_delete(obj_name, delete_path): new_env = req.environ.copy() new_env['PATH_INFO'] = delete_path del(new_env['wsgi.input']) new_env['CONTENT_LENGTH'] = 0 new_env['REQUEST_METHOD'] = 'DELETE' - new_env['HTTP_USER_AGENT'] = \ - '%s %s' % (req.environ.get('HTTP_USER_AGENT'), user_agent) + new_env['HTTP_USER_AGENT'] = '%s %s' % ( + req.environ.get('HTTP_USER_AGENT'), user_agent) new_env['swift.source'] = swift_source - self._process_delete(delete_path, obj_name, new_env, resp_dict, - failed_files, failed_file_response) + delete_obj_req = Request.blank(delete_path, 
new_env) + return (delete_obj_req.get_response(self.app), obj_name, 0) + + with StreamingPile(self.delete_concurrency) as pile: + for names_to_delete in objs_then_containers(objs_to_delete): + for resp, obj_name, retry in pile.asyncstarmap( + do_delete, names_to_delete): + if last_yield + self.yield_frequency < time(): + separator = '\r\n\r\n' + last_yield = time() + yield ' ' + self._process_delete(resp, pile, obj_name, + resp_dict, failed_files, + failed_file_response, retry) + if len(failed_files) >= self.max_failed_deletes: + # Abort, but drain off the in-progress deletes + for resp, obj_name, retry in pile: + if last_yield + self.yield_frequency < time(): + separator = '\r\n\r\n' + last_yield = time() + yield ' ' + # Don't pass in the pile, as we shouldn't retry + self._process_delete( + resp, None, obj_name, resp_dict, + failed_files, failed_file_response, retry) + msg = 'Max delete failures exceeded' + raise HTTPBadRequest(msg) if failed_files: resp_dict['Response Status'] = \ @@ -603,10 +641,8 @@ class Bulk(object): yield separator + get_response_body( out_content_type, resp_dict, failed_files) - def _process_delete(self, delete_path, obj_name, env, resp_dict, + def _process_delete(self, resp, pile, obj_name, resp_dict, failed_files, failed_file_response, retry=0): - delete_obj_req = Request.blank(delete_path, env) - resp = delete_obj_req.get_response(self.app) if resp.status_int // 100 == 2: resp_dict['Number Deleted'] += 1 elif resp.status_int == HTTP_NOT_FOUND: @@ -614,13 +650,16 @@ class Bulk(object): elif resp.status_int == HTTP_UNAUTHORIZED: failed_files.append([quote(obj_name), HTTPUnauthorized().status]) - elif resp.status_int == HTTP_CONFLICT and \ + elif resp.status_int == HTTP_CONFLICT and pile and \ self.retry_count > 0 and self.retry_count > retry: retry += 1 sleep(self.retry_interval ** retry) - self._process_delete(delete_path, obj_name, env, resp_dict, - failed_files, failed_file_response, - retry) + delete_obj_req = 
Request.blank(resp.environ['PATH_INFO'], + resp.environ) + + def _retry(req, app, obj_name, retry): + return req.get_response(app), obj_name, retry + pile.spawn(_retry, delete_obj_req, self.app, obj_name, retry) else: if resp.status_int // 100 == 5: failed_file_response['type'] = HTTPBadGateway @@ -664,6 +703,8 @@ def filter_factory(global_conf, **local_conf): max_deletes_per_request = int(conf.get('max_deletes_per_request', 10000)) max_failed_deletes = int(conf.get('max_failed_deletes', 1000)) yield_frequency = int(conf.get('yield_frequency', 10)) + delete_concurrency = min(1000, max(1, int( + conf.get('delete_concurrency', 2)))) retry_count = int(conf.get('delete_container_retry_count', 0)) retry_interval = 1.5 @@ -684,6 +725,7 @@ def filter_factory(global_conf, **local_conf): max_deletes_per_request=max_deletes_per_request, max_failed_deletes=max_failed_deletes, yield_frequency=yield_frequency, + delete_concurrency=delete_concurrency, retry_count=retry_count, retry_interval=retry_interval) return bulk_filter diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py index b87c8f2984..88efce6050 100644 --- a/swift/common/middleware/slo.py +++ b/swift/common/middleware/slo.py @@ -784,7 +784,9 @@ class StaticLargeObject(object): 'rate_limit_after_segment', '10')) self.rate_limit_segments_per_sec = int(self.conf.get( 'rate_limit_segments_per_sec', '1')) - self.bulk_deleter = Bulk(app, {}, logger=self.logger) + delete_concurrency = int(self.conf.get('delete_concurrency', '2')) + self.bulk_deleter = Bulk( + app, {}, delete_concurrency=delete_concurrency, logger=self.logger) def handle_multipart_get_or_head(self, req, start_response): """ diff --git a/swift/common/utils.py b/swift/common/utils.py index a33df51ed8..022c5128d1 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -2603,6 +2603,48 @@ class GreenAsyncPile(object): __next__ = next +class StreamingPile(GreenAsyncPile): + """ + Runs jobs in a pool of green threads, spawning more 
jobs as results are + retrieved and worker threads become available. + + When used as a context manager, has the same worker-killing properties as + :class:`ContextPool`. + """ + def __init__(self, size): + """:param size: number of worker threads to use""" + self.pool = ContextPool(size) + super(StreamingPile, self).__init__(self.pool) + + def asyncstarmap(self, func, args_iter): + """ + This is the same as :func:`itertools.starmap`, except that *func* is + executed in a separate green thread for each item, and results won't + necessarily have the same order as inputs. + """ + args_iter = iter(args_iter) + + # Initialize the pile + for args in itertools.islice(args_iter, self.pool.size): + self.spawn(func, *args) + + # Keep populating the pile as greenthreads become available + for args in args_iter: + yield next(self) + self.spawn(func, *args) + + # Drain the pile + for result in self: + yield result + + def __enter__(self): + self.pool.__enter__() + return self + + def __exit__(self, type, value, traceback): + self.pool.__exit__(type, value, traceback) + + class ModifiedParseResult(ParseResult): "Parse results class for urlparse." diff --git a/test/unit/common/middleware/test_bulk.py b/test/unit/common/middleware/test_bulk.py index 1888261629..1439b0bd2e 100644 --- a/test/unit/common/middleware/test_bulk.py +++ b/test/unit/common/middleware/test_bulk.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from collections import Counter import numbers from six.moves import urllib import unittest @@ -611,10 +612,11 @@ class TestUntar(unittest.TestCase): class TestDelete(unittest.TestCase): + conf = {'delete_concurrency': 1} # default to old single-threaded behavior def setUp(self): self.app = FakeApp() - self.bulk = bulk.filter_factory({})(self.app) + self.bulk = bulk.filter_factory(self.conf)(self.app) def tearDown(self): self.app.calls = 0 @@ -729,10 +731,10 @@ class TestDelete(unittest.TestCase): req.method = 'POST' resp_body = self.handle_delete_and_iter(req) self.assertEqual( - self.app.delete_paths, - ['/delete_works/AUTH_Acc/c/f', - '/delete_works/AUTH_Acc/c/f404', - '/delete_works/AUTH_Acc/c/%25']) + Counter(self.app.delete_paths), + Counter(['/delete_works/AUTH_Acc/c/f', + '/delete_works/AUTH_Acc/c/f404', + '/delete_works/AUTH_Acc/c/%25'])) self.assertEqual(self.app.calls, 3) resp_data = utils.json.loads(resp_body) self.assertEqual(resp_data['Number Deleted'], 2) @@ -756,19 +758,20 @@ class TestDelete(unittest.TestCase): req.method = 'POST' resp_body = self.handle_delete_and_iter(req) self.assertEqual( - self.app.delete_paths, - ['/delete_works/AUTH_Acc/c/ obj \xe2\x99\xa1', - '/delete_works/AUTH_Acc/c/ objbadutf8']) + Counter(self.app.delete_paths), + Counter(['/delete_works/AUTH_Acc/c/ obj \xe2\x99\xa1', + '/delete_works/AUTH_Acc/c/ objbadutf8'])) self.assertEqual(self.app.calls, 2) resp_data = utils.json.loads(resp_body) self.assertEqual(resp_data['Number Deleted'], 1) self.assertEqual(len(resp_data['Errors']), 2) - self.assertEqual(resp_data['Errors'], - [[urllib.parse.quote('c/ objbadutf8'), - '412 Precondition Failed'], - [urllib.parse.quote('/c/f\xdebadutf8'), - '412 Precondition Failed']]) + self.assertEqual( + Counter(map(tuple, resp_data['Errors'])), + Counter([(urllib.parse.quote('c/ objbadutf8'), + '412 Precondition Failed'), + (urllib.parse.quote('/c/f\xdebadutf8'), + '412 Precondition Failed')])) def test_bulk_delete_no_body(self): req = 
Request.blank('/unauth/AUTH_acc/') @@ -798,8 +801,9 @@ class TestDelete(unittest.TestCase): resp_body = self.handle_delete_and_iter(req) resp_data = utils.json.loads(resp_body) self.assertEqual( - resp_data['Errors'], - [['/c/f', '500 Internal Error'], ['c/f2', '500 Internal Error']]) + Counter(map(tuple, resp_data['Errors'])), + Counter([('/c/f', '500 Internal Error'), + ('c/f2', '500 Internal Error')])) self.assertEqual(resp_data['Response Status'], '502 Bad Gateway') def test_bulk_delete_bad_path(self): @@ -879,19 +883,91 @@ class TestDelete(unittest.TestCase): self.assertTrue('400 Bad Request' in resp_body) def test_bulk_delete_max_failures(self): - req = Request.blank('/unauth/AUTH_Acc', body='/c/f1\n/c/f2\n/c/f3', + body = '\n'.join([ + '/c/f1', '/c/f2', '/c/f3', '/c/f4', '/c/f5', '/c/f6', + ]) + req = Request.blank('/unauth/AUTH_Acc', body=body, headers={'Accept': 'application/json'}) req.method = 'POST' with patch.object(self.bulk, 'max_failed_deletes', 2): resp_body = self.handle_delete_and_iter(req) - self.assertEqual(self.app.calls, 2) + # We know there should be at least max_failed_deletes, but there + # may be more as we clean up in-progress requests. + self.assertGreaterEqual(self.app.calls, + self.bulk.max_failed_deletes) + # As we're pulling things off the pile, we: + # - get delete result, + # - process the result, + # - check max_failed_deletes, + # - spawn another delete, repeat. + # As a result, we know our app calls should be *strictly* less. + # Note this means that when delete_concurrency is one, + # self.app.calls will exactly equal self.bulk.max_failed_deletes. 
+ self.assertLess(self.app.calls, + self.bulk.max_failed_deletes + + self.bulk.delete_concurrency) resp_data = utils.json.loads(resp_body) self.assertEqual(resp_data['Response Status'], '400 Bad Request') self.assertEqual(resp_data['Response Body'], 'Max delete failures exceeded') - self.assertEqual(resp_data['Errors'], - [['/c/f1', '401 Unauthorized'], - ['/c/f2', '401 Unauthorized']]) + self.assertIn(['/c/f1', '401 Unauthorized'], resp_data['Errors']) + self.assertIn(['/c/f2', '401 Unauthorized'], resp_data['Errors']) + + +class TestConcurrentDelete(TestDelete): + conf = {'delete_concurrency': 3} + + def test_concurrency_set(self): + self.assertEqual(self.bulk.delete_concurrency, 3) + + +class TestConfig(unittest.TestCase): + def test_defaults(self): + expected_defaults = { + 'delete_concurrency': 2, + 'max_containers': 10000, + 'max_deletes_per_request': 10000, + 'max_failed_deletes': 1000, + 'max_failed_extractions': 1000, + 'retry_count': 0, + 'retry_interval': 1.5, + 'yield_frequency': 10, + } + + filter_app = bulk.filter_factory({})(FakeApp()) + self.assertEqual(expected_defaults, {k: getattr(filter_app, k) + for k in expected_defaults}) + + filter_app = bulk.Bulk(FakeApp(), None) + self.assertEqual(expected_defaults, {k: getattr(filter_app, k) + for k in expected_defaults}) + + def test_delete_concurrency(self): + # Must be an integer + conf = {'delete_concurrency': '1.5'} + self.assertRaises(ValueError, bulk.filter_factory, conf) + + conf = {'delete_concurrency': 'asdf'} + self.assertRaises(ValueError, bulk.filter_factory, conf) + + # Will be at least one + conf = {'delete_concurrency': '-1'} + filter_app = bulk.filter_factory(conf)(FakeApp()) + self.assertEqual(1, filter_app.delete_concurrency) + + conf = {'delete_concurrency': '0'} + filter_app = bulk.filter_factory(conf)(FakeApp()) + self.assertEqual(1, filter_app.delete_concurrency) + + # But if you want to set it stupid-high, we won't stop you + conf = {'delete_concurrency': '1000'} + filter_app = 
bulk.filter_factory(conf)(FakeApp()) + self.assertEqual(1000, filter_app.delete_concurrency) + + # ...unless it's extra-stupid-high, in which case we cap it + conf = {'delete_concurrency': '1001'} + filter_app = bulk.filter_factory(conf)(FakeApp()) + self.assertEqual(1000, filter_app.delete_concurrency) class TestSwiftInfo(unittest.TestCase): diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py index 79eaddcbf3..b87edf8b5e 100644 --- a/test/unit/common/middleware/test_slo.py +++ b/test/unit/common/middleware/test_slo.py @@ -917,15 +917,17 @@ class TestSloDeleteManifest(SloTestCase): status, headers, body = self.call_slo(req) resp_data = json.loads(body) self.assertEqual( - self.app.calls, - [('GET', '/v1/AUTH_test/deltest/' + - 'manifest-missing-submanifest?multipart-manifest=get'), - ('DELETE', '/v1/AUTH_test/deltest/a_1?multipart-manifest=delete'), - ('GET', '/v1/AUTH_test/deltest/' + - 'missing-submanifest?multipart-manifest=get'), - ('DELETE', '/v1/AUTH_test/deltest/d_3?multipart-manifest=delete'), - ('DELETE', '/v1/AUTH_test/deltest/' + - 'manifest-missing-submanifest?multipart-manifest=delete')]) + set(self.app.calls), + set([('GET', '/v1/AUTH_test/deltest/' + + 'manifest-missing-submanifest?multipart-manifest=get'), + ('DELETE', '/v1/AUTH_test/deltest/' + + 'a_1?multipart-manifest=delete'), + ('GET', '/v1/AUTH_test/deltest/' + + 'missing-submanifest?multipart-manifest=get'), + ('DELETE', '/v1/AUTH_test/deltest/' + + 'd_3?multipart-manifest=delete'), + ('DELETE', '/v1/AUTH_test/deltest/' + + 'manifest-missing-submanifest?multipart-manifest=delete')])) self.assertEqual(resp_data['Response Status'], '200 OK') self.assertEqual(resp_data['Response Body'], '') self.assertEqual(resp_data['Number Deleted'], 3) @@ -2652,6 +2654,10 @@ class TestSloBulkLogger(unittest.TestCase): slo_mware = slo.filter_factory({})('fake app') self.assertTrue(slo_mware.logger is slo_mware.bulk_deleter.logger) + def 
test_passes_through_concurrency(self): + slo_mware = slo.filter_factory({'delete_concurrency': 5})('fake app') + self.assertEqual(5, slo_mware.bulk_deleter.delete_concurrency) + class TestSwiftInfo(unittest.TestCase): def setUp(self): From fb7a8e9ab7596a36a6992a3a8f8c6d005a2c2829 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Tue, 24 May 2016 13:37:58 -0700 Subject: [PATCH 137/141] Add links to mitaka install guides Change-Id: I62331923751c521daded4468b5cc5f03655226bc --- doc/source/howto_installmultinode.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/doc/source/howto_installmultinode.rst b/doc/source/howto_installmultinode.rst index e45a8adb77..0e296237a2 100644 --- a/doc/source/howto_installmultinode.rst +++ b/doc/source/howto_installmultinode.rst @@ -6,6 +6,13 @@ Please refer to the latest official `OpenStack Installation Guides `_ for the most up-to-date documentation. +Object Storage installation guide for OpenStack Mitaka +------------------------------------------------------ + + * `openSUSE Leap 42.1 and SUSE Linux Enterprise Server 12 SP1 `_ + * `RHEL 7, CentOS 7 `_ + * `Ubuntu 14.04 `_ + Object Storage installation guide for OpenStack Liberty ------------------------------------------------------- From b3ab715c055283ccfea9a504d6da20741d82e7ad Mon Sep 17 00:00:00 2001 From: Matthew Oliver Date: Wed, 25 May 2016 14:35:54 +1000 Subject: [PATCH 138/141] Add ring-builder dispersion command to admin guide This change updates the admin guide to point out the dispersion command in swift-ring-builder and mentions the dispersion verbose table to make it more obvious to operators. 
Change-Id: I72b4c8b2d718e6063de0fdabbaf4f2b73694e0a4 --- doc/source/admin_guide.rst | 17 +++++++++++++++++ doc/source/overview_ring.rst | 4 ++++ 2 files changed, 21 insertions(+) diff --git a/doc/source/admin_guide.rst b/doc/source/admin_guide.rst index 4f87939c0c..d039caac24 100644 --- a/doc/source/admin_guide.rst +++ b/doc/source/admin_guide.rst @@ -98,6 +98,23 @@ This produces a great deal of output that is mostly useful if you are either (a) attempting to fix the ring builder, or (b) filing a bug against the ring builder. +You may notice in the rebalance output a 'dispersion' number. What this +number means is explained in :ref:`ring_dispersion` but in essence +is the percentage of partitions in the ring that have too many replicas +within a particular failure domain. You can ask 'swift-ring-builder' what +the dispersion is with:: + + swift-ring-builder dispersion + +This will give you the percentage again, if you want a detailed view of +the dispersion simply add a ``--verbose``:: + + swift-ring-builder dispersion --verbose + +This will not only display the percentage but will also display a dispersion +table that lists partition dispersion by tier. You can use this table to figure +out were you need to add capacity or to help tune an :ref:`ring_overload` value. + ----------------------- Scripting Ring Creation ----------------------- diff --git a/doc/source/overview_ring.rst b/doc/source/overview_ring.rst index 181b2f143c..321b5ac8bc 100644 --- a/doc/source/overview_ring.rst +++ b/doc/source/overview_ring.rst @@ -158,6 +158,8 @@ for the ring. This means that some partitions will have more replicas than others. For example, if a ring has 3.25 replicas, then 25% of its partitions will have four replicas, while the remaining 75% will have just three. +.. _ring_dispersion: + ********** Dispersion ********** @@ -173,6 +175,8 @@ the dispersion metric. A lower dispersion value is better, and the value can be used to find the proper value for "overload". +.. 
_ring_overload: + ******** Overload ******** From f1fd50723bb84c4941e949895576733f6eb67793 Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Wed, 25 May 2016 09:53:31 +0200 Subject: [PATCH 139/141] Add dispersion --verbose example to admin guide Change-Id: I5f9cacedde2a329332ccf744800b6f2453e8b28e --- doc/source/admin_guide.rst | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/doc/source/admin_guide.rst b/doc/source/admin_guide.rst index d039caac24..2ce58abc87 100644 --- a/doc/source/admin_guide.rst +++ b/doc/source/admin_guide.rst @@ -115,6 +115,40 @@ This will not only display the percentage but will also display a dispersion table that lists partition dispersion by tier. You can use this table to figure out were you need to add capacity or to help tune an :ref:`ring_overload` value. +Now let's take an example with 1 region, 3 zones and 4 devices. Each device has +the same weight, and the ``dispersion --verbose`` might show the following:: + + Dispersion is 50.000000, Balance is 0.000000, Overload is 0.00% + Required overload is 33.333333% + Worst tier is 50.000000 (r1z3) + -------------------------------------------------------------------------- + Tier Parts % Max 0 1 2 3 + -------------------------------------------------------------------------- + r1 256 0.00 3 0 0 0 256 + r1z1 192 0.00 1 64 192 0 0 + r1z1-127.0.0.1 192 0.00 1 64 192 0 0 + r1z1-127.0.0.1/sda 192 0.00 1 64 192 0 0 + r1z2 192 0.00 1 64 192 0 0 + r1z2-127.0.0.2 192 0.00 1 64 192 0 0 + r1z2-127.0.0.2/sda 192 0.00 1 64 192 0 0 + r1z3 256 50.00 1 0 128 128 0 + r1z3-127.0.0.3 256 50.00 1 0 128 128 0 + r1z3-127.0.0.3/sda 192 0.00 1 64 192 0 0 + r1z3-127.0.0.3/sdb 192 0.00 1 64 192 0 0 + + +The first line reports that there are 256 partitions with 3 copies in region 1; +and this is an expected output in this case (single region with 3 replicas) as +reported by the "Max" value. + +However, there is some inbalance in the cluster, more precisely in zone 3. 
The +"Max" reports a maximum of 1 copy in this zone; however 50.00% of the partitions +are storing 2 replicas in this zone (which is somewhat expected, because there +are more disks in this zone). + +You can now either add more capacity to the other zones, decrease the total +weight in zone 3 or set the overload value to 33.333333%. + ----------------------- Scripting Ring Creation ----------------------- From b52eccb3b1ea0591f0040587228d3705b5d3f68d Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Wed, 25 May 2016 11:21:25 -0700 Subject: [PATCH 140/141] Clarify overload best practices in admin guide Change-Id: Ib7c08bdeab6374771bb8e2b05053e7e16973524d --- doc/source/admin_guide.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/admin_guide.rst b/doc/source/admin_guide.rst index 2ce58abc87..392ccdf9dc 100644 --- a/doc/source/admin_guide.rst +++ b/doc/source/admin_guide.rst @@ -147,7 +147,8 @@ are storing 2 replicas in this zone (which is somewhat expected, because there are more disks in this zone). You can now either add more capacity to the other zones, decrease the total -weight in zone 3 or set the overload value to 33.333333%. +weight in zone 3 or set the overload to a value `greater than` 33.333333% - +only as much overload as needed will be used. ----------------------- Scripting Ring Creation From 5fe392b562de3baed080704df433fb392cb4fb31 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?= Date: Tue, 31 May 2016 16:25:50 +0200 Subject: [PATCH 141/141] Fixed typo Change-Id: I7a35c0076360c7a23cf405189828d3c252ec6708 --- swift/cli/ringbuilder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/swift/cli/ringbuilder.py b/swift/cli/ringbuilder.py index a7a005ef9a..fa8c425409 100644 --- a/swift/cli/ringbuilder.py +++ b/swift/cli/ringbuilder.py @@ -722,7 +722,7 @@ swift-ring-builder remove [search-value ...] 
or -swift-ring-builder search +swift-ring-builder remove --region --zone --ip --port --replication-ip --replication-port --device --meta --weight