From 73d0f1620a269f990dbd3d2796abf27e9a05e227 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?B=C3=A9la=20Vancsics?=
Date: Tue, 8 Dec 2015 10:17:08 +0100
Subject: [PATCH 01/52] Remove unused parameter

The account parameter was not used in the method.

Change-Id: I8e91d7616529f33b615bc52af76bfda01141d364
---
 swift/common/direct_client.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/swift/common/direct_client.py b/swift/common/direct_client.py
index 2f7a1fa675..bbb1ca4b90 100644
--- a/swift/common/direct_client.py
+++ b/swift/common/direct_client.py
@@ -50,7 +50,7 @@ class DirectClientException(ClientException):
 
 
 def _get_direct_account_container(path, stype, node, part,
-                                  account, marker=None, limit=None,
+                                  marker=None, limit=None,
                                   prefix=None, delimiter=None,
                                   conn_timeout=5, response_timeout=15):
     """Base class for get direct account and container.
@@ -113,7 +113,7 @@ def direct_get_account(node, part, account, marker=None, limit=None,
     """
     path = '/' + account
     return _get_direct_account_container(path, "Account", node, part,
-                                         account, marker=marker,
+                                         marker=marker,
                                          limit=limit, prefix=prefix,
                                          delimiter=delimiter,
                                          conn_timeout=conn_timeout,
@@ -189,7 +189,7 @@ def direct_get_container(node, part, account, container, marker=None,
     """
     path = '/%s/%s' % (account, container)
     return _get_direct_account_container(path, "Container", node,
-                                         part, account, marker=marker,
+                                         part, marker=marker,
                                          limit=limit, prefix=prefix,
                                          delimiter=delimiter,
                                          conn_timeout=conn_timeout,

From 7f636a557296ecc6ae4727700cfcf9f82573bd16 Mon Sep 17 00:00:00 2001
From: Samuel Merritt
Date: Mon, 30 Nov 2015 18:06:09 -0800
Subject: [PATCH 02/52] Allow smaller segments in static large objects

The addition of range support for SLO segments (commit 25d5e68)
required the range size to be at least the SLO minimum segment size
(default 1 MiB). However, if you're doing something like assembling a
video of short clips out of a larger one, then you might not need a
full 1 MiB.

The reason for the 1 MiB restriction was to protect Swift from
resource overconsumption. It takes CPU, RAM, and internal bandwidth to
connect to an object server, so it's much cheaper to serve a 10 GiB
SLO if it has 10 MiB segments than if it has 10 B segments.

Instead of a strict limit, now we apply ratelimiting to small
segments. The threshold for "small" is configurable and defaults to
1 MiB. SLO segments may now be as small as 1 byte.

If a client makes SLOs as before, it'll still be able to download the
objects as fast as Swift can serve them. However, an SLO with a lot of
small ranges or segments will be slowed down to avoid resource
overconsumption. This is similar to how DLOs work, except that DLOs
ratelimit *every* segment, not just small ones.

UpgradeImpact

For operators: if your cluster has enabled ratelimiting for SLO, you
will want to set rate_limit_under_size to a large number prior to
upgrade. This will preserve your existing behavior of ratelimiting all
SLO segments. 5368709123 is a good value, as that's 1 greater than the
default max object size. Alternatively, hold down the 9 key until you
get bored.

If your cluster has not enabled ratelimiting for SLO (the default), no
action is needed.
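To make the operator guidance above concrete, a pre-upgrade
proxy-server.conf stanza might look like the following (a sketch only;
the option names come from this patch's proxy-server.conf-sample, and
the values are the new defaults plus the suggested override):

    [filter:slo]
    use = egg:swift#slo
    # Treat every segment as "small" so all segments stay ratelimited,
    # as they were before this change; 5368709123 is 1 greater than the
    # default max object size.
    rate_limit_under_size = 5368709123
    rate_limit_after_segment = 10
    rate_limit_segments_per_sec = 1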
Change-Id: Id1ff7742308ed816038a5c44ec548afa26612b95 --- etc/proxy-server.conf-sample | 9 +- swift/common/middleware/slo.py | 59 +++-- swift/common/utils.py | 19 +- test/unit/common/middleware/helpers.py | 9 +- test/unit/common/middleware/test_slo.py | 323 ++++++++++++++++-------- test/unit/common/test_utils.py | 20 ++ 6 files changed, 287 insertions(+), 152 deletions(-) diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index 464705d240..bd421e5030 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -622,14 +622,17 @@ use = egg:swift#bulk use = egg:swift#slo # max_manifest_segments = 1000 # max_manifest_size = 2097152 -# min_segment_size = 1048576 -# Start rate-limiting SLO segment serving after the Nth segment of a +# +# Rate limiting applies only to segments smaller than this size (bytes). +# rate_limit_under_size = 1048576 +# +# Start rate-limiting SLO segment serving after the Nth small segment of a # segmented object. # rate_limit_after_segment = 10 # # Once segment rate-limiting kicks in for an object, limit segments served # to N per second. 0 means no rate-limiting. -# rate_limit_segments_per_sec = 0 +# rate_limit_segments_per_sec = 1 # # Time limit on GET requests (seconds) # max_get_time = 86400 diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py index 048d8b5add..37f9a1fad2 100644 --- a/swift/common/middleware/slo.py +++ b/swift/common/middleware/slo.py @@ -57,12 +57,11 @@ The format of the list will be: "range": "1048576-2097151"}, ...] The number of object segments is limited to a configurable amount, default -1000. Each segment, except for the final one, must be at least 1 megabyte -(configurable). On upload, the middleware will head every segment passed in to -verify: +1000. Each segment must be at least 1 byte. On upload, the middleware will +head every segment passed in to verify: 1. the segment exists (i.e. the HEAD was successful); - 2. the segment meets minimum size requirements (if not the last segment); + 2. the segment meets minimum size requirements; 3. if the user provided a non-null etag, the etag matches; 4. if the user provided a non-null size_bytes, the size_bytes matches; and 5. if the user provided a range, it is a singular, syntactically correct range @@ -121,8 +120,9 @@ finally bytes 2095104 through 2097152 (i.e., the last 2048 bytes) of .. note:: - The minimum sized range is min_segment_size, which by - default is 1048576 (1MB). + + The minimum sized range is 1 byte. This is the same as the minimum + segment size. ------------------------- @@ -221,7 +221,7 @@ from swift.common.middleware.bulk import get_response_body, \ ACCEPTABLE_FORMATS, Bulk -DEFAULT_MIN_SEGMENT_SIZE = 1024 * 1024 # 1 MiB +DEFAULT_RATE_LIMIT_UNDER_SIZE = 1024 * 1024 # 1 MiB DEFAULT_MAX_MANIFEST_SEGMENTS = 1000 DEFAULT_MAX_MANIFEST_SIZE = 1024 * 1024 * 2 # 2 MiB @@ -231,7 +231,7 @@ OPTIONAL_SLO_KEYS = set(['range']) ALLOWED_SLO_KEYS = REQUIRED_SLO_KEYS | OPTIONAL_SLO_KEYS -def parse_and_validate_input(req_body, req_path, min_segment_size): +def parse_and_validate_input(req_body, req_path): """ Given a request body, parses it and returns a list of dictionaries. 
@@ -269,7 +269,6 @@ def parse_and_validate_input(req_body, req_path, min_segment_size): vrs, account, _junk = split_path(req_path, 3, 3, True) errors = [] - num_segs = len(parsed_data) for seg_index, seg_dict in enumerate(parsed_data): if not isinstance(seg_dict, dict): errors.append("Index %d: not a JSON object" % seg_index) @@ -315,10 +314,10 @@ def parse_and_validate_input(req_body, req_path, min_segment_size): except (TypeError, ValueError): errors.append("Index %d: invalid size_bytes" % seg_index) continue - if (seg_size < min_segment_size and seg_index < num_segs - 1): - errors.append("Index %d: too small; each segment, except " - "the last, must be at least %d bytes." - % (seg_index, min_segment_size)) + if seg_size < 1: + errors.append("Index %d: too small; each segment must be " + "at least 1 byte." + % (seg_index,)) continue obj_path = '/'.join(['', vrs, account, seg_dict['path'].lstrip('/')]) @@ -662,10 +661,17 @@ class SloGetContext(WSGIContext): plain_listing_iter = self._segment_listing_iterator( req, ver, account, segments) + def is_small_segment((seg_dict, start_byte, end_byte)): + start = 0 if start_byte is None else start_byte + end = int(seg_dict['bytes']) - 1 if end_byte is None else end_byte + is_small = (end - start + 1) < self.slo.rate_limit_under_size + return is_small + ratelimited_listing_iter = RateLimitedIterator( plain_listing_iter, self.slo.rate_limit_segments_per_sec, - limit_after=self.slo.rate_limit_after_segment) + limit_after=self.slo.rate_limit_after_segment, + ratelimit_if=is_small_segment) # self._segment_listing_iterator gives us 3-tuples of (segment dict, # start byte, end byte), but SegmentedIterable wants (obj path, etag, @@ -716,7 +722,7 @@ class StaticLargeObject(object): :param conf: The configuration dict for the middleware. 
""" - def __init__(self, app, conf, min_segment_size=DEFAULT_MIN_SEGMENT_SIZE, + def __init__(self, app, conf, max_manifest_segments=DEFAULT_MAX_MANIFEST_SEGMENTS, max_manifest_size=DEFAULT_MAX_MANIFEST_SIZE): self.conf = conf @@ -724,12 +730,13 @@ class StaticLargeObject(object): self.logger = get_logger(conf, log_route='slo') self.max_manifest_segments = max_manifest_segments self.max_manifest_size = max_manifest_size - self.min_segment_size = min_segment_size self.max_get_time = int(self.conf.get('max_get_time', 86400)) + self.rate_limit_under_size = int(self.conf.get( + 'rate_limit_under_size', DEFAULT_RATE_LIMIT_UNDER_SIZE)) self.rate_limit_after_segment = int(self.conf.get( 'rate_limit_after_segment', '10')) self.rate_limit_segments_per_sec = int(self.conf.get( - 'rate_limit_segments_per_sec', '0')) + 'rate_limit_segments_per_sec', '1')) self.bulk_deleter = Bulk(app, {}, logger=self.logger) def handle_multipart_get_or_head(self, req, start_response): @@ -783,7 +790,7 @@ class StaticLargeObject(object): raise HTTPLengthRequired(request=req) parsed_data = parse_and_validate_input( req.body_file.read(self.max_manifest_size), - req.path, self.min_segment_size) + req.path) problem_segments = [] if len(parsed_data) > self.max_manifest_segments: @@ -812,6 +819,7 @@ class StaticLargeObject(object): new_env['CONTENT_LENGTH'] = 0 new_env['HTTP_USER_AGENT'] = \ '%s MultipartPUT' % req.environ.get('HTTP_USER_AGENT') + if obj_path != last_obj_path: last_obj_path = obj_path head_seg_resp = \ @@ -840,12 +848,10 @@ class StaticLargeObject(object): seg_dict['range'] = '%d-%d' % (rng[0], rng[1] - 1) segment_length = rng[1] - rng[0] - if segment_length < self.min_segment_size and \ - index < len(parsed_data) - 1: + if segment_length < 1: problem_segments.append( [quote(obj_name), - 'Too small; each segment, except the last, must be ' - 'at least %d bytes.' 
% self.min_segment_size]) + 'Too small; each segment must be at least 1 byte.']) total_size += segment_length if seg_dict['size_bytes'] is not None and \ seg_dict['size_bytes'] != head_seg_resp.content_length: @@ -1045,18 +1051,17 @@ def filter_factory(global_conf, **local_conf): DEFAULT_MAX_MANIFEST_SEGMENTS)) max_manifest_size = int(conf.get('max_manifest_size', DEFAULT_MAX_MANIFEST_SIZE)) - min_segment_size = int(conf.get('min_segment_size', - DEFAULT_MIN_SEGMENT_SIZE)) register_swift_info('slo', max_manifest_segments=max_manifest_segments, max_manifest_size=max_manifest_size, - min_segment_size=min_segment_size) + # this used to be configurable; report it as 1 for + # clients that might still care + min_segment_size=1) def slo_filter(app): return StaticLargeObject( app, conf, max_manifest_segments=max_manifest_segments, - max_manifest_size=max_manifest_size, - min_segment_size=min_segment_size) + max_manifest_size=max_manifest_size) return slo_filter diff --git a/swift/common/utils.py b/swift/common/utils.py index d6cc5d7afb..4e597d1b26 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -1041,22 +1041,27 @@ class RateLimitedIterator(object): this many elements; default is 0 (rate limit immediately) """ - def __init__(self, iterable, elements_per_second, limit_after=0): + def __init__(self, iterable, elements_per_second, limit_after=0, + ratelimit_if=lambda _junk: True): self.iterator = iter(iterable) self.elements_per_second = elements_per_second self.limit_after = limit_after self.running_time = 0 + self.ratelimit_if = ratelimit_if def __iter__(self): return self def next(self): - if self.limit_after > 0: - self.limit_after -= 1 - else: - self.running_time = ratelimit_sleep(self.running_time, - self.elements_per_second) - return next(self.iterator) + next_value = next(self.iterator) + + if self.ratelimit_if(next_value): + if self.limit_after > 0: + self.limit_after -= 1 + else: + self.running_time = ratelimit_sleep(self.running_time, + self.elements_per_second) + return next_value class GreenthreadSafeIterator(object): diff --git a/test/unit/common/middleware/helpers.py b/test/unit/common/middleware/helpers.py index bc6ad50fdd..1387a773b4 100644 --- a/test/unit/common/middleware/helpers.py +++ b/test/unit/common/middleware/helpers.py @@ -56,7 +56,7 @@ class FakeSwift(object): self.container_ring = FakeRing() self.get_object_ring = lambda policy_index: FakeRing() - def _get_response(self, method, path): + def _find_response(self, method, path): resp = self._responses[(method, path)] if isinstance(resp, list): try: @@ -84,16 +84,17 @@ class FakeSwift(object): self.swift_sources.append(env.get('swift.source')) try: - resp_class, raw_headers, body = self._get_response(method, path) + resp_class, raw_headers, body = self._find_response(method, path) headers = swob.HeaderKeyDict(raw_headers) except KeyError: if (env.get('QUERY_STRING') and (method, env['PATH_INFO']) in self._responses): - resp_class, raw_headers, body = self._get_response( + resp_class, raw_headers, body = self._find_response( method, env['PATH_INFO']) headers = swob.HeaderKeyDict(raw_headers) elif method == 'HEAD' and ('GET', path) in self._responses: - resp_class, raw_headers, body = self._get_response('GET', path) + resp_class, raw_headers, body = self._find_response( + 'GET', path) body = None headers = swob.HeaderKeyDict(raw_headers) elif method == 'GET' and obj and path in self.uploaded: diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py index 
32d49547d4..897bf551f9 100644 --- a/test/unit/common/middleware/test_slo.py +++ b/test/unit/common/middleware/test_slo.py @@ -55,8 +55,8 @@ def md5hex(s): class SloTestCase(unittest.TestCase): def setUp(self): self.app = FakeSwift() - self.slo = slo.filter_factory({})(self.app) - self.slo.min_segment_size = 1 + slo_conf = {'rate_limit_under_size': '0'} + self.slo = slo.filter_factory(slo_conf)(self.app) self.slo.logger = self.app.logger def call_app(self, req, app=None, expect_exception=False): @@ -120,18 +120,14 @@ class TestSloMiddleware(SloTestCase): resp.startswith('X-Static-Large-Object is a reserved header')) def _put_bogus_slo(self, manifest_text, - manifest_path='/v1/a/c/the-manifest', - min_segment_size=1): + manifest_path='/v1/a/c/the-manifest'): with self.assertRaises(HTTPException) as catcher: - slo.parse_and_validate_input(manifest_text, manifest_path, - min_segment_size) + slo.parse_and_validate_input(manifest_text, manifest_path) self.assertEqual(400, catcher.exception.status_int) return catcher.exception.body - def _put_slo(self, manifest_text, manifest_path='/v1/a/c/the-manifest', - min_segment_size=1): - return slo.parse_and_validate_input(manifest_text, manifest_path, - min_segment_size) + def _put_slo(self, manifest_text, manifest_path='/v1/a/c/the-manifest'): + return slo.parse_and_validate_input(manifest_text, manifest_path) def test_bogus_input(self): self.assertEqual('Manifest must be valid JSON.\n', @@ -248,19 +244,18 @@ class TestSloMiddleware(SloTestCase): def test_bogus_input_undersize_segment(self): self.assertEqual( - "Index 1: too small; each segment, except the last, " - "must be at least 1000 bytes.\n" - "Index 2: too small; each segment, except the last, " - "must be at least 1000 bytes.\n", + "Index 1: too small; each segment " + "must be at least 1 byte.\n" + "Index 2: too small; each segment " + "must be at least 1 byte.\n", self._put_bogus_slo( json.dumps([ - {'path': u'/c/s1', 'etag': 'a', 'size_bytes': 1000}, - {'path': u'/c/s2', 'etag': 'b', 'size_bytes': 999}, - {'path': u'/c/s3', 'etag': 'c', 'size_bytes': 998}, + {'path': u'/c/s1', 'etag': 'a', 'size_bytes': 1}, + {'path': u'/c/s2', 'etag': 'b', 'size_bytes': 0}, + {'path': u'/c/s3', 'etag': 'c', 'size_bytes': 0}, # No error for this one since size_bytes is unspecified {'path': u'/c/s4', 'etag': 'd', 'size_bytes': None}, - {'path': u'/c/s5', 'etag': 'e', 'size_bytes': 996}]), - min_segment_size=1000)) + {'path': u'/c/s5', 'etag': 'e', 'size_bytes': 1000}]))) def test_valid_input(self): data = json.dumps( @@ -268,19 +263,19 @@ class TestSloMiddleware(SloTestCase): 'size_bytes': 100}]) self.assertEqual( '/cont/object', - slo.parse_and_validate_input(data, '/v1/a/cont/man', 1)[0]['path']) + slo.parse_and_validate_input(data, '/v1/a/cont/man')[0]['path']) data = json.dumps( [{'path': '/cont/object', 'etag': 'etagoftheobjectsegment', 'size_bytes': 100, 'range': '0-40'}]) - parsed = slo.parse_and_validate_input(data, '/v1/a/cont/man', 1) + parsed = slo.parse_and_validate_input(data, '/v1/a/cont/man') self.assertEqual('/cont/object', parsed[0]['path']) self.assertEqual([(0, 40)], parsed[0]['range'].ranges) data = json.dumps( [{'path': '/cont/object', 'etag': 'etagoftheobjectsegment', 'size_bytes': None, 'range': '0-40'}]) - parsed = slo.parse_and_validate_input(data, '/v1/a/cont/man', 1) + parsed = slo.parse_and_validate_input(data, '/v1/a/cont/man') self.assertEqual('/cont/object', parsed[0]['path']) self.assertEqual(None, parsed[0]['size_bytes']) self.assertEqual([(0, 40)], parsed[0]['range'].ranges) 
@@ -316,6 +311,11 @@ class TestSloPutManifest(SloTestCase): swob.HTTPOk, {'Content-Length': '10', 'Etag': 'etagoftheobjectsegment'}, None) + self.app.register( + 'HEAD', '/v1/AUTH_test/cont/empty_object', + swob.HTTPOk, + {'Content-Length': '0', 'Etag': 'etagoftheobjectsegment'}, + None) self.app.register( 'HEAD', u'/v1/AUTH_test/cont/あ_1', swob.HTTPOk, @@ -340,11 +340,17 @@ class TestSloPutManifest(SloTestCase): {'Content-Length': '2', 'Etag': 'b', 'Last-Modified': 'Fri, 01 Feb 2012 20:38:36 GMT'}, None) + + _manifest_json = json.dumps( + [{'name': '/checktest/a_5', 'hash': md5hex("a" * 5), + 'content_type': 'text/plain', 'bytes': '5'}]) self.app.register( 'GET', '/v1/AUTH_test/checktest/slob', swob.HTTPOk, - {'X-Static-Large-Object': 'true', 'Etag': 'slob-etag'}, - None) + {'X-Static-Large-Object': 'true', 'Etag': 'slob-etag', + 'Content-Type': 'cat/picture;swift_bytes=12345', + 'Content-Length': len(_manifest_json)}, + _manifest_json) self.app.register( 'PUT', '/v1/AUTH_test/checktest/man_3', swob.HTTPCreated, {}, None) @@ -367,21 +373,6 @@ class TestSloPutManifest(SloTestCase): pass self.assertEqual(e.status_int, 413) - with patch.object(self.slo, 'min_segment_size', 1000): - test_json_data_2obj = json.dumps( - [{'path': '/cont/small_object1', - 'etag': 'etagoftheobjectsegment', - 'size_bytes': 10}, - {'path': '/cont/small_object2', - 'etag': 'etagoftheobjectsegment', - 'size_bytes': 10}]) - req = Request.blank('/v1/a/c/o', body=test_json_data_2obj) - try: - self.slo.handle_multipart_put(req, fake_start_response) - except HTTPException as e: - pass - self.assertEqual(e.status_int, 400) - req = Request.blank('/v1/a/c/o', headers={'X-Copy-From': 'lala'}) try: self.slo.handle_multipart_put(req, fake_start_response) @@ -411,49 +402,29 @@ class TestSloPutManifest(SloTestCase): self.slo(req.environ, my_fake_start_response) self.assertTrue('X-Static-Large-Object' in req.headers) - def test_handle_multipart_put_success_allow_small_last_segment(self): - with patch.object(self.slo, 'min_segment_size', 50): - test_json_data = json.dumps([{'path': '/cont/object', - 'etag': 'etagoftheobjectsegment', - 'size_bytes': 100}, - {'path': '/cont/small_object', - 'etag': 'etagoftheobjectsegment', - 'size_bytes': 10}]) - req = Request.blank( - '/v1/AUTH_test/c/man?multipart-manifest=put', - environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'}, - body=test_json_data) - self.assertTrue('X-Static-Large-Object' not in req.headers) - self.slo(req.environ, fake_start_response) - self.assertTrue('X-Static-Large-Object' in req.headers) + def test_handle_multipart_put_disallow_empty_first_segment(self): + test_json_data = json.dumps([{'path': '/cont/object', + 'etag': 'etagoftheobjectsegment', + 'size_bytes': 0}, + {'path': '/cont/small_object', + 'etag': 'etagoftheobjectsegment', + 'size_bytes': 100}]) + req = Request.blank('/v1/a/c/o', body=test_json_data) + with self.assertRaises(HTTPException) as catcher: + self.slo.handle_multipart_put(req, fake_start_response) + self.assertEqual(catcher.exception.status_int, 400) - def test_handle_multipart_put_success_allow_only_one_small_segment(self): - with patch.object(self.slo, 'min_segment_size', 50): - test_json_data = json.dumps([{'path': '/cont/small_object', - 'etag': 'etagoftheobjectsegment', - 'size_bytes': 10}]) - req = Request.blank( - '/v1/AUTH_test/c/man?multipart-manifest=put', - environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'}, - body=test_json_data) - self.assertTrue('X-Static-Large-Object' not in req.headers) - self.slo(req.environ, 
fake_start_response) - self.assertTrue('X-Static-Large-Object' in req.headers) - - def test_handle_multipart_put_disallow_small_first_segment(self): - with patch.object(self.slo, 'min_segment_size', 50): - test_json_data = json.dumps([{'path': '/cont/object', - 'etag': 'etagoftheobjectsegment', - 'size_bytes': 10}, - {'path': '/cont/small_object', - 'etag': 'etagoftheobjectsegment', - 'size_bytes': 100}]) - req = Request.blank('/v1/a/c/o', body=test_json_data) - try: - self.slo.handle_multipart_put(req, fake_start_response) - except HTTPException as e: - pass - self.assertEqual(e.status_int, 400) + def test_handle_multipart_put_disallow_empty_last_segment(self): + test_json_data = json.dumps([{'path': '/cont/object', + 'etag': 'etagoftheobjectsegment', + 'size_bytes': 100}, + {'path': '/cont/small_object', + 'etag': 'etagoftheobjectsegment', + 'size_bytes': 0}]) + req = Request.blank('/v1/a/c/o', body=test_json_data) + with self.assertRaises(HTTPException) as catcher: + self.slo.handle_multipart_put(req, fake_start_response) + self.assertEqual(catcher.exception.status_int, 400) def test_handle_multipart_put_success_unicode(self): test_json_data = json.dumps([{'path': u'/cont/object\u2661', @@ -543,7 +514,7 @@ class TestSloPutManifest(SloTestCase): {'path': '/checktest/badreq', 'etag': 'a', 'size_bytes': '1'}, {'path': '/checktest/b_2', 'etag': 'not-b', 'size_bytes': '2'}, {'path': '/checktest/slob', 'etag': 'not-slob', - 'size_bytes': '2'}]) + 'size_bytes': '12345'}]) req = Request.blank( '/v1/AUTH_test/checktest/man?multipart-manifest=put', environ={'REQUEST_METHOD': 'PUT'}, @@ -553,6 +524,7 @@ class TestSloPutManifest(SloTestCase): status, headers, body = self.call_slo(req) self.assertEqual(self.app.call_count, 5) errors = json.loads(body)['Errors'] + self.assertEqual(len(errors), 5) self.assertEqual(errors[0][0], '/checktest/a_1') self.assertEqual(errors[0][1], 'Size Mismatch') @@ -587,35 +559,33 @@ class TestSloPutManifest(SloTestCase): self.assertEqual(2, manifest_data[1]['bytes']) def test_handle_multipart_put_skip_size_check_still_uses_min_size(self): - with patch.object(self.slo, 'min_segment_size', 50): - test_json_data = json.dumps([{'path': '/cont/small_object', - 'etag': 'etagoftheobjectsegment', - 'size_bytes': None}, - {'path': '/cont/small_object', - 'etag': 'etagoftheobjectsegment', - 'size_bytes': 100}]) - req = Request.blank('/v1/AUTH_test/c/o', body=test_json_data) - with self.assertRaises(HTTPException) as cm: - self.slo.handle_multipart_put(req, fake_start_response) - self.assertEqual(cm.exception.status_int, 400) + test_json_data = json.dumps([{'path': '/cont/empty_object', + 'etag': 'etagoftheobjectsegment', + 'size_bytes': None}, + {'path': '/cont/small_object', + 'etag': 'etagoftheobjectsegment', + 'size_bytes': 100}]) + req = Request.blank('/v1/AUTH_test/c/o', body=test_json_data) + with self.assertRaises(HTTPException) as cm: + self.slo.handle_multipart_put(req, fake_start_response) + self.assertEqual(cm.exception.status_int, 400) def test_handle_multipart_put_skip_size_check_no_early_bailout(self): - with patch.object(self.slo, 'min_segment_size', 50): - # The first is too small (it's 10 bytes but min size is 50), and - # the second has a bad etag. Make sure both errors show up in - # the response. 
- test_json_data = json.dumps([{'path': '/cont/small_object', - 'etag': 'etagoftheobjectsegment', - 'size_bytes': None}, - {'path': '/cont/object2', - 'etag': 'wrong wrong wrong', - 'size_bytes': 100}]) - req = Request.blank('/v1/AUTH_test/c/o', body=test_json_data) - with self.assertRaises(HTTPException) as cm: - self.slo.handle_multipart_put(req, fake_start_response) - self.assertEqual(cm.exception.status_int, 400) - self.assertIn('at least 50 bytes', cm.exception.body) - self.assertIn('Etag Mismatch', cm.exception.body) + # The first is too small (it's 0 bytes), and + # the second has a bad etag. Make sure both errors show up in + # the response. + test_json_data = json.dumps([{'path': '/cont/empty_object', + 'etag': 'etagoftheobjectsegment', + 'size_bytes': None}, + {'path': '/cont/object2', + 'etag': 'wrong wrong wrong', + 'size_bytes': 100}]) + req = Request.blank('/v1/AUTH_test/c/o', body=test_json_data) + with self.assertRaises(HTTPException) as cm: + self.slo.handle_multipart_put(req, fake_start_response) + self.assertEqual(cm.exception.status_int, 400) + self.assertIn('at least 1 byte', cm.exception.body) + self.assertIn('Etag Mismatch', cm.exception.body) def test_handle_multipart_put_skip_etag_check(self): good_data = json.dumps( @@ -1126,6 +1096,46 @@ class TestSloGetManifest(SloTestCase): swob.HTTPOk, {'Content-Length': '20', 'Etag': md5hex('d' * 20)}, 'd' * 20) + self.app.register( + 'GET', '/v1/AUTH_test/gettest/e_25', + swob.HTTPOk, {'Content-Length': '25', + 'Etag': md5hex('e' * 25)}, + 'e' * 25) + self.app.register( + 'GET', '/v1/AUTH_test/gettest/f_30', + swob.HTTPOk, {'Content-Length': '30', + 'Etag': md5hex('f' * 30)}, + 'f' * 30) + self.app.register( + 'GET', '/v1/AUTH_test/gettest/g_35', + swob.HTTPOk, {'Content-Length': '35', + 'Etag': md5hex('g' * 35)}, + 'g' * 35) + self.app.register( + 'GET', '/v1/AUTH_test/gettest/h_40', + swob.HTTPOk, {'Content-Length': '40', + 'Etag': md5hex('h' * 40)}, + 'h' * 40) + self.app.register( + 'GET', '/v1/AUTH_test/gettest/i_45', + swob.HTTPOk, {'Content-Length': '45', + 'Etag': md5hex('i' * 45)}, + 'i' * 45) + self.app.register( + 'GET', '/v1/AUTH_test/gettest/j_50', + swob.HTTPOk, {'Content-Length': '50', + 'Etag': md5hex('j' * 50)}, + 'j' * 50) + self.app.register( + 'GET', '/v1/AUTH_test/gettest/k_55', + swob.HTTPOk, {'Content-Length': '55', + 'Etag': md5hex('k' * 55)}, + 'k' * 55) + self.app.register( + 'GET', '/v1/AUTH_test/gettest/l_60', + swob.HTTPOk, {'Content-Length': '60', + 'Etag': md5hex('l' * 60)}, + 'l' * 60) _bc_manifest_json = json.dumps( [{'name': '/gettest/b_10', 'hash': md5hex('b' * 10), 'bytes': '10', @@ -1156,6 +1166,39 @@ class TestSloGetManifest(SloTestCase): 'Etag': md5(_abcd_manifest_json).hexdigest()}, _abcd_manifest_json) + _abcdefghijkl_manifest_json = json.dumps( + [{'name': '/gettest/a_5', 'hash': md5hex("a" * 5), + 'content_type': 'text/plain', 'bytes': '5'}, + {'name': '/gettest/b_10', 'hash': md5hex("b" * 10), + 'content_type': 'text/plain', 'bytes': '10'}, + {'name': '/gettest/c_15', 'hash': md5hex("c" * 15), + 'content_type': 'text/plain', 'bytes': '15'}, + {'name': '/gettest/d_20', 'hash': md5hex("d" * 20), + 'content_type': 'text/plain', 'bytes': '20'}, + {'name': '/gettest/e_25', 'hash': md5hex("e" * 25), + 'content_type': 'text/plain', 'bytes': '25'}, + {'name': '/gettest/f_30', 'hash': md5hex("f" * 30), + 'content_type': 'text/plain', 'bytes': '30'}, + {'name': '/gettest/g_35', 'hash': md5hex("g" * 35), + 'content_type': 'text/plain', 'bytes': '35'}, + {'name': '/gettest/h_40', 'hash': 
md5hex("h" * 40), + 'content_type': 'text/plain', 'bytes': '40'}, + {'name': '/gettest/i_45', 'hash': md5hex("i" * 45), + 'content_type': 'text/plain', 'bytes': '45'}, + {'name': '/gettest/j_50', 'hash': md5hex("j" * 50), + 'content_type': 'text/plain', 'bytes': '50'}, + {'name': '/gettest/k_55', 'hash': md5hex("k" * 55), + 'content_type': 'text/plain', 'bytes': '55'}, + {'name': '/gettest/l_60', 'hash': md5hex("l" * 60), + 'content_type': 'text/plain', 'bytes': '60'}]) + self.app.register( + 'GET', '/v1/AUTH_test/gettest/manifest-abcdefghijkl', + swob.HTTPOk, { + 'Content-Type': 'application/json', + 'X-Static-Large-Object': 'true', + 'Etag': md5(_abcdefghijkl_manifest_json).hexdigest()}, + _abcdefghijkl_manifest_json) + self.manifest_abcd_etag = md5hex( md5hex("a" * 5) + md5hex(md5hex("b" * 10) + md5hex("c" * 15)) + md5hex("d" * 20)) @@ -1361,6 +1404,65 @@ class TestSloGetManifest(SloTestCase): 'bytes=0-14,0-14', 'bytes=0-19,0-19']) + def test_get_manifest_ratelimiting(self): + req = Request.blank( + '/v1/AUTH_test/gettest/manifest-abcdefghijkl', + environ={'REQUEST_METHOD': 'GET'}) + + the_time = [time.time()] + sleeps = [] + + def mock_time(): + return the_time[0] + + def mock_sleep(duration): + sleeps.append(duration) + the_time[0] += duration + + with patch('time.time', mock_time), \ + patch('eventlet.sleep', mock_sleep), \ + patch.object(self.slo, 'rate_limit_under_size', 999999999), \ + patch.object(self.slo, 'rate_limit_after_segment', 0): + status, headers, body = self.call_slo(req) + + self.assertEqual(status, '200 OK') # sanity check + self.assertEqual(sleeps, [2.0, 2.0, 2.0, 2.0, 2.0]) + + # give the client the first 4 segments without ratelimiting; we'll + # sleep less + del sleeps[:] + with patch('time.time', mock_time), \ + patch('eventlet.sleep', mock_sleep), \ + patch.object(self.slo, 'rate_limit_under_size', 999999999), \ + patch.object(self.slo, 'rate_limit_after_segment', 4): + status, headers, body = self.call_slo(req) + + self.assertEqual(status, '200 OK') # sanity check + self.assertEqual(sleeps, [2.0, 2.0, 2.0]) + + # ratelimit segments under 35 bytes; this affects a-f + del sleeps[:] + with patch('time.time', mock_time), \ + patch('eventlet.sleep', mock_sleep), \ + patch.object(self.slo, 'rate_limit_under_size', 35), \ + patch.object(self.slo, 'rate_limit_after_segment', 0): + status, headers, body = self.call_slo(req) + + self.assertEqual(status, '200 OK') # sanity check + self.assertEqual(sleeps, [2.0, 2.0]) + + # ratelimit segments under 36 bytes; this now affects a-g, netting + # us one more sleep than before + del sleeps[:] + with patch('time.time', mock_time), \ + patch('eventlet.sleep', mock_sleep), \ + patch.object(self.slo, 'rate_limit_under_size', 36), \ + patch.object(self.slo, 'rate_limit_after_segment', 0): + status, headers, body = self.call_slo(req) + + self.assertEqual(status, '200 OK') # sanity check + self.assertEqual(sleeps, [2.0, 2.0, 2.0]) + def test_if_none_match_matches(self): req = Request.blank( '/v1/AUTH_test/gettest/manifest-abcd', @@ -2446,8 +2548,7 @@ class TestSwiftInfo(unittest.TestCase): self.assertTrue('slo' in swift_info) self.assertEqual(swift_info['slo'].get('max_manifest_segments'), mware.max_manifest_segments) - self.assertEqual(swift_info['slo'].get('min_segment_size'), - mware.min_segment_size) + self.assertEqual(swift_info['slo'].get('min_segment_size'), 1) self.assertEqual(swift_info['slo'].get('max_manifest_size'), mware.max_manifest_size) diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 
1de31aa438..3923b35f8a 100644
--- a/test/unit/common/test_utils.py
+++ b/test/unit/common/test_utils.py
@@ -3902,6 +3902,26 @@ class TestRateLimitedIterator(unittest.TestCase):
         # first element.
         self.assertEqual(len(got), 11)
 
+    def test_rate_limiting_sometimes(self):
+
+        def testfunc():
+            limited_iterator = utils.RateLimitedIterator(
+                range(9999), 100,
+                ratelimit_if=lambda item: item % 23 != 0)
+            got = []
+            started_at = time.time()
+            try:
+                while time.time() - started_at < 0.5:
+                    got.append(next(limited_iterator))
+            except StopIteration:
+                pass
+            return got
+
+        got = self.run_under_pseudo_time(testfunc)
+        # we'd get 51 without the ratelimit_if, but because 0, 23 and 46
+        # weren't subject to ratelimiting, we get 54 instead
+        self.assertEqual(len(got), 54)
+
     def test_limit_after(self):
 
         def testfunc():

From 211758f8cb02298fe16e59bf2954a146c6b24b83 Mon Sep 17 00:00:00 2001
From: Catherine Northcott
Date: Thu, 5 Nov 2015 23:04:14 +1300
Subject: [PATCH 03/52] Add support for storage policies to have more than one name

This patch alters storage_policy.py to allow storage policies to have
multiple names. Now users are able to add a number of human-readable
aliases for storage policies. Policies now have a .name (the default
name), .aliases (a string of comma-separated aliases), and
.aliases_list (a list of all human-readable names). Policies will
always have an .aliases value; if no aliases are set, it will contain
the default name.

The policy docs and tests have been updated to reflect the changes,
and policy.get_policy_info has been altered to display the name and
aliases.

Change-Id: I02967ca8d7c790595e5ee551581196aa64552eea
---
 doc/source/overview_policies.rst        |  26 ++-
 doc/source/policies_saio.rst            |   4 +-
 etc/swift.conf-sample                   |  15 +-
 swift/common/storage_policy.py          | 199 ++++++++++++++++---
 test/unit/common/test_storage_policy.py | 245 ++++++++++++++++++++++--
 5 files changed, 435 insertions(+), 54 deletions(-)
 mode change 100644 => 100755 doc/source/policies_saio.rst
 mode change 100644 => 100755 etc/swift.conf-sample
 mode change 100644 => 100755 swift/common/storage_policy.py
 mode change 100644 => 100755 test/unit/common/test_storage_policy.py

diff --git a/doc/source/overview_policies.rst b/doc/source/overview_policies.rst
index 06c7fc79a2..9ae2dcb468 100755
--- a/doc/source/overview_policies.rst
+++ b/doc/source/overview_policies.rst
@@ -57,7 +57,7 @@ deployers.
 Each container has a new special immutable metadata element called the
 storage policy index. Note that internally, Swift relies on policy indexes
 and not policy names. Policy names exist for human readability and
 translation is managed in the proxy. When a container is created, one new
-optional header is supported to specify the policy name. If nothing is
+optional header is supported to specify the policy name. If no name is
 specified, the default policy is used (and if no other policies defined,
 Policy-0 is considered the default). We will be covering the difference
 between default and Policy-0 in the next section.
@@ -170,12 +170,13 @@
 Storage Policies is a versatile feature intended to support both new and
 pre-existing clusters with the same level of flexibility. For that reason,
 we introduce the ``Policy-0`` concept which is not the same as the "default"
 policy. As you will see when we begin to configure policies, each policy has
-both a name (human friendly, configurable) as well as an index (or simply
-policy number).
Swift reserves index 0 to map to the object ring that's -present in all installations (e.g., ``/etc/swift/object.ring.gz``). You can -name this policy anything you like, and if no policies are defined it will -report itself as ``Policy-0``, however you cannot change the index as there must -always be a policy with index 0. +a single name and an arbitrary number of aliases (human friendly, +configurable) as well as an index (or simply policy number). Swift reserves +index 0 to map to the object ring that's present in all installations +(e.g., ``/etc/swift/object.ring.gz``). You can name this policy anything you +like, and if no policies are defined it will report itself as ``Policy-0``, +however you cannot change the index as there must always be a policy with +index 0. Another important concept is the default policy which can be any policy in the cluster. The default policy is the policy that is automatically @@ -273,6 +274,8 @@ file: * Policy names must contain only letters, digits or a dash * Policy names must be unique * The policy name 'Policy-0' can only be used for the policy with index 0 + * Multiple names can be assigned to one policy using aliases. All names + must follow the Swift naming rules. * If any policies are defined, exactly one policy must be declared default * Deprecated policies cannot be declared the default * If no ``policy_type`` is provided, ``replication`` is the default value. @@ -288,6 +291,7 @@ example configuration.:: [storage-policy:0] name = gold + aliases = yellow, orange policy_type = replication default = yes @@ -301,8 +305,10 @@ information about the ``default`` and ``deprecated`` options. There are some other considerations when managing policies: - * Policy names can be changed (but be sure that users are aware, aliases are - not currently supported but could be implemented in custom middleware!) + * Policy names can be changed. + * Aliases are supported and can be added and removed. If the primary name + of a policy is removed the next available alias will be adopted as the + primary name. A policy must always have at least one name. * You cannot change the index of a policy once it has been created * The default policy can be changed at any time, by adding the default directive to the desired policy section @@ -399,7 +405,7 @@ The module, :ref:`storage_policy`, is responsible for parsing the configured policies via class :class:`.StoragePolicyCollection`. This collection is made up of policies of class :class:`.StoragePolicy`. The collection class includes handy functions for getting to a policy either by -name or by index , getting info about the policies, etc. There's also one +name or by index , getting info about the policies, etc. There's also one very important function, :meth:`~.StoragePolicyCollection.get_object_ring`. Object rings are members of the :class:`.StoragePolicy` class and are actually not instantiated until the :meth:`~.StoragePolicy.load_ring` diff --git a/doc/source/policies_saio.rst b/doc/source/policies_saio.rst old mode 100644 new mode 100755 index 458fc85107..ee2dcf6043 --- a/doc/source/policies_saio.rst +++ b/doc/source/policies_saio.rst @@ -26,6 +26,7 @@ to implement a usable set of policies. [storage-policy:0] name = gold + aliases = yellow, orange default = yes [storage-policy:1] @@ -82,7 +83,8 @@ Storage Policies effect placement of data in Swift. 
You should see this: (only showing the policy output here):: - policies: [{'default': True, 'name': 'gold'}, {'name': 'silver'}] + policies: [{'aliases': 'gold, yellow, orange', 'default': True, + 'name': 'gold'}, {'aliases': 'silver', 'name': 'silver'}] 3. Now create a container without specifying a policy, it will use the default, 'gold' and then put a test object in it (create the file ``file0.txt`` diff --git a/etc/swift.conf-sample b/etc/swift.conf-sample old mode 100644 new mode 100755 index 18cb047cf5..3768dbc9cb --- a/etc/swift.conf-sample +++ b/etc/swift.conf-sample @@ -21,7 +21,7 @@ swift_hash_path_prefix = changeme # policy with index 0 will be declared the default. If multiple policies are # defined you must define a policy with index 0 and you must specify a # default. It is recommended you always define a section for -# storage-policy:0. +# storage-policy:0. Aliases are not required when defining a storage policy. # # A 'policy_type' argument is also supported but is not mandatory. Default # policy type 'replication' is used when 'policy_type' is unspecified. @@ -29,6 +29,7 @@ swift_hash_path_prefix = changeme name = Policy-0 default = yes #policy_type = replication +aliases = yellow, orange # the following section would declare a policy called 'silver', the number of # replicas will be determined by how the ring is built. In this example the @@ -40,7 +41,10 @@ default = yes # this config has specified it as the default. However if a legacy container # (one created with a pre-policy version of swift) is accessed, it is known # implicitly to be assigned to the policy with index 0 as opposed to the -# current default. +# current default. Note that even without specifying any aliases, a policy +# always has at least the default name stored in aliases because this field is +# used to contain all human readable names for a storage policy. +# #[storage-policy:1] #name = silver #policy_type = replication @@ -67,12 +71,13 @@ default = yes # refer to Swift documentation for details on how to configure EC policies. # # The example 'deepfreeze10-4' policy defined below is a _sample_ -# configuration with 10 'data' and 4 'parity' fragments. 'ec_type' -# defines the Erasure Coding scheme. 'jerasure_rs_vand' (Reed-Solomon -# Vandermonde) is used as an example below. +# configuration with an alias of 'df10-4' as well as 10 'data' and 4 'parity' +# fragments. 'ec_type' defines the Erasure Coding scheme. +# 'jerasure_rs_vand' (Reed-Solomon Vandermonde) is used as an example below. 
# #[storage-policy:2] #name = deepfreeze10-4 +#aliases = df10-4 #policy_type = erasure_coding #ec_type = jerasure_rs_vand #ec_num_data_fragments = 10 diff --git a/swift/common/storage_policy.py b/swift/common/storage_policy.py old mode 100644 new mode 100755 index df0672fab5..90fcedd661 --- a/swift/common/storage_policy.py +++ b/swift/common/storage_policy.py @@ -16,11 +16,9 @@ import os import string import textwrap import six - from six.moves.configparser import ConfigParser - from swift.common.utils import ( - config_true_value, SWIFT_CONF_FILE, whataremyips) + config_true_value, SWIFT_CONF_FILE, whataremyips, list_from_csv) from swift.common.ring import Ring, RingData from swift.common.utils import quorum_size from swift.common.exceptions import RingValidationError @@ -84,7 +82,6 @@ class BindPortsCache(object): class PolicyError(ValueError): - def __init__(self, msg, index=None): if index is not None: msg += ', for index %r' % index @@ -161,7 +158,7 @@ class BaseStoragePolicy(object): policy_type_to_policy_cls = {} def __init__(self, idx, name='', is_default=False, is_deprecated=False, - object_ring=None): + object_ring=None, aliases=''): # do not allow BaseStoragePolicy class to be instantiated directly if type(self) == BaseStoragePolicy: raise TypeError("Can't instantiate BaseStoragePolicy directly") @@ -172,18 +169,17 @@ class BaseStoragePolicy(object): raise PolicyError('Invalid index', idx) if self.idx < 0: raise PolicyError('Invalid index', idx) - if not name: + self.alias_list = [] + if not name or not self._validate_policy_name(name): raise PolicyError('Invalid name %r' % name, idx) - # this is defensively restrictive, but could be expanded in the future - if not all(c in VALID_CHARS for c in name): - raise PolicyError('Names are used as HTTP headers, and can not ' - 'reliably contain any characters not in %r. ' - 'Invalid name %r' % (VALID_CHARS, name)) - if name.upper() == LEGACY_POLICY_NAME.upper() and self.idx != 0: - msg = 'The name %s is reserved for policy index 0. ' \ - 'Invalid name %r' % (LEGACY_POLICY_NAME, name) - raise PolicyError(msg, idx) - self.name = name + self.alias_list.append(name) + if aliases: + names_list = list_from_csv(aliases) + for alias in names_list: + if alias == name: + continue + self._validate_policy_name(alias) + self.alias_list.append(alias) self.is_deprecated = config_true_value(is_deprecated) self.is_default = config_true_value(is_default) if self.policy_type not in BaseStoragePolicy.policy_type_to_policy_cls: @@ -191,9 +187,23 @@ class BaseStoragePolicy(object): if self.is_deprecated and self.is_default: raise PolicyError('Deprecated policy can not be default. ' 'Invalid config', self.idx) + self.ring_name = _get_policy_string('object', self.idx) self.object_ring = object_ring + @property + def name(self): + return self.alias_list[0] + + @name.setter + def name_setter(self, name): + self._validate_policy_name(name) + self.alias_list[0] = name + + @property + def aliases(self): + return ", ".join(self.alias_list) + def __int__(self): return self.idx @@ -203,8 +213,8 @@ class BaseStoragePolicy(object): def __repr__(self): return ("%s(%d, %r, is_default=%s, " "is_deprecated=%s, policy_type=%r)") % \ - (self.__class__.__name__, self.idx, self.name, - self.is_default, self.is_deprecated, self.policy_type) + (self.__class__.__name__, self.idx, self.alias_list, + self.is_default, self.is_deprecated, self.policy_type) @classmethod def register(cls, policy_type): @@ -213,6 +223,7 @@ class BaseStoragePolicy(object): their StoragePolicy class. 
This will also set the policy_type attribute on the registered
        implementation.
        """
+
        def register_wrapper(policy_cls):
            if policy_type in cls.policy_type_to_policy_cls:
                raise PolicyError(
@@ -222,6 +233,7 @@ class BaseStoragePolicy(object):
             cls.policy_type_to_policy_cls[policy_type] = policy_cls
             policy_cls.policy_type = policy_type
             return policy_cls
+
         return register_wrapper
 
     @classmethod
@@ -231,6 +243,7 @@ class BaseStoragePolicy(object):
         """
         return {
             'name': 'name',
+            'aliases': 'aliases',
             'policy_type': 'policy_type',
             'default': 'is_default',
             'deprecated': 'is_deprecated',
@@ -269,6 +282,77 @@ class BaseStoragePolicy(object):
         info.pop('policy_type')
         return info
 
+    def _validate_policy_name(self, name):
+        """
+        Helper function to determine the validity of a policy name. Used
+        to check policy names before setting them.
+
+        :param name: a name string for a single policy name.
+        :returns: True if the name is valid.
+        :raises: PolicyError if the policy name is invalid.
+        """
+        # this is defensively restrictive, but could be expanded in the future
+        if not all(c in VALID_CHARS for c in name):
+            raise PolicyError('Names are used as HTTP headers, and can not '
+                              'reliably contain any characters not in %r. '
+                              'Invalid name %r' % (VALID_CHARS, name))
+        if name.upper() == LEGACY_POLICY_NAME.upper() and self.idx != 0:
+            msg = 'The name %s is reserved for policy index 0. ' \
+                  'Invalid name %r' % (LEGACY_POLICY_NAME, name)
+            raise PolicyError(msg, self.idx)
+        if name.upper() in (existing_name.upper() for existing_name
+                            in self.alias_list):
+            msg = 'The name %s is already assigned to this policy.' % name
+            raise PolicyError(msg, self.idx)
+
+        return True
+
+    def add_name(self, name):
+        """
+        Adds an alias name to the storage policy. Shouldn't be called
+        directly from the storage policy but instead through the
+        storage policy collection class, so lookups by name resolve
+        correctly.
+
+        :param name: a new alias for the storage policy
+        """
+        if self._validate_policy_name(name):
+            self.alias_list.append(name)
+
+    def remove_name(self, name):
+        """
+        Removes an alias name from the storage policy. Shouldn't be called
+        directly from the storage policy but instead through the storage
+        policy collection class, so lookups by name resolve correctly. If
+        the name removed is the primary name then the next available alias
+        will be adopted as the new primary name.
+
+        :param name: a name assigned to the storage policy
+        """
+        if name not in self.alias_list:
+            raise PolicyError("%s is not a name assigned to policy %s"
+                              % (name, self.idx))
+        if len(self.alias_list) == 1:
+            raise PolicyError("Cannot remove only name %s from policy %s. "
+                              "Policies must have at least one name."
+                              % (name, self.idx))
+        else:
+            self.alias_list.remove(name)
+
+    def change_primary_name(self, name):
+        """
+        Changes the primary/default name of the policy to a specified name.
+
+        :param name: a string name to replace the current primary name.
+        """
+        if name == self.name:
+            return
+        elif name in self.alias_list:
+            self.remove_name(name)
+        else:
+            self._validate_policy_name(name)
+        self.alias_list.insert(0, name)
+
     def _validate_ring(self):
         """
         Hook, called when the ring is loaded. Can be used to
@@ -329,13 +413,15 @@ class ECStoragePolicy(BaseStoragePolicy):
     :func:`~swift.common.storage_policy.reload_storage_policies` to load
     POLICIES from ``swift.conf``.
     """
-    def __init__(self, idx, name='', is_default=False,
+
+    def __init__(self, idx, name='', aliases='', is_default=False,
                  is_deprecated=False, object_ring=None,
                  ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE,
                  ec_type=None, ec_ndata=None, ec_nparity=None):
         super(ECStoragePolicy, self).__init__(
-            idx, name, is_default, is_deprecated, object_ring)
+            idx=idx, name=name, aliases=aliases, is_default=is_default,
+            is_deprecated=is_deprecated, object_ring=object_ring)
 
         # Validate erasure_coding policy specific members
         # ec_type is one of the EC implementations supported by PyEClib
@@ -441,9 +527,9 @@ class ECStoragePolicy(BaseStoragePolicy):
 
     def __repr__(self):
         return ("%s, EC config(ec_type=%s, ec_segment_size=%d, "
-                "ec_ndata=%d, ec_nparity=%d)") % (
-                    super(ECStoragePolicy, self).__repr__(), self.ec_type,
-                    self.ec_segment_size, self.ec_ndata, self.ec_nparity)
+                "ec_ndata=%d, ec_nparity=%d)") % \
+            (super(ECStoragePolicy, self).__repr__(), self.ec_type,
+             self.ec_segment_size, self.ec_ndata, self.ec_nparity)
 
     @classmethod
     def _config_options_map(cls):
@@ -532,6 +618,7 @@ class StoragePolicyCollection(object):
 
     * Deprecated policies can not be declared the default
     """
+
    def __init__(self, pols):
        self.default = []
        self.by_name = {}
@@ -542,7 +629,8 @@ class StoragePolicyCollection(object):
         """
         Add pre-validated policies to internal indexes.
         """
-        self.by_name[policy.name.upper()] = policy
+        for name in policy.alias_list:
+            self.by_name[name.upper()] = policy
         self.by_index[int(policy)] = policy
 
     def __repr__(self):
@@ -570,9 +658,10 @@ class StoragePolicyCollection(object):
         if int(policy) in self.by_index:
             raise PolicyError('Duplicate index %s conflicts with %s' % (
                 policy, self.get_by_index(int(policy))))
-        if policy.name.upper() in self.by_name:
-            raise PolicyError('Duplicate name %s conflicts with %s' % (
-                policy, self.get_by_name(policy.name)))
+        for name in policy.alias_list:
+            if name.upper() in self.by_name:
+                raise PolicyError('Duplicate name %s conflicts with %s' % (
+                    policy, self.get_by_name(name)))
         if policy.is_default:
             if not self.default:
                 self.default = policy
@@ -667,6 +756,62 @@ class StoragePolicyCollection(object):
             policy_info.append(policy_entry)
         return policy_info
 
+    def add_policy_alias(self, policy_index, *aliases):
+        """
+        Adds a new name or names to a policy
+
+        :param policy_index: index of a policy in this policy collection.
+        :param *aliases: arbitrary number of string policy names to add.
+        """
+        policy = self.get_by_index(policy_index)
+        for alias in aliases:
+            if alias.upper() in self.by_name:
+                raise PolicyError('Duplicate name %s in use '
+                                  'by policy %s' % (alias,
+                                                    self.get_by_name(alias)))
+            else:
+                policy.add_name(alias)
+                self.by_name[alias.upper()] = policy
+
+    def remove_policy_alias(self, *aliases):
+        """
+        Removes a name or names from a policy. If the name removed is the
+        primary name then the next available alias will be adopted
+        as the new primary name.
+
+        :param *aliases: arbitrary number of existing policy names to remove.
+        """
+        for alias in aliases:
+            policy = self.get_by_name(alias)
+            if not policy:
+                raise PolicyError('No policy with name %s exists.' % alias)
+            if len(policy.alias_list) == 1:
+                raise PolicyError('Policy %s with name %s has only one name. '
+                                  'Policies must have at least one name.' % (
+                                      policy, alias))
+            else:
+                policy.remove_name(alias)
+                del self.by_name[alias.upper()]
+
+    def change_policy_primary_name(self, policy_index, new_name):
+        """
+        Changes the primary or default name of a policy. The new primary
+        name can be an alias that already belongs to the policy or a
+        completely new name.
+
+        :param policy_index: index of a policy in this policy collection.
+        :param new_name: a string name to set as the new default name.
+        """
+        policy = self.get_by_index(policy_index)
+        name_taken = self.get_by_name(new_name)
+        # if the name belongs to some other policy in the collection
+        if name_taken and name_taken != policy:
+            raise PolicyError('Other policy %s with name %s exists.' %
+                              (self.get_by_name(new_name).idx, new_name))
+        else:
+            policy.change_primary_name(new_name)
+            self.by_name[new_name.upper()] = policy
+
 
 def parse_storage_policies(conf):
     """

diff --git a/test/unit/common/test_storage_policy.py b/test/unit/common/test_storage_policy.py
old mode 100644
new mode 100755
index 5a2e332ba5..e1ced03717
--- a/test/unit/common/test_storage_policy.py
+++ b/test/unit/common/test_storage_policy.py
@@ -17,7 +17,6 @@ import unittest
 import os
 import mock
 from functools import partial
-
 from six.moves.configparser import ConfigParser
 from tempfile import NamedTemporaryFile
 from test.unit import patch_policies, FakeRing, temptree, DEFAULT_TEST_EC_TYPE
@@ -36,6 +35,7 @@ class FakeStoragePolicy(BaseStoragePolicy):
     Test StoragePolicy class - the only user at the moment is
     test_validate_policies_type_invalid()
     """
+
     def __init__(self, idx, name='', is_default=False,
                  is_deprecated=False, object_ring=None):
         super(FakeStoragePolicy, self).__init__(
@@ -43,7 +43,6 @@ class FakeStoragePolicy(BaseStoragePolicy):
 
 
 class TestStoragePolicies(unittest.TestCase):
-
     def _conf(self, conf_str):
         conf_str = "\n".join(line.strip() for line in conf_str.split("\n"))
         conf = ConfigParser()
@@ -75,10 +74,10 @@ class TestStoragePolicies(unittest.TestCase):
         ])
     def test_swift_info(self):
         # the deprecated 'three' should not exist in expect
-        expect = [{'default': True, 'name': 'zero'},
-                  {'name': 'two'},
-                  {'name': 'one'},
-                  {'name': 'ten'}]
+        expect = [{'aliases': 'zero', 'default': True, 'name': 'zero', },
+                  {'aliases': 'two', 'name': 'two'},
+                  {'aliases': 'one', 'name': 'one'},
+                  {'aliases': 'ten', 'name': 'ten'}]
         swift_info = POLICIES.get_policy_info()
         self.assertEqual(sorted(expect, key=lambda k: k['name']),
                          sorted(swift_info, key=lambda k: k['name']))
@@ -286,6 +285,7 @@ class TestStoragePolicies(unittest.TestCase):
     def test_validate_policies_type_invalid(self):
         class BogusStoragePolicy(FakeStoragePolicy):
             policy_type = 'bogus'
+
         # unsupported policy type - initialization with FakeStoragePolicy
         self.assertRaisesWithMessage(PolicyError, 'Invalid type',
                                      BogusStoragePolicy, 1, 'one')
@@ -330,6 +330,221 @@ class TestStoragePolicies(unittest.TestCase):
             self.assertEqual(pol1, policies.get_by_name(name))
             self.assertEqual(policies.get_by_name(name).name, 'One')
 
+    def test_multiple_names(self):
+        # checking duplicate on insert
+        test_policies = [StoragePolicy(0, 'zero', True),
+                         StoragePolicy(1, 'one', False, aliases='zero')]
+        self.assertRaises(PolicyError, StoragePolicyCollection,
+                          test_policies)
+
+        # checking correct retrieval using other names
+        test_policies = [StoragePolicy(0, 'zero', True, aliases='cero, kore'),
+                         StoragePolicy(1, 'one', False, aliases='uno, tahi'),
+                         StoragePolicy(2, 'two', False, aliases='dos, rua')]
+
+        policies = StoragePolicyCollection(test_policies)
+
+        for name in ('zero', 'cero', 'kore'):
+            self.assertEqual(policies.get_by_name(name), test_policies[0])
+        for name in ('two', 'dos', 'rua'):
+            self.assertEqual(policies.get_by_name(name), test_policies[2])
+
+        # Testing parsing of conf files/text
+        good_conf = self._conf("""
+        [storage-policy:0]
+        name = one
+        aliases = uno, tahi
+        default = yes
+        """)
+
+        policies = parse_storage_policies(good_conf)
+        self.assertEqual(policies.get_by_name('one'),
+                         policies[0])
+        self.assertEqual(policies.get_by_name('one'),
+                         policies.get_by_name('tahi'))
+
+        name_repeat_conf = self._conf("""
+        [storage-policy:0]
+        name = one
+        aliases = one
+        default = yes
+        """)
+        # Test on line below should not generate errors. Repeat of main
+        # name under aliases is permitted during construction
+        # but only because automated testing requires it.
+        policies = parse_storage_policies(name_repeat_conf)
+
+        bad_conf = self._conf("""
+        [storage-policy:0]
+        name = one
+        aliases = uno, uno
+        default = yes
+        """)
+
+        self.assertRaisesWithMessage(PolicyError,
+                                     'is already assigned to this policy',
+                                     parse_storage_policies, bad_conf)
+
+    def test_multiple_names_EC(self):
+        # checking duplicate names on insert
+        test_policies_ec = [
+            ECStoragePolicy(
+                0, 'ec8-2',
+                aliases='zeus, jupiter',
+                ec_type=DEFAULT_TEST_EC_TYPE,
+                ec_ndata=8, ec_nparity=2,
+                object_ring=FakeRing(replicas=8),
+                is_default=True),
+            ECStoragePolicy(
+                1, 'ec10-4',
+                aliases='ec8-2',
+                ec_type=DEFAULT_TEST_EC_TYPE,
+                ec_ndata=10, ec_nparity=4,
+                object_ring=FakeRing(replicas=10))]
+
+        self.assertRaises(PolicyError, StoragePolicyCollection,
+                          test_policies_ec)
+
+        # checking correct retrieval using other names
+        good_test_policies_EC = [
+            ECStoragePolicy(0, 'ec8-2', aliases='zeus, jupiter',
+                            ec_type=DEFAULT_TEST_EC_TYPE,
+                            ec_ndata=8, ec_nparity=2,
+                            object_ring=FakeRing(replicas=8),
+                            is_default=True),
+            ECStoragePolicy(1, 'ec10-4', aliases='athena, minerva',
+                            ec_type=DEFAULT_TEST_EC_TYPE,
+                            ec_ndata=10, ec_nparity=4,
+                            object_ring=FakeRing(replicas=10)),
+            ECStoragePolicy(2, 'ec4-2', aliases='poseidon, neptune',
+                            ec_type=DEFAULT_TEST_EC_TYPE,
+                            ec_ndata=4, ec_nparity=2,
+                            object_ring=FakeRing(replicas=7)),
+        ]
+        ec_policies = StoragePolicyCollection(good_test_policies_EC)
+
+        for name in ('ec8-2', 'zeus', 'jupiter'):
+            self.assertEqual(ec_policies.get_by_name(name), ec_policies[0])
+        for name in ('ec10-4', 'athena', 'minerva'):
+            self.assertEqual(ec_policies.get_by_name(name), ec_policies[1])
+
+        # Testing parsing of conf files/text
+        good_ec_conf = self._conf("""
+        [storage-policy:0]
+        name = ec8-2
+        aliases = zeus, jupiter
+        policy_type = erasure_coding
+        ec_type = %(ec_type)s
+        default = yes
+        ec_num_data_fragments = 8
+        ec_num_parity_fragments = 2
+        [storage-policy:1]
+        name = ec10-4
+        aliases = poseidon, neptune
+        policy_type = erasure_coding
+        ec_type = %(ec_type)s
+        ec_num_data_fragments = 10
+        ec_num_parity_fragments = 4
+        """ % {'ec_type': DEFAULT_TEST_EC_TYPE})
+
+        ec_policies = parse_storage_policies(good_ec_conf)
+        self.assertEqual(ec_policies.get_by_name('ec8-2'),
+                         ec_policies[0])
+        self.assertEqual(ec_policies.get_by_name('ec10-4'),
+                         ec_policies.get_by_name('poseidon'))
+
+        name_repeat_ec_conf = self._conf("""
+        [storage-policy:0]
+        name = ec8-2
+        aliases = ec8-2
+        policy_type = erasure_coding
+        ec_type = %(ec_type)s
+        default = yes
+        ec_num_data_fragments = 8
+        ec_num_parity_fragments = 2
+        """ % {'ec_type': DEFAULT_TEST_EC_TYPE})
+        # Test on line below should not generate errors. Repeat of main
+        # name under aliases is permitted during construction
+        # but only because automated testing requires it.
+ ec_policies = parse_storage_policies(name_repeat_ec_conf) + + bad_ec_conf = self._conf(""" + [storage-policy:0] + name = ec8-2 + aliases = zeus, zeus + policy_type = erasure_coding + ec_type = %(ec_type)s + default = yes + ec_num_data_fragments = 8 + ec_num_parity_fragments = 2 + """ % {'ec_type': DEFAULT_TEST_EC_TYPE}) + self.assertRaisesWithMessage(PolicyError, + 'is already assigned to this policy', + parse_storage_policies, bad_ec_conf) + + def test_add_remove_names(self): + test_policies = [StoragePolicy(0, 'zero', True), + StoragePolicy(1, 'one', False), + StoragePolicy(2, 'two', False)] + policies = StoragePolicyCollection(test_policies) + + # add names + policies.add_policy_alias(1, 'tahi') + self.assertEqual(policies.get_by_name('tahi'), test_policies[1]) + + policies.add_policy_alias(2, 'rua', 'dos') + self.assertEqual(policies.get_by_name('rua'), test_policies[2]) + self.assertEqual(policies.get_by_name('dos'), test_policies[2]) + + self.assertRaisesWithMessage(PolicyError, 'Invalid name', + policies.add_policy_alias, 2, 'double\n') + + # try to add existing name + self.assertRaisesWithMessage(PolicyError, 'Duplicate name', + policies.add_policy_alias, 2, 'two') + + self.assertRaisesWithMessage(PolicyError, 'Duplicate name', + policies.add_policy_alias, 1, 'two') + + # remove name + policies.remove_policy_alias('tahi') + self.assertEqual(policies.get_by_name('tahi'), None) + + # remove only name + self.assertRaisesWithMessage(PolicyError, + 'Policies must have at least one name.', + policies.remove_policy_alias, 'zero') + + # remove non-existent name + self.assertRaisesWithMessage(PolicyError, + 'No policy with name', + policies.remove_policy_alias, 'three') + + # remove default name + policies.remove_policy_alias('two') + self.assertEqual(policies.get_by_name('two'), None) + self.assertEqual(policies.get_by_index(2).name, 'rua') + + # change default name to a new name + policies.change_policy_primary_name(2, 'two') + self.assertEqual(policies.get_by_name('two'), test_policies[2]) + self.assertEqual(policies.get_by_index(2).name, 'two') + + # change default name to an existing alias + policies.change_policy_primary_name(2, 'dos') + self.assertEqual(policies.get_by_index(2).name, 'dos') + + # change default name to a bad new name + self.assertRaisesWithMessage(PolicyError, 'Invalid name', + policies.change_policy_primary_name, + 2, 'bad\nname') + + # change default name to a name belonging to another policy + self.assertRaisesWithMessage(PolicyError, + 'Other policy', + policies.change_policy_primary_name, + 1, 'dos') + def test_deprecated_default(self): bad_conf = self._conf(""" [storage-policy:1] @@ -815,7 +1030,7 @@ class TestStoragePolicies(unittest.TestCase): part_shift=24) with mock.patch( - 'swift.common.storage_policy.RingData.load' + 'swift.common.storage_policy.RingData.load' ) as mock_ld, \ patch_policies(test_policies), \ mock.patch('swift.common.storage_policy.whataremyips') \ @@ -933,14 +1148,14 @@ class TestStoragePolicies(unittest.TestCase): msg = 'EC ring for policy %s needs to be configured with ' \ 'exactly %d nodes.' 
% \ (policy.name, policy.ec_ndata + policy.ec_nparity) - self.assertRaisesWithMessage( - RingValidationError, msg, - policy._validate_ring) + self.assertRaisesWithMessage(RingValidationError, msg, + policy._validate_ring) def test_storage_policy_get_info(self): test_policies = [ StoragePolicy(0, 'zero', is_default=True), - StoragePolicy(1, 'one', is_deprecated=True), + StoragePolicy(1, 'one', is_deprecated=True, + aliases='tahi, uno'), ECStoragePolicy(10, 'ten', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10, ec_nparity=3), @@ -953,28 +1168,33 @@ class TestStoragePolicies(unittest.TestCase): # default replication (0, True): { 'name': 'zero', + 'aliases': 'zero', 'default': True, 'deprecated': False, 'policy_type': REPL_POLICY }, (0, False): { 'name': 'zero', + 'aliases': 'zero', 'default': True, }, # deprecated replication (1, True): { 'name': 'one', + 'aliases': 'one, tahi, uno', 'default': False, 'deprecated': True, 'policy_type': REPL_POLICY }, (1, False): { 'name': 'one', + 'aliases': 'one, tahi, uno', 'deprecated': True, }, # enabled ec (10, True): { 'name': 'ten', + 'aliases': 'ten', 'default': False, 'deprecated': False, 'policy_type': EC_POLICY, @@ -985,10 +1205,12 @@ class TestStoragePolicies(unittest.TestCase): }, (10, False): { 'name': 'ten', + 'aliases': 'ten', }, # deprecated ec (11, True): { 'name': 'done', + 'aliases': 'done', 'default': False, 'deprecated': True, 'policy_type': EC_POLICY, @@ -999,6 +1221,7 @@ class TestStoragePolicies(unittest.TestCase): }, (11, False): { 'name': 'done', + 'aliases': 'done', 'deprecated': True, }, } From ca2dcc371921aa1aded6161287cc03c0940bf198 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?= Date: Fri, 11 Dec 2015 18:21:28 +0100 Subject: [PATCH 04/52] Deprecated tox -downloadcache option removed Caching is enabled by default from pip version 6.0 More info: https://testrun.org/tox/latest/config.html#confval-downloadcache=path https://pip.pypa.io/en/stable/reference/pip_install/#caching Change-Id: I9451a0f0dee5c5a3c0ca0a52f58bd353602661a2 --- tox.ini | 3 --- 1 file changed, 3 deletions(-) diff --git a/tox.ini b/tox.ini index ac22896de4..9581e54d6d 100644 --- a/tox.ini +++ b/tox.ini @@ -22,9 +22,6 @@ setenv = VIRTUAL_ENV={envdir} NOSE_COVER_HTML=1 NOSE_COVER_HTML_DIR={toxinidir}/cover -[tox:jenkins] -downloadcache = ~/cache/pip - [testenv:py34] commands = nosetests test/unit/common/test_exceptions.py From 88c9aed7c846402355a3c7831f34f3e833bbdf11 Mon Sep 17 00:00:00 2001 From: Victor Stinner Date: Mon, 19 Oct 2015 16:19:28 +0200 Subject: [PATCH 05/52] Port swift.common.utils.StatsdClient to Python 3 * StatsdClient._send(): on Python 3, encode parts to UTF-8 and replace '|' with b'|' to join parts. * timing_stats(): replace func.func_name with func.__name__. The func_name attribute of functions was removed on Python 3, whereas the __name__ attribute is available on Python 2 and Python 3. 
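To see why the first bullet is necessary: on Python 3 a UDP socket's sendto() wants bytes, and str does not mix with bytes in join(). A minimal illustration of the fix (an editor's sketch, not the patched code itself):

    parts = ['some.counter:1', 'c', '@0.5']

    # Python 2: '|'.join(parts) already yields the byte payload.
    # Python 3: encode each part, then join with a bytes separator:
    payload = b'|'.join(part.encode('utf-8') for part in parts)
    assert payload == b'some.counter:1|c|@0.5'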
* Fix unit tests to use bytes Change-Id: Ic279c9b54e91aabcc52587eed7758e268ffb155e --- swift/common/utils.py | 6 ++++-- test/unit/common/test_utils.py | 23 ++++++++++++++++------- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/swift/common/utils.py b/swift/common/utils.py index d6cc5d7afb..d83655f1b7 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -1166,11 +1166,13 @@ class StatsdClient(object): parts.append('@%s' % (sample_rate,)) else: return + if six.PY3: + parts = [part.encode('utf-8') for part in parts] # Ideally, we'd cache a sending socket in self, but that # results in a socket getting shared by multiple green threads. with closing(self._open_socket()) as sock: try: - return sock.sendto('|'.join(parts), self._target) + return sock.sendto(b'|'.join(parts), self._target) except IOError as err: if self.logger: self.logger.warn( @@ -1227,7 +1229,7 @@ def timing_stats(**dec_kwargs): swift's wsgi server controllers, based on response code. """ def decorating_func(func): - method = func.func_name + method = func.__name__ @functools.wraps(func) def _timing_stats(ctrl, *args, **kwargs): diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 1de31aa438..16448bf8d1 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -34,6 +34,7 @@ import sys import json import math +import six from six import BytesIO, StringIO from six.moves.queue import Queue, Empty from six.moves import range @@ -3650,7 +3651,7 @@ class TestStatsdLogging(unittest.TestCase): self.assertEqual(len(mock_socket.sent), 1) payload = mock_socket.sent[0][0] - self.assertTrue(payload.endswith("|@0.5")) + self.assertTrue(payload.endswith(b"|@0.5")) def test_sample_rates_with_sample_rate_factor(self): logger = utils.get_logger({ @@ -3676,8 +3677,10 @@ class TestStatsdLogging(unittest.TestCase): self.assertEqual(len(mock_socket.sent), 1) payload = mock_socket.sent[0][0] - self.assertTrue(payload.endswith("|@%s" % effective_sample_rate), - payload) + suffix = "|@%s" % effective_sample_rate + if six.PY3: + suffix = suffix.encode('utf-8') + self.assertTrue(payload.endswith(suffix), payload) effective_sample_rate = 0.587 * 0.91 statsd_client.random = lambda: effective_sample_rate - 0.001 @@ -3685,8 +3688,10 @@ class TestStatsdLogging(unittest.TestCase): self.assertEqual(len(mock_socket.sent), 2) payload = mock_socket.sent[1][0] - self.assertTrue(payload.endswith("|@%s" % effective_sample_rate), - payload) + suffix = "|@%s" % effective_sample_rate + if six.PY3: + suffix = suffix.encode('utf-8') + self.assertTrue(payload.endswith(suffix), payload) def test_timing_stats(self): class MockController(object): @@ -3983,7 +3988,7 @@ class TestStatsdLoggingDelegation(unittest.TestCase): while True: try: payload = self.sock.recv(4096) - if payload and 'STOP' in payload: + if payload and b'STOP' in payload: return 42 self.queue.put(payload) except Exception as e: @@ -4006,10 +4011,14 @@ class TestStatsdLoggingDelegation(unittest.TestCase): def assertStat(self, expected, sender_fn, *args, **kwargs): got = self._send_and_get(sender_fn, *args, **kwargs) + if six.PY3: + got = got.decode('utf-8') return self.assertEqual(expected, got) def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs): got = self._send_and_get(sender_fn, *args, **kwargs) + if six.PY3: + got = got.decode('utf-8') return self.assertTrue(re.search(expected_regexp, got), [got, expected_regexp]) @@ -4178,7 +4187,7 @@ class TestStatsdLoggingDelegation(unittest.TestCase): 
utils.get_valid_utf8_str(valid_utf8_str)) self.assertEqual(valid_utf8_str, utils.get_valid_utf8_str(unicode_sample)) - self.assertEqual('\xef\xbf\xbd\xef\xbf\xbd\xec\xbc\x9d\xef\xbf\xbd', + self.assertEqual(b'\xef\xbf\xbd\xef\xbf\xbd\xec\xbc\x9d\xef\xbf\xbd', utils.get_valid_utf8_str(invalid_utf8_str)) @reset_logger_state From 6ade2908cca696ce1b48a7a19f4d460081fa5b0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?= Date: Sun, 13 Dec 2015 21:13:42 +0100 Subject: [PATCH 06/52] Deprecated param timeout removed from memcached Change-Id: Idf042a79f0db148bf9f28a9e360cb2a3c18d385a --- swift/common/memcached.py | 55 +++++++----------------------- test/unit/common/test_memcached.py | 12 +------ 2 files changed, 14 insertions(+), 53 deletions(-) diff --git a/swift/common/memcached.py b/swift/common/memcached.py index 5dc9e0b0fc..65e0da8afe 100644 --- a/swift/common/memcached.py +++ b/swift/common/memcached.py @@ -223,7 +223,7 @@ class MemcacheRing(object): """Returns a server connection to the pool.""" self._client_cache[server].put((fp, sock)) - def set(self, key, value, serialize=True, timeout=0, time=0, + def set(self, key, value, serialize=True, time=0, min_compress_len=0): """ Set a key/value pair in memcache @@ -233,22 +233,14 @@ class MemcacheRing(object): :param serialize: if True, value is serialized with JSON before sending to memcache, or with pickle if configured to use pickle instead of JSON (to avoid cache poisoning) - :param timeout: ttl in memcache, this parameter is now deprecated. It - will be removed in next release of OpenStack, - use time parameter instead in the future - :time: equivalent to timeout, this parameter is added to keep the - signature compatible with python-memcached interface. This - implementation will take this value and sign it to the - parameter timeout + :param time: the time to live :min_compress_len: minimum compress length, this parameter was added to keep the signature compatible with python-memcached interface. This implementation ignores it. """ key = md5hash(key) - if timeout: - logging.warn("parameter timeout has been deprecated, use time") - timeout = sanitize_timeout(time or timeout) + timeout = sanitize_timeout(time) flags = 0 if serialize and self._allow_pickle: value = pickle.dumps(value, PICKLE_PROTOCOL) @@ -302,7 +294,7 @@ class MemcacheRing(object): except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) - def incr(self, key, delta=1, time=0, timeout=0): + def incr(self, key, delta=1, time=0): """ Increments a key which has a numeric value by delta. If the key can't be found, it's added as delta or 0 if delta < 0. @@ -315,22 +307,16 @@ class MemcacheRing(object): :param key: key :param delta: amount to add to the value of key (or set as the value if the key is not found) will be cast to an int - :param time: the time to live. This parameter deprecates parameter - timeout. 
The addition of this parameter is to make the - interface consistent with set and set_multi methods - :param timeout: ttl in memcache, deprecated, will be removed in future - OpenStack releases + :param time: the time to live :returns: result of incrementing :raises MemcacheConnectionError: """ - if timeout: - logging.warn("parameter timeout has been deprecated, use time") key = md5hash(key) command = 'incr' if delta < 0: command = 'decr' delta = str(abs(int(delta))) - timeout = sanitize_timeout(time or timeout) + timeout = sanitize_timeout(time) for (server, fp, sock) in self._get_conns(key): try: with Timeout(self._io_timeout): @@ -358,7 +344,7 @@ class MemcacheRing(object): self._exception_occurred(server, e, sock=sock, fp=fp) raise MemcacheConnectionError("No Memcached connections succeeded.") - def decr(self, key, delta=1, time=0, timeout=0): + def decr(self, key, delta=1, time=0): """ Decrements a key which has a numeric value by delta. Calls incr with -delta. @@ -367,18 +353,12 @@ class MemcacheRing(object): :param delta: amount to subtract to the value of key (or set the value to 0 if the key is not found) will be cast to an int - :param time: the time to live. This parameter depcates parameter - timeout. The addition of this parameter is to make the - interface consistent with set and set_multi methods - :param timeout: ttl in memcache, deprecated, will be removed in future - OpenStack releases + :param time: the time to live :returns: result of decrementing :raises MemcacheConnectionError: """ - if timeout: - logging.warn("parameter timeout has been deprecated, use time") - return self.incr(key, delta=-delta, time=(time or timeout)) + return self.incr(key, delta=-delta, time=time) def delete(self, key): """ @@ -398,8 +378,8 @@ class MemcacheRing(object): except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) - def set_multi(self, mapping, server_key, serialize=True, timeout=0, - time=0, min_compress_len=0): + def set_multi(self, mapping, server_key, serialize=True, time=0, + min_compress_len=0): """ Sets multiple key/value pairs in memcache. @@ -409,23 +389,14 @@ class MemcacheRing(object): :param serialize: if True, value is serialized with JSON before sending to memcache, or with pickle if configured to use pickle instead of JSON (to avoid cache poisoning) - :param timeout: ttl for memcache. This parameter is now deprecated, it - will be removed in next release of OpenStack, use time - parameter instead in the future - :time: equalvent to timeout, this parameter is added to keep the - signature compatible with python-memcached interface. This - implementation will take this value and sign it to parameter - timeout + :param time: the time to live :min_compress_len: minimum compress length, this parameter was added to keep the signature compatible with python-memcached interface. 
This implementation ignores it """ - if timeout: - logging.warn("parameter timeout has been deprecated, use time") - server_key = md5hash(server_key) - timeout = sanitize_timeout(time or timeout) + timeout = sanitize_timeout(time) msg = '' for key, value in mapping.items(): key = md5hash(key) diff --git a/test/unit/common/test_memcached.py b/test/unit/common/test_memcached.py index dcdaebc251..1490c02852 100644 --- a/test/unit/common/test_memcached.py +++ b/test/unit/common/test_memcached.py @@ -201,16 +201,11 @@ class TestMemcached(unittest.TestCase): self.assertEqual( memcache_client.get('some_key'), ['simple str', u'utf8 str éà']) self.assertTrue(float(mock.cache.values()[0][1]) == 0) - memcache_client.set('some_key', [1, 2, 3], timeout=10) - self.assertEqual(mock.cache.values()[0][1], '10') memcache_client.set('some_key', [1, 2, 3], time=20) self.assertEqual(mock.cache.values()[0][1], '20') sixtydays = 60 * 24 * 60 * 60 esttimeout = time.time() + sixtydays - memcache_client.set('some_key', [1, 2, 3], timeout=sixtydays) - self.assertTrue( - -1 <= float(mock.cache.values()[0][1]) - esttimeout <= 1) memcache_client.set('some_key', [1, 2, 3], time=sixtydays) self.assertTrue( -1 <= float(mock.cache.values()[0][1]) - esttimeout <= 1) @@ -313,11 +308,6 @@ class TestMemcached(unittest.TestCase): [[4, 5, 6], [1, 2, 3]]) self.assertEqual(mock.cache.values()[0][1], '0') self.assertEqual(mock.cache.values()[1][1], '0') - memcache_client.set_multi( - {'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key', - timeout=10) - self.assertEqual(mock.cache.values()[0][1], '10') - self.assertEqual(mock.cache.values()[1][1], '10') memcache_client.set_multi( {'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key', time=20) @@ -328,7 +318,7 @@ class TestMemcached(unittest.TestCase): esttimeout = time.time() + fortydays memcache_client.set_multi( {'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key', - timeout=fortydays) + time=fortydays) self.assertTrue( -1 <= float(mock.cache.values()[0][1]) - esttimeout <= 1) self.assertTrue( From 40476ea0797690d3a90a9ed91906d26103dfa058 Mon Sep 17 00:00:00 2001 From: John Dickinson Date: Mon, 14 Dec 2015 10:52:22 -0800 Subject: [PATCH 07/52] Document pretend_min_part_hours_passed Added a docstring for the swift-ring-builder CLI command "pretend_min_part_hours_passed". This is a dangerous operation, and that's why it hasn't been documented, but it can be useful at times. It should be made known to those who need it. Change-Id: I45bdbaacbbdda64c7510453e6d93e6d8563e3ecd --- swift/cli/ringbuilder.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/swift/cli/ringbuilder.py b/swift/cli/ringbuilder.py index 192a788518..c2782f2795 100755 --- a/swift/cli/ringbuilder.py +++ b/swift/cli/ringbuilder.py @@ -1029,6 +1029,19 @@ swift-ring-builder write_builder [min_part_hours] builder.save(builder_file) def pretend_min_part_hours_passed(): + """ +swift-ring-builder pretend_min_part_hours_passed + Resets the clock on the last time a rebalance happened, thus + circumventing the min_part_hours check. + + ***************************** + USE THIS WITH EXTREME CAUTION + ***************************** + + If you run this command and deploy rebalanced rings before a replication + pass completes, you may introduce unavailability in your cluster. This + has an end-user impact. 
+ """ builder.pretend_min_part_hours_passed() builder.save(builder_file) exit(EXIT_SUCCESS) From 450737f886050e486f518cdce0c97596ccad848d Mon Sep 17 00:00:00 2001 From: Hisashi Osanai Date: Tue, 15 Dec 2015 11:33:56 +0900 Subject: [PATCH 08/52] Fix a typo in development_auth.rst This patch uses correct name for "CORS". Change-Id: I5fee5c581a2b3adb7596a273baf05708bfa97f79 --- doc/source/development_auth.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/source/development_auth.rst b/doc/source/development_auth.rst index bb00ca8fc7..92b7c5aafa 100644 --- a/doc/source/development_auth.rst +++ b/doc/source/development_auth.rst @@ -487,7 +487,8 @@ folks a start on their own code if they want to use repoze.what:: Allowing CORS with Auth ----------------------- -Cross Origin RequestS require that the auth system allow the OPTIONS method to -pass through without a token. The preflight request will make an OPTIONS call -against the object or container and will not work if the auth system stops it. +Cross Origin Resource Sharing (CORS) require that the auth system allow the +OPTIONS method to pass through without a token. The preflight request will +make an OPTIONS call against the object or container and will not work if +the auth system stops it. See TempAuth for an example of how OPTIONS requests are handled. From 60b2e02905d57f55169e506f4874b2334a1a68a5 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Mon, 5 Oct 2015 16:15:29 +0100 Subject: [PATCH 09/52] Make ECDiskFile report all fragments found on disk Refactor the disk file get_ondisk_files logic to enable ECDiskfile to gather *all* fragments found on disk (not just those with a matching .durable file) and make the fragments available via the DiskFile interface as a dict mapping: Timestamp --> list of fragment indexes Also, if a durable fragment has been found then the timestamp of the durable file is exposed via the diskfile interface. Co-Authored-By: Clay Gerrard Change-Id: I55e20a999685b94023d47b231d51007045ac920e --- swift/common/utils.py | 3 + swift/obj/diskfile.py | 614 ++++++++++++++++------------- swift/obj/mem_diskfile.py | 3 + test/unit/common/test_utils.py | 9 + test/unit/obj/test_diskfile.py | 319 ++++++++++++--- test/unit/obj/test_ssync_sender.py | 5 +- 6 files changed, 615 insertions(+), 338 deletions(-) diff --git a/swift/common/utils.py b/swift/common/utils.py index dcd00f064f..1436bd2174 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -831,6 +831,9 @@ class Timestamp(object): other = Timestamp(other) return cmp(self.internal, other.internal) + def __hash__(self): + return hash(self.internal) + def normalize_timestamp(timestamp): """ diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index f8b7b72a7c..ebb849a9e9 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -460,92 +460,175 @@ class BaseDiskFileManager(object): """ raise NotImplementedError - def _gather_on_disk_file(self, filename, ext, context, frag_index=None, - **kwargs): + def _process_ondisk_files(self, exts, results, **kwargs): """ - Called by gather_ondisk_files() for each file in an object - datadir in reverse sorted order. If a file is considered part of a - valid on-disk file set it will be added to the context dict, keyed by - its extension. If a file is considered to be obsolete it will be added - to a list stored under the key 'obsolete' in the context dict. + Called by get_ondisk_files(). Should be over-ridden to implement + subclass specific handling of files. 
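The refactor replaces the old per-file accept/reject loop with a template method: the base class buckets files by extension and hands the buckets to this policy-specific hook. A skeletal sketch of the shape (an editor's illustration; the real classes sort the buckets and carry far more state):

    from collections import defaultdict
    from os.path import splitext

    class BaseManager(object):
        def get_ondisk_files(self, files):
            exts = defaultdict(list)
            for afile in files:
                exts[splitext(afile)[1]].append(afile)
            results = {}
            self._process_ondisk_files(exts, results)   # subclass hook
            return results

    class ReplManager(BaseManager):
        def _process_ondisk_files(self, exts, results):
            # pick from the .data bucket (the real code sorts newest-first)
            if exts.get('.data'):
                results['data_info'] = exts['.data'][0]

    print(ReplManager().get_ondisk_files(['a.data', 'a.meta']))
    # -> {'data_info': 'a.data'}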
- :param filename: name of file to be accepted or not - :param ext: extension part of filename - :param context: a context dict that may have been populated by previous - calls to this method - :returns: True if a valid file set has been found, False otherwise + :param exts: dict of lists of file info, keyed by extension + :param results: a dict that may be updated with results """ raise NotImplementedError - def _verify_on_disk_files(self, accepted_files, **kwargs): + def _verify_ondisk_files(self, results, **kwargs): """ Verify that the final combination of on disk files complies with the diskfile contract. - :param accepted_files: files that have been found and accepted + :param results: files that have been found and accepted :returns: True if the file combination is compliant, False otherwise """ - raise NotImplementedError + data_file, meta_file, ts_file = tuple( + [results[key] + for key in ('data_file', 'meta_file', 'ts_file')]) - def gather_ondisk_files(self, files, include_obsolete=False, - verify=False, **kwargs): + return ((data_file is None and meta_file is None and ts_file is None) + or (ts_file is not None and data_file is None + and meta_file is None) + or (data_file is not None and ts_file is None)) + + def _split_list(self, original_list, condition): """ - Given a simple list of files names, iterate over them to determine the - files that constitute a valid object, and optionally determine the - files that are obsolete and could be deleted. Note that some files may - fall into neither category. + Split a list into two lists. The first list contains the first N items + of the original list, in their original order, where 0 < N <= + len(original list). The second list contains the remaining items of the + original list, in their original order. + + The index, N, at which the original list is split is the index of the + first item in the list that does not satisfy the given condition. Note + that the original list should be appropriately sorted if the second + list is to contain no items that satisfy the given condition. + + :param original_list: the list to be split. + :param condition: a single argument function that will be used to test + for the list item to split on. + :return: a tuple of two lists. + """ + for i, item in enumerate(original_list): + if not condition(item): + return original_list[:i], original_list[i:] + return original_list, [] + + def _split_gt_timestamp(self, file_info_list, timestamp): + """ + Given a list of file info dicts, reverse sorted by timestamp, split the + list into two: items newer than timestamp, and items at same time or + older than timestamp. + + :param file_info_list: a list of file_info dicts. + :param timestamp: a Timestamp. + :return: a tuple of two lists. + """ + return self._split_list( + file_info_list, lambda x: x['timestamp'] > timestamp) + + def _split_gte_timestamp(self, file_info_list, timestamp): + """ + Given a list of file info dicts, reverse sorted by timestamp, split the + list into two: items newer than or at same time as the timestamp, and + items older than timestamp. + + :param file_info_list: a list of file_info dicts. + :param timestamp: a Timestamp. + :return: a tuple of two lists. + """ + return self._split_list( + file_info_list, lambda x: x['timestamp'] >= timestamp) + + def get_ondisk_files(self, files, datadir, verify=True, **kwargs): + """ + Given a simple list of files names, determine the files that constitute + a valid fileset i.e. 
a set of files that defines the state of an + object, and determine the files that are obsolete and could be deleted. + Note that some files may fall into neither category. + + If a file is considered part of a valid fileset then its info dict will + be added to the results dict, keyed by _info. Any files that + are no longer required will have their info dicts added to a list + stored under the key 'obsolete'. + + The results dict will always contain entries with keys 'ts_file', + 'data_file' and 'meta_file'. Their values will be the fully qualified + path to a file of the corresponding type if there is such a file in the + valid fileset, or None. :param files: a list of file names. - :param include_obsolete: By default the iteration will stop when a - valid file set has been found. Setting this - argument to True will cause the iteration to - continue in order to find all obsolete files. + :param datadir: directory name files are from. :param verify: if True verify that the ondisk file contract has not been violated, otherwise do not verify. - :returns: a dict that may contain: valid on disk files keyed by their - filename extension; a list of obsolete files stored under the - key 'obsolete'. + :returns: a dict that will contain keys: + ts_file -> path to a .ts file or None + data_file -> path to a .data file or None + meta_file -> path to a .meta file or None + and may contain keys: + ts_info -> a file info dict for a .ts file + data_info -> a file info dict for a .data file + meta_info -> a file info dict for a .meta file + obsolete -> a list of file info dicts for obsolete files """ - files.sort(reverse=True) - results = {} + # Build the exts data structure: + # exts is a dict that maps file extensions to a list of file_info + # dicts for the files having that extension. The file_info dicts are of + # the form returned by parse_on_disk_filename, with the filename added. + # Each list is sorted in reverse timestamp order. + # + # The exts dict will be modified during subsequent processing as files + # are removed to be discarded or ignored. + exts = defaultdict(list) for afile in files: - ts_file = results.get('.ts') - data_file = results.get('.data') - if not include_obsolete: - assert ts_file is None, "On-disk file search loop" \ - " continuing after tombstone, %s, encountered" % ts_file - assert data_file is None, "On-disk file search loop" \ - " continuing after data file, %s, encountered" % data_file + # Categorize files by extension + try: + file_info = self.parse_on_disk_filename(afile) + file_info['filename'] = afile + exts[file_info['ext']].append(file_info) + except DiskFileError as e: + self.logger.warning('Unexpected file %s: %s' % + (os.path.join(datadir or '', afile), e)) + for ext in exts: + # For each extension sort files into reverse chronological order. 
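These two _split_* helpers carry every newer-than/older-than decision in the new code, so a worked example of the contract may help (an editor's illustration, with plain ints standing in for Timestamps):

    def split_list(original_list, condition):
        # same contract as BaseDiskFileManager._split_list()
        for i, item in enumerate(original_list):
            if not condition(item):
                return original_list[:i], original_list[i:]
        return original_list, []

    # file infos in reverse chronological order, newest first
    infos = [{'timestamp': 40}, {'timestamp': 30}, {'timestamp': 10}]

    newer, rest = split_list(infos, lambda x: x['timestamp'] > 30)
    assert newer == [{'timestamp': 40}]    # the "gt" split: strictly newer
    kept, older = split_list(infos, lambda x: x['timestamp'] >= 30)
    assert older == [{'timestamp': 10}]    # the "gte" split keeps the tie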
+ exts[ext] = sorted( + exts[ext], key=lambda info: info['timestamp'], reverse=True) - ext = splitext(afile)[1] - if self._gather_on_disk_file( - afile, ext, results, **kwargs): - if not include_obsolete: - break + # the results dict is used to collect results of file filtering + results = {} + + # non-tombstones older than or equal to latest tombstone are obsolete + if exts.get('.ts'): + for ext in filter(lambda ext: ext != '.ts', exts.keys()): + exts[ext], older = self._split_gt_timestamp( + exts[ext], exts['.ts'][0]['timestamp']) + results.setdefault('obsolete', []).extend(older) + + # all but most recent .meta and .ts are obsolete + for ext in ('.meta', '.ts'): + if ext in exts: + results.setdefault('obsolete', []).extend(exts[ext][1:]) + exts[ext] = exts[ext][:1] + + # delegate to subclass handler + self._process_ondisk_files(exts, results, **kwargs) + + # set final choice of files + if exts.get('.ts'): + results['ts_info'] = exts['.ts'][0] + if 'data_info' in results and exts.get('.meta'): + # only report a meta file if there is a data file + results['meta_info'] = exts['.meta'][0] + + # set ts_file, data_file and meta_file with path to chosen file or None + for info_key in ('data_info', 'meta_info', 'ts_info'): + info = results.get(info_key) + key = info_key[:-5] + '_file' + results[key] = join(datadir, info['filename']) if info else None if verify: - assert self._verify_on_disk_files( + assert self._verify_ondisk_files( results, **kwargs), \ "On-disk file search algorithm contract is broken: %s" \ - % results.values() + % str(results) + return results - def get_ondisk_files(self, files, datadir, **kwargs): - """ - Given a simple list of files names, determine the files to use. - - :param files: simple set of files as a python list - :param datadir: directory name files are from for convenience - :returns: dict of files to use having keys 'data_file', 'ts_file', - 'meta_file' and optionally other policy specific keys - """ - file_info = self.gather_ondisk_files(files, verify=True, **kwargs) - for ext in ('.data', '.meta', '.ts'): - filename = file_info.get(ext) - key = '%s_file' % ext[1:] - file_info[key] = join(datadir, filename) if filename else None - return file_info - def cleanup_ondisk_files(self, hsh_path, reclaim_age=ONE_WEEK, **kwargs): """ Clean up on-disk files that are obsolete and gather the set of valid @@ -560,27 +643,24 @@ class BaseDiskFileManager(object): key 'obsolete'; a list of files remaining in the directory, reverse sorted, stored under the key 'files'. 
""" - def is_reclaimable(filename): - timestamp = self.parse_on_disk_filename(filename)['timestamp'] + def is_reclaimable(timestamp): return (time.time() - float(timestamp)) > reclaim_age files = listdir(hsh_path) files.sort(reverse=True) - results = self.gather_ondisk_files(files, include_obsolete=True, - **kwargs) - # TODO ref to durables here - if '.durable' in results and not results.get('fragments'): - # a .durable with no .data is deleted as soon as it is found - results.setdefault('obsolete', []).append(results.pop('.durable')) - if '.ts' in results and is_reclaimable(results['.ts']): - results.setdefault('obsolete', []).append(results.pop('.ts')) - for filename in results.get('fragments_without_durable', []): + results = self.get_ondisk_files( + files, hsh_path, verify=False, **kwargs) + if 'ts_info' in results and is_reclaimable( + results['ts_info']['timestamp']): + remove_file(join(hsh_path, results['ts_info']['filename'])) + files.remove(results.pop('ts_info')['filename']) + for file_info in results.get('possible_reclaim', []): # stray fragments are not deleted until reclaim-age - if is_reclaimable(filename): - results.setdefault('obsolete', []).append(filename) - for filename in results.get('obsolete', []): - remove_file(join(hsh_path, filename)) - files.remove(filename) + if is_reclaimable(file_info['timestamp']): + results.setdefault('obsolete', []).append(file_info) + for file_info in results.get('obsolete', []): + remove_file(join(hsh_path, file_info['filename'])) + files.remove(file_info['filename']) results['files'] = files return results @@ -915,9 +995,9 @@ class BaseDiskFileManager(object): (os.path.join(partition_path, suffix), suffix) for suffix in suffixes) key_preference = ( - ('ts_meta', '.meta'), - ('ts_data', '.data'), - ('ts_data', '.ts'), + ('ts_meta', 'meta_info'), + ('ts_data', 'data_info'), + ('ts_data', 'ts_info'), ) for suffix_path, suffix in suffixes: for object_hash in self._listdir(suffix_path): @@ -926,14 +1006,13 @@ class BaseDiskFileManager(object): results = self.cleanup_ondisk_files( object_path, self.reclaim_age, **kwargs) timestamps = {} - for ts_key, ext in key_preference: - if ext not in results: + for ts_key, info_key in key_preference: + if info_key not in results: continue - timestamps[ts_key] = self.parse_on_disk_filename( - results[ext])['timestamp'] + timestamps[ts_key] = results[info_key]['timestamp'] if 'ts_data' not in timestamps: # file sets that do not include a .data or .ts - # file can not be opened and therefore can not + # file cannot be opened and therefore cannot # be ssync'd continue yield (object_path, object_hash, timestamps) @@ -1430,6 +1509,7 @@ class BaseDiskFile(object): self._obj = None self._datadir = None self._tmpdir = join(device_path, get_tmp_dir(policy)) + self._ondisk_info = None self._metadata = None self._datafile_metadata = None self._metafile_metadata = None @@ -1479,6 +1559,26 @@ class BaseDiskFile(object): raise DiskFileNotOpen() return Timestamp(self._datafile_metadata.get('X-Timestamp')) + @property + def durable_timestamp(self): + """ + Provides the timestamp of the newest data file found in the object + directory. + + :return: A Timestamp instance, or None if no data file was found. + :raises DiskFileNotOpen: if the open() method has not been previously + called on this instance. 
+ """ + if self._ondisk_info is None: + raise DiskFileNotOpen() + if self._datafile_metadata: + return Timestamp(self._datafile_metadata.get('X-Timestamp')) + return None + + @property + def fragments(self): + return None + @classmethod def from_hash_dir(cls, mgr, hash_dir_path, device_path, partition, policy): return cls(mgr, device_path, None, partition, _datadir=hash_dir_path, @@ -1524,8 +1624,8 @@ class BaseDiskFile(object): # The data directory does not exist, so the object cannot exist. files = [] - # gather info about the valid files to us to open the DiskFile - file_info = self._get_ondisk_file(files) + # gather info about the valid files to use to open the DiskFile + file_info = self._get_ondisk_files(files) self._data_file = file_info.get('data_file') if not self._data_file: @@ -1579,7 +1679,7 @@ class BaseDiskFile(object): self._logger.increment('quarantines') return DiskFileQuarantined(msg) - def _get_ondisk_file(self, files): + def _get_ondisk_files(self, files): """ Determine the on-disk files to use. @@ -1950,8 +2050,9 @@ class DiskFile(BaseDiskFile): reader_cls = DiskFileReader writer_cls = DiskFileWriter - def _get_ondisk_file(self, files): - return self.manager.get_ondisk_files(files, self._datadir) + def _get_ondisk_files(self, files): + self._ondisk_info = self.manager.get_ondisk_files(files, self._datadir) + return self._ondisk_info @DiskFileRouter.register(REPL_POLICY) @@ -1967,89 +2068,44 @@ class DiskFileManager(BaseDiskFileManager): * timestamp is a :class:`~swift.common.utils.Timestamp` * ext is a string, the file extension including the leading dot or - the empty string if the filename has no extenstion. + the empty string if the filename has no extension. :raises DiskFileError: if any part of the filename is not able to be validated. """ - filename, ext = splitext(filename) + float_part, ext = splitext(filename) + try: + timestamp = Timestamp(float_part) + except ValueError: + raise DiskFileError('Invalid Timestamp value in filename %r' + % filename) return { - 'timestamp': Timestamp(filename), + 'timestamp': timestamp, 'ext': ext, } - def _gather_on_disk_file(self, filename, ext, context, frag_index=None, - **kwargs): + def _process_ondisk_files(self, exts, results, **kwargs): """ - Called by gather_ondisk_files() for each file in an object - datadir in reverse sorted order. If a file is considered part of a - valid on-disk file set it will be added to the context dict, keyed by - its extension. If a file is considered to be obsolete it will be added - to a list stored under the key 'obsolete' in the context dict. + Implement replication policy specific handling of .data files. 
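The rules this hook encodes are short: the newest .data is the chosen file, every older .data is obsolete, and, combined with the generic filtering in get_ondisk_files() above, a single .meta survives only if it is strictly newer than the chosen .data. Applied to a sample directory (an editor's illustration):

    # names are "<timestamp>.<ext>"; outcome of the combined filtering:
    files = ['0000000009.00000.meta',   # kept: newer than the chosen .data
             '0000000008.00000.data',   # chosen: newest .data wins
             '0000000007.00000.data',   # obsolete: an older .data
             '0000000007.00000.meta']   # obsolete: only the newest .meta is
                                        # kept, and it must beat the .data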
- :param filename: name of file to be accepted or not - :param ext: extension part of filename - :param context: a context dict that may have been populated by previous - calls to this method - :returns: True if a valid file set has been found, False otherwise + :param exts: dict of lists of file info, keyed by extension + :param results: a dict that may be updated with results """ + if exts.get('.data'): + for ext in exts.keys(): + if ext == '.data': + # older .data's are obsolete + exts[ext], obsolete = self._split_gte_timestamp( + exts[ext], exts['.data'][0]['timestamp']) + else: + # other files at same or older timestamp as most recent + # data are obsolete + exts[ext], obsolete = self._split_gt_timestamp( + exts[ext], exts['.data'][0]['timestamp']) + results.setdefault('obsolete', []).extend(obsolete) - # if first file with given extension then add filename to context - # dict and return True - accept_first = lambda: context.setdefault(ext, filename) == filename - # add the filename to the list of obsolete files in context dict - discard = lambda: context.setdefault('obsolete', []).append(filename) - # set a flag in the context dict indicating that a valid fileset has - # been found - set_valid_fileset = lambda: context.setdefault('found_valid', True) - # return True if the valid fileset flag is set in the context dict - have_valid_fileset = lambda: context.get('found_valid') - - if ext == '.data': - if have_valid_fileset(): - # valid fileset means we must have a newer - # .data or .ts, so discard the older .data file - discard() - else: - accept_first() - set_valid_fileset() - elif ext == '.ts': - if have_valid_fileset() or not accept_first(): - # newer .data or .ts already found so discard this - discard() - if not have_valid_fileset(): - # remove any .meta that may have been previously found - context.pop('.meta', None) - set_valid_fileset() - elif ext == '.meta': - if have_valid_fileset() or not accept_first(): - # newer .data, .durable or .ts already found so discard this - discard() - else: - # ignore unexpected files - pass - return have_valid_fileset() - - def _verify_on_disk_files(self, accepted_files, **kwargs): - """ - Verify that the final combination of on disk files complies with the - replicated diskfile contract. - - :param accepted_files: files that have been found and accepted - :returns: True if the file combination is compliant, False otherwise - """ - # mimic legacy behavior - .meta is ignored when .ts is found - if accepted_files.get('.ts'): - accepted_files.pop('.meta', None) - - data_file, meta_file, ts_file, durable_file = tuple( - [accepted_files.get(ext) - for ext in ('.data', '.meta', '.ts', '.durable')]) - - return ((data_file is None and meta_file is None and ts_file is None) - or (ts_file is not None and data_file is None - and meta_file is None) - or (data_file is not None and ts_file is None)) + # set results + results['data_info'] = exts['.data'][0] def _hash_suffix(self, path, reclaim_age): """ @@ -2153,14 +2209,47 @@ class ECDiskFile(BaseDiskFile): if frag_index is not None: self._frag_index = self.manager.validate_fragment_index(frag_index) - def _get_ondisk_file(self, files): + @property + def durable_timestamp(self): + """ + Provides the timestamp of the newest durable file found in the object + directory. + + :return: A Timestamp instance, or None if no durable file was found. + :raises DiskFileNotOpen: if the open() method has not been previously + called on this instance. 
+ """ + if self._ondisk_info is None: + raise DiskFileNotOpen() + if self._ondisk_info.get('durable_frag_set'): + return self._ondisk_info['durable_frag_set'][0]['timestamp'] + return None + + @property + def fragments(self): + """ + Provides information about all fragments that were found in the object + directory, including fragments without a matching durable file, and + including any fragment chosen to construct the opened diskfile. + + :return: A dict mapping -> , + or None if the diskfile has not been opened or no fragments + were found. + """ + if self._ondisk_info: + frag_sets = self._ondisk_info['frag_sets'] + return dict([(ts, [info['frag_index'] for info in frag_set]) + for ts, frag_set in frag_sets.items()]) + + def _get_ondisk_files(self, files): """ The only difference between this method and the replication policy DiskFile method is passing in the frag_index kwarg to our manager's get_ondisk_files method. """ - return self.manager.get_ondisk_files( + self._ondisk_info = self.manager.get_ondisk_files( files, self._datadir, frag_index=self._frag_index) + return self._ondisk_info def purge(self, timestamp, frag_index): """ @@ -2254,9 +2343,13 @@ class ECDiskFileManager(BaseDiskFileManager): validated. """ frag_index = None - filename, ext = splitext(filename) - parts = filename.split('#', 1) - timestamp = parts[0] + float_frag, ext = splitext(filename) + parts = float_frag.split('#', 1) + try: + timestamp = Timestamp(parts[0]) + except ValueError: + raise DiskFileError('Invalid Timestamp value in filename %r' + % filename) if ext == '.data': # it is an error for an EC data file to not have a valid # fragment index @@ -2267,137 +2360,94 @@ class ECDiskFileManager(BaseDiskFileManager): pass frag_index = self.validate_fragment_index(frag_index) return { - 'timestamp': Timestamp(timestamp), + 'timestamp': timestamp, 'frag_index': frag_index, 'ext': ext, } - def is_obsolete(self, filename, other_filename): + def _process_ondisk_files(self, exts, results, frag_index=None, **kwargs): """ - Test if a given file is considered to be obsolete with respect to - another file in an object storage dir. + Implement EC policy specific handling of .data and .durable files. - Implements EC policy specific behavior when comparing files against a - .durable file. - - A simple string comparison would consider t2#1.data to be older than - t2.durable (since t2#1.data < t2.durable). By stripping off the file - extensions we get the desired behavior: t2#1 > t2 without compromising - the detection of t1#1 < t2. - - :param filename: a string representing an absolute filename - :param other_filename: a string representing an absolute filename - :returns: True if filename is considered obsolete, False otherwise. - """ - if other_filename.endswith('.durable'): - return splitext(filename)[0] < splitext(other_filename)[0] - return filename < other_filename - - def _gather_on_disk_file(self, filename, ext, context, frag_index=None, - **kwargs): - """ - Called by gather_ondisk_files() for each file in an object - datadir in reverse sorted order. If a file is considered part of a - valid on-disk file set it will be added to the context dict, keyed by - its extension. If a file is considered to be obsolete it will be added - to a list stored under the key 'obsolete' in the context dict. 
- - :param filename: name of file to be accepted or not - :param ext: extension part of filename - :param context: a context dict that may have been populated by previous - calls to this method + :param exts: dict of lists of file info, keyed by extension + :param results: a dict that may be updated with results :param frag_index: if set, search for a specific fragment index .data file, otherwise accept the first valid .data file. - :returns: True if a valid file set has been found, False otherwise """ + durable_info = None + if exts.get('.durable'): + durable_info = exts['.durable'][0] + # Mark everything older than most recent .durable as obsolete + # and remove from the exts dict. + for ext in exts.keys(): + exts[ext], older = self._split_gte_timestamp( + exts[ext], durable_info['timestamp']) + results.setdefault('obsolete', []).extend(older) - # if first file with given extension then add filename to context - # dict and return True - accept_first = lambda: context.setdefault(ext, filename) == filename - # add the filename to the list of obsolete files in context dict - discard = lambda: context.setdefault('obsolete', []).append(filename) - # set a flag in the context dict indicating that a valid fileset has - # been found - set_valid_fileset = lambda: context.setdefault('found_valid', True) - # return True if the valid fileset flag is set in the context dict - have_valid_fileset = lambda: context.get('found_valid') + # Split the list of .data files into sets of frags having the same + # timestamp, identifying the durable and newest sets (if any) as we go. + # To do this we can take advantage of the list of .data files being + # reverse-time ordered. Keep the resulting per-timestamp frag sets in + # a frag_sets dict mapping a Timestamp instance -> frag_set. + all_frags = exts.get('.data') + frag_sets = {} + durable_frag_set = None + while all_frags: + frag_set, all_frags = self._split_gte_timestamp( + all_frags, all_frags[0]['timestamp']) + # sort the frag set into ascending frag_index order + frag_set.sort(key=lambda info: info['frag_index']) + timestamp = frag_set[0]['timestamp'] + frag_sets[timestamp] = frag_set + if durable_info and durable_info['timestamp'] == timestamp: + durable_frag_set = frag_set - if context.get('.durable'): - # a .durable file has been found - if ext == '.data': - if self.is_obsolete(filename, context.get('.durable')): - # this and remaining data files are older than durable - discard() - set_valid_fileset() - else: - # accept the first .data file if it matches requested - # frag_index, or if no specific frag_index is requested - fi = self.parse_on_disk_filename(filename)['frag_index'] - if frag_index is None or frag_index == int(fi): - accept_first() - set_valid_fileset() - # else: keep searching for a .data file to match frag_index - context.setdefault('fragments', []).append(filename) + # Select a single chosen frag from the chosen frag_set, by either + # matching against a specified frag_index or taking the highest index. 
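Worked through a concrete directory, the selection just described behaves as below (an editor's illustration; real entries are full file-info dicts keyed by Timestamp instances, which the utils change in this patch makes hashable):

    # two .data timestamps on disk; only the older set has a .durable
    frag_sets = {
        '1449950000.00000': [{'frag_index': 1}, {'frag_index': 3}],
        '1449960000.00000': [{'frag_index': 2}],
    }
    durable_frag_set = frag_sets['1449950000.00000']

    frag_index = None          # caller did not ask for a specific fragment
    if frag_index is not None:
        chosen = next((info for info in durable_frag_set
                       if info['frag_index'] == frag_index), None)
    else:
        chosen = durable_frag_set[-1]   # frag sets are sorted by frag_index
    assert chosen == {'frag_index': 3}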
+ chosen_frag = None + if durable_frag_set: + if frag_index is not None: + # search the frag set to find the exact frag_index + for info in durable_frag_set: + if info['frag_index'] == frag_index: + chosen_frag = info + break else: - # there can no longer be a matching .data file so mark what has - # been found so far as the valid fileset - discard() - set_valid_fileset() - elif ext == '.data': - # not yet found a .durable - if have_valid_fileset(): - # valid fileset means we must have a newer - # .ts, so discard the older .data file - discard() - else: - # .data newer than a .durable or .ts, don't discard yet - context.setdefault('fragments_without_durable', []).append( - filename) - elif ext == '.ts': - if have_valid_fileset() or not accept_first(): - # newer .data, .durable or .ts already found so discard this - discard() - if not have_valid_fileset(): - # remove any .meta that may have been previously found - context.pop('.meta', None) - set_valid_fileset() - elif ext in ('.meta', '.durable'): - if have_valid_fileset() or not accept_first(): - # newer .data, .durable or .ts already found so discard this - discard() - else: - # ignore unexpected files - pass - return have_valid_fileset() + chosen_frag = durable_frag_set[-1] - def _verify_on_disk_files(self, accepted_files, frag_index=None, **kwargs): + # If we successfully found a frag then set results + if chosen_frag: + results['data_info'] = chosen_frag + results['durable_frag_set'] = durable_frag_set + results['frag_sets'] = frag_sets + + # Mark any isolated .durable as obsolete + if exts.get('.durable') and not durable_frag_set: + results.setdefault('obsolete', []).extend(exts['.durable']) + exts.pop('.durable') + + # Fragments *may* be ready for reclaim, unless they are durable or + # at the timestamp we have just chosen for constructing the diskfile. + for frag_set in frag_sets.values(): + if frag_set == durable_frag_set: + continue + results.setdefault('possible_reclaim', []).extend(frag_set) + + def _verify_ondisk_files(self, results, frag_index=None, **kwargs): """ Verify that the final combination of on disk files complies with the erasure-coded diskfile contract. - :param accepted_files: files that have been found and accepted + :param results: files that have been found and accepted :param frag_index: specifies a specific fragment index .data file :returns: True if the file combination is compliant, False otherwise """ - if not accepted_files.get('.data'): - # We may find only a .meta, which doesn't mean the on disk - # contract is broken. So we clear it to comply with - # superclass assertions. 
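The replacement check a few hunks below is far smaller than the block being removed here, because it now composes with the superclass: the replicated-policy combinations are validated first, and the EC subclass adds a single invariant, that a data file and a durable frag set appear together or not at all. In effect (an editor's paraphrase, not the literal method):

    def ec_contract_ok(results, repl_ok):
        # repl_ok: verdict of the replicated-policy checks, run first
        have_data_file = results.get('data_file') is not None
        have_durable = results.get('durable_frag_set') is not None
        return repl_ok and (have_data_file == have_durable)

    assert ec_contract_ok({'data_file': 'f', 'durable_frag_set': ['f']}, True)
    assert not ec_contract_ok({'data_file': 'f'}, True)   # data, no durable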
- accepted_files.pop('.meta', None) - - data_file, meta_file, ts_file, durable_file = tuple( - [accepted_files.get(ext) - for ext in ('.data', '.meta', '.ts', '.durable')]) - - return ((data_file is None or durable_file is not None) - and (data_file is None and meta_file is None - and ts_file is None and durable_file is None) - or (ts_file is not None and data_file is None - and meta_file is None and durable_file is None) - or (data_file is not None and durable_file is not None - and ts_file is None) - or (durable_file is not None and meta_file is None - and ts_file is None)) + if super(ECDiskFileManager, self)._verify_ondisk_files( + results, **kwargs): + have_data_file = results['data_file'] is not None + have_durable = results.get('durable_frag_set') is not None + return have_data_file == have_durable + return False def _hash_suffix(self, path, reclaim_age): """ @@ -2412,12 +2462,12 @@ class ECDiskFileManager(BaseDiskFileManager): # here we flatten out the hashers hexdigest into a dictionary instead # of just returning the one hexdigest for the whole suffix def mapper(filename): - info = self.parse_on_disk_filename(filename) - fi = info['frag_index'] - if fi is None: - return None, filename - else: - return fi, info['timestamp'].internal + info = self.parse_on_disk_filename(filename) + fi = info['frag_index'] + if fi is None: + return None, filename + else: + return fi, info['timestamp'].internal hash_per_fi = self._hash_suffix_dir(path, mapper, reclaim_age) return dict((fi, md5.hexdigest()) for fi, md5 in hash_per_fi.items()) diff --git a/swift/obj/mem_diskfile.py b/swift/obj/mem_diskfile.py index 277a9f1faf..fe1dc5e496 100644 --- a/swift/obj/mem_diskfile.py +++ b/swift/obj/mem_diskfile.py @@ -254,6 +254,7 @@ class DiskFile(object): self._metadata = None self._fp = None self._filesystem = fs + self.fragments = None def open(self): """ @@ -421,3 +422,5 @@ class DiskFile(object): return Timestamp(self._metadata.get('X-Timestamp')) data_timestamp = timestamp + + durable_timestamp = timestamp diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 445c462ac1..36393dab25 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -779,6 +779,15 @@ class TestTimestamp(unittest.TestCase): self.assertEqual( sorted([t.internal for t in timestamps]), expected) + def test_hashable(self): + ts_0 = utils.Timestamp('1402444821.72589') + ts_0_also = utils.Timestamp('1402444821.72589') + self.assertEqual(ts_0, ts_0_also) # sanity + self.assertEqual(hash(ts_0), hash(ts_0_also)) + d = {ts_0: 'whatever'} + self.assertIn(ts_0, d) # sanity + self.assertIn(ts_0_also, d) + class TestUtils(unittest.TestCase): """Tests for swift.common.utils """ diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index 991f38a496..534882bec3 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -20,6 +20,7 @@ import six.moves.cPickle as pickle import os import errno import itertools +from unittest.util import safe_repr import mock import unittest import email @@ -462,6 +463,35 @@ class BaseDiskFileTestMixin(object): return '.'.join([ mgr_cls.__module__, mgr_cls.__name__, manager_attribute_name]) + def _assertDictContainsSubset(self, subset, dictionary, msg=None): + """Checks whether dictionary is a superset of subset.""" + # This is almost identical to the method in python3.4 version of + # unitest.case.TestCase.assertDictContainsSubset, reproduced here to + # avoid the deprecation warning in the original when using 
python3. + missing = [] + mismatched = [] + for key, value in subset.items(): + if key not in dictionary: + missing.append(key) + elif value != dictionary[key]: + mismatched.append('%s, expected: %s, actual: %s' % + (safe_repr(key), safe_repr(value), + safe_repr(dictionary[key]))) + + if not (missing or mismatched): + return + + standardMsg = '' + if missing: + standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in + missing) + if mismatched: + if standardMsg: + standardMsg += '; ' + standardMsg += 'Mismatched values: %s' % ','.join(mismatched) + + self.fail(self._formatMessage(msg, standardMsg)) + class DiskFileManagerMixin(BaseDiskFileTestMixin): """ @@ -516,8 +546,8 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): for _order in ('ordered', 'shuffled', 'shuffled'): class_under_test = self._get_diskfile(policy, frag_index) try: - actual = class_under_test._get_ondisk_file(files) - self.assertDictContainsSubset( + actual = class_under_test._get_ondisk_files(files) + self._assertDictContainsSubset( expected, actual, 'Expected %s from %s but got %s' % (expected, files, actual)) @@ -593,14 +623,38 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): df_mgr = self.df_router[policy] datadir = os.path.join('/srv/node/sdb1/', diskfile.get_data_dir(policy)) - self.assertEqual(expected, df_mgr.get_ondisk_files( - files, datadir)) + actual = df_mgr.get_ondisk_files(files, datadir) + self._assertDictContainsSubset(expected, actual) # check diskfile under the hood df = self._get_diskfile(policy, frag_index=frag_index) - self.assertEqual(expected, df._get_ondisk_file(files)) + actual = df._get_ondisk_files(files) + self._assertDictContainsSubset(expected, actual) # check diskfile open self.assertRaises(DiskFileNotExist, df.open) + def test_get_ondisk_files_with_unexpected_file(self): + unexpected_files = ['junk', 'junk.data', '.junk'] + timestamp = next(make_timestamp_iter()) + tomb_file = timestamp.internal + '.ts' + for policy in POLICIES: + for unexpected in unexpected_files: + files = [unexpected, tomb_file] + df_mgr = self.df_router[policy] + df_mgr.logger = FakeLogger() + datadir = os.path.join('/srv/node/sdb1/', + diskfile.get_data_dir(policy)) + + results = df_mgr.get_ondisk_files(files, datadir) + + expected = {'ts_file': os.path.join(datadir, tomb_file)} + self._assertDictContainsSubset(expected, results) + + log_lines = df_mgr.logger.get_lines_for_level('warning') + self.assertTrue( + log_lines[0].startswith( + 'Unexpected file %s' + % os.path.join(datadir, unexpected))) + def test_construct_dev_path(self): res_path = self.df_mgr.construct_dev_path('abc') self.assertEqual(os.path.join(self.df_mgr.devices, 'abc'), res_path) @@ -1014,14 +1068,59 @@ class TestDiskFileManager(DiskFileManagerMixin, unittest.TestCase): self._test_yield_hashes_cleanup(scenarios, POLICIES[0]) def test_get_ondisk_files_with_stray_meta(self): - # get_ondisk_files does not tolerate a stray .meta file + # get_ondisk_files ignores a stray .meta file class_under_test = self._get_diskfile(POLICIES[0]) files = ['0000000007.00000.meta'] - self.assertRaises(AssertionError, - class_under_test.manager.get_ondisk_files, files, - self.testdir) + with mock.patch('swift.obj.diskfile.os.listdir', lambda *args: files): + self.assertRaises(DiskFileNotExist, class_under_test.open) + + def test_verify_ondisk_files(self): + # ._verify_ondisk_files should only return False if get_ondisk_files + # has produced a bad set of files due to a bug, so to test it we need + # to probe it directly. 
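Rather than hand-picking failure cases, the test enumerates every combination: two candidate values across three slots gives 2**3 = 8 scenarios, of which the 4 whitelisted ones are skipped. The comprehension used below is equivalent to itertools.product (an editor's restatement):

    import itertools

    vals = (None, 'a_file')
    for ts_file, data_file, meta_file in itertools.product(vals, repeat=3):
        scenario = {'ts_file': ts_file, 'data_file': data_file,
                    'meta_file': meta_file}
        # every scenario not in ok_scenarios must be rejected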
+ mgr = self.df_router[POLICIES.default] + ok_scenarios = ( + {'ts_file': None, 'data_file': None, 'meta_file': None}, + {'ts_file': None, 'data_file': 'a_file', 'meta_file': None}, + {'ts_file': None, 'data_file': 'a_file', 'meta_file': 'a_file'}, + {'ts_file': 'a_file', 'data_file': None, 'meta_file': None}, + ) + + for scenario in ok_scenarios: + self.assertTrue(mgr._verify_ondisk_files(scenario), + 'Unexpected result for scenario %s' % scenario) + + # construct every possible invalid combination of results + vals = (None, 'a_file') + for ts_file, data_file, meta_file in [ + (a, b, c) for a in vals for b in vals for c in vals]: + scenario = { + 'ts_file': ts_file, + 'data_file': data_file, + 'meta_file': meta_file} + if scenario in ok_scenarios: + continue + self.assertFalse(mgr._verify_ondisk_files(scenario), + 'Unexpected result for scenario %s' % scenario) + + def test_parse_on_disk_filename(self): + mgr = self.df_router[POLICIES.default] + for ts in (Timestamp('1234567890.00001'), + Timestamp('1234567890.00001', offset=17)): + for ext in ('.meta', '.data', '.ts'): + fname = '%s%s' % (ts.internal, ext) + info = mgr.parse_on_disk_filename(fname) + self.assertEqual(ts, info['timestamp']) + self.assertEqual(ext, info['ext']) + + def test_parse_on_disk_filename_errors(self): + mgr = self.df_router[POLICIES.default] + with self.assertRaises(DiskFileError) as cm: + mgr.parse_on_disk_filename('junk') + self.assertEqual("Invalid Timestamp value in filename 'junk'", + str(cm.exception)) def test_hash_cleanup_listdir_reclaim(self): # Each scenario specifies a list of (filename, extension, [survives]) @@ -1187,6 +1286,10 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): # data with no durable is ignored [('0000000007.00000#0.data', False, True)], + # data newer than tombstone with no durable is ignored + [('0000000007.00000#0.data', False, True), + ('0000000006.00000.ts', '.ts', True)], + # data newer than durable is ignored [('0000000008.00000#1.data', False, True), ('0000000007.00000.durable', '.durable'), @@ -1365,7 +1468,7 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): reclaim_age=1000) def test_get_ondisk_files_with_stray_meta(self): - # get_ondisk_files does not tolerate a stray .meta file + # get_ondisk_files ignores a stray .meta file class_under_test = self._get_diskfile(POLICIES.default) @contextmanager @@ -1408,6 +1511,41 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): self.fail('expected DiskFileNotExist opening %s with %r' % ( class_under_test.__class__.__name__, files)) + def test_verify_ondisk_files(self): + # _verify_ondisk_files should only return False if get_ondisk_files + # has produced a bad set of files due to a bug, so to test it we need + # to probe it directly. 
+ mgr = self.df_router[POLICIES.default] + ok_scenarios = ( + {'ts_file': None, 'data_file': None, 'meta_file': None, + 'durable_frag_set': None}, + {'ts_file': None, 'data_file': 'a_file', 'meta_file': None, + 'durable_frag_set': ['a_file']}, + {'ts_file': None, 'data_file': 'a_file', 'meta_file': 'a_file', + 'durable_frag_set': ['a_file']}, + {'ts_file': 'a_file', 'data_file': None, 'meta_file': None, + 'durable_frag_set': None}, + ) + + for scenario in ok_scenarios: + self.assertTrue(mgr._verify_ondisk_files(scenario), + 'Unexpected result for scenario %s' % scenario) + + # construct every possible invalid combination of results + vals = (None, 'a_file') + for ts_file, data_file, meta_file, durable_frag in [ + (a, b, c, d) + for a in vals for b in vals for c in vals for d in vals]: + scenario = { + 'ts_file': ts_file, + 'data_file': data_file, + 'meta_file': meta_file, + 'durable_frag_set': [durable_frag] if durable_frag else None} + if scenario in ok_scenarios: + continue + self.assertFalse(mgr._verify_ondisk_files(scenario), + 'Unexpected result for scenario %s' % scenario) + def test_parse_on_disk_filename(self): mgr = self.df_router[POLICIES.default] for ts in (Timestamp('1234567890.00001'), @@ -1416,6 +1554,7 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): fname = '%s#%s.data' % (ts.internal, frag) info = mgr.parse_on_disk_filename(fname) self.assertEqual(ts, info['timestamp']) + self.assertEqual('.data', info['ext']) self.assertEqual(frag, info['frag_index']) self.assertEqual(mgr.make_on_disk_filename(**info), fname) @@ -1423,6 +1562,7 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): fname = '%s%s' % (ts.internal, ext) info = mgr.parse_on_disk_filename(fname) self.assertEqual(ts, info['timestamp']) + self.assertEqual(ext, info['ext']) self.assertEqual(None, info['frag_index']) self.assertEqual(mgr.make_on_disk_filename(**info), fname) @@ -1431,12 +1571,9 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): for ts in (Timestamp('1234567890.00001'), Timestamp('1234567890.00001', offset=17)): fname = '%s.data' % ts.internal - try: + with self.assertRaises(DiskFileError) as cm: mgr.parse_on_disk_filename(fname) - msg = 'Expected DiskFileError for filename %s' % fname - self.fail(msg) - except DiskFileError: - pass + self.assertTrue(str(cm.exception).startswith("Bad fragment index")) expected = { '': 'bad', @@ -1451,13 +1588,14 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): for frag, msg in expected.items(): fname = '%s#%s.data' % (ts.internal, frag) - try: + with self.assertRaises(DiskFileError) as cm: mgr.parse_on_disk_filename(fname) - except DiskFileError as e: - self.assertTrue(msg in str(e).lower()) - else: - msg = 'Expected DiskFileError for filename %s' % fname - self.fail(msg) + self.assertTrue(msg in str(cm.exception).lower()) + + with self.assertRaises(DiskFileError) as cm: + mgr.parse_on_disk_filename('junk') + self.assertEqual("Invalid Timestamp value in filename 'junk'", + str(cm.exception)) def test_make_on_disk_filename(self): mgr = self.df_router[POLICIES.default] @@ -1524,34 +1662,6 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): actual = mgr.make_on_disk_filename(ts, ext, frag_index=frag) self.assertEqual(expected, actual) - def test_is_obsolete(self): - mgr = self.df_router[POLICIES.default] - for ts in (Timestamp('1234567890.00001'), - Timestamp('1234567890.00001', offset=17)): - for ts2 in (Timestamp('1234567890.99999'), - 
Timestamp('1234567890.99999', offset=17), - ts): - f_2 = mgr.make_on_disk_filename(ts, '.durable') - for fi in (0, 2): - for ext in ('.data', '.meta', '.durable', '.ts'): - f_1 = mgr.make_on_disk_filename( - ts2, ext, frag_index=fi) - self.assertFalse(mgr.is_obsolete(f_1, f_2), - '%s should not be obsolete w.r.t. %s' - % (f_1, f_2)) - - for ts2 in (Timestamp('1234567890.00000'), - Timestamp('1234500000.00000', offset=0), - Timestamp('1234500000.00000', offset=17)): - f_2 = mgr.make_on_disk_filename(ts, '.durable') - for fi in (0, 2): - for ext in ('.data', '.meta', '.durable', '.ts'): - f_1 = mgr.make_on_disk_filename( - ts2, ext, frag_index=fi) - self.assertTrue(mgr.is_obsolete(f_1, f_2), - '%s should not be w.r.t. %s' - % (f_1, f_2)) - def test_yield_hashes(self): old_ts = '1383180000.12345' fresh_ts = Timestamp(time() - 10).internal @@ -1724,6 +1834,7 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): # missing frag index '9444a92d072897b136b3fc06595b7456': [ ts1.internal + '.data'], + # junk '9555a92d072897b136b3fc06595b8456': [ 'junk_file'], # missing .durable @@ -1733,6 +1844,7 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): # .meta files w/o .data files can't be opened, and are ignored '9777a92d072897b136b3fc06595ba456': [ ts1.internal + '.meta'], + # multiple meta files with no data '9888a92d072897b136b3fc06595bb456': [ ts1.internal + '.meta', ts2.internal + '.meta'], @@ -2259,12 +2371,13 @@ class DiskFileMixin(BaseDiskFileTestMixin): def _get_open_disk_file(self, invalid_type=None, obj_name='o', fsize=1024, csize=8, mark_deleted=False, prealloc=False, ts=None, mount_check=False, extra_metadata=None, - policy=None, frag_index=None): + policy=None, frag_index=None, data=None, + commit=True): '''returns a DiskFile''' policy = policy or POLICIES.legacy df = self._simple_get_diskfile(obj=obj_name, policy=policy, frag_index=frag_index) - data = '0' * fsize + data = data or '0' * fsize etag = md5() if ts: timestamp = Timestamp(ts) @@ -2304,7 +2417,8 @@ class DiskFileMixin(BaseDiskFileTestMixin): elif invalid_type == 'Bad-X-Delete-At': metadata['X-Delete-At'] = 'bad integer' diskfile.write_metadata(writer._fd, metadata) - writer.commit(timestamp) + if commit: + writer.commit(timestamp) if mark_deleted: df.delete(timestamp) @@ -3181,6 +3295,33 @@ class DiskFileMixin(BaseDiskFileTestMixin): with self.assertRaises(DiskFileNotOpen): df.data_timestamp + def test_durable_timestamp(self): + ts_1 = self.ts() + df = self._get_open_disk_file(ts=ts_1.internal) + with df.open(): + self.assertEqual(df.durable_timestamp, ts_1.internal) + # verify durable timestamp does not change when metadata is written + ts_2 = self.ts() + df.write_metadata({'X-Timestamp': ts_2.internal}) + with df.open(): + self.assertEqual(df.durable_timestamp, ts_1.internal) + + def test_durable_timestamp_not_open(self): + df = self._simple_get_diskfile() + with self.assertRaises(DiskFileNotOpen): + df.durable_timestamp + + def test_durable_timestamp_no_data_file(self): + df = self._get_open_disk_file(self.ts().internal) + for f in os.listdir(df._datadir): + if f.endswith('.data'): + os.unlink(os.path.join(df._datadir, f)) + df = self._simple_get_diskfile() + with self.assertRaises(DiskFileNotExist): + df.open() + # open() was attempted, but no data file so expect None + self.assertIsNone(df.durable_timestamp) + def test_error_in_hash_cleanup_listdir(self): def mock_hcl(*args, **kwargs): @@ -3914,6 +4055,72 @@ class TestECDiskFile(DiskFileMixin, unittest.TestCase): 'a', 'c', 'o', 
policy=policy) self.assertRaises(DiskFileNotExist, df.read_metadata) + def test_fragments(self): + ts_1 = self.ts() + self._get_open_disk_file(ts=ts_1.internal, frag_index=0) + df = self._get_open_disk_file(ts=ts_1.internal, frag_index=2) + self.assertEqual(df.fragments, {ts_1: [0, 2]}) + + # now add a newer datafile for frag index 3 but don't write a + # durable with it (so ignore the error when we try to open) + ts_2 = self.ts() + try: + df = self._get_open_disk_file(ts=ts_2.internal, frag_index=3, + commit=False) + except DiskFileNotExist: + pass + + # sanity check: should have 2* .data, .durable, .data + files = os.listdir(df._datadir) + self.assertEqual(4, len(files)) + with df.open(): + self.assertEqual(df.fragments, {ts_1: [0, 2], ts_2: [3]}) + + # verify frags available even if open fails e.g. if .durable missing + for f in filter(lambda f: f.endswith('.durable'), files): + os.remove(os.path.join(df._datadir, f)) + + self.assertRaises(DiskFileNotExist, df.open) + self.assertEqual(df.fragments, {ts_1: [0, 2], ts_2: [3]}) + + def test_fragments_not_open(self): + df = self._simple_get_diskfile() + self.assertIsNone(df.fragments) + + def test_durable_timestamp_no_durable_file(self): + try: + self._get_open_disk_file(self.ts().internal, commit=False) + except DiskFileNotExist: + pass + df = self._simple_get_diskfile() + with self.assertRaises(DiskFileNotExist): + df.open() + # open() was attempted, but no durable file so expect None + self.assertIsNone(df.durable_timestamp) + + def test_durable_timestamp_missing_frag_index(self): + ts1 = self.ts() + self._get_open_disk_file(ts=ts1.internal, frag_index=1) + df = self._simple_get_diskfile(frag_index=2) + with self.assertRaises(DiskFileNotExist): + df.open() + # open() was attempted, but no data file for frag index so expect None + self.assertIsNone(df.durable_timestamp) + + def test_durable_timestamp_newer_non_durable_data_file(self): + ts1 = self.ts() + self._get_open_disk_file(ts=ts1.internal) + ts2 = self.ts() + try: + self._get_open_disk_file(ts=ts2.internal, commit=False) + except DiskFileNotExist: + pass + df = self._simple_get_diskfile() + # sanity check - one .durable file, two .data files + self.assertEqual(3, len(os.listdir(df._datadir))) + df.open() + self.assertEqual(ts1, df.durable_timestamp) + @patch_policies(with_ec_default=True) class TestSuffixHashes(unittest.TestCase): @@ -4493,15 +4700,19 @@ class TestSuffixHashes(unittest.TestCase): filename += '#%s' % df._frag_index filename += suff open(os.path.join(df._datadir, filename), 'w').close() + meta_timestamp = Timestamp(now) + metadata_filename = meta_timestamp.internal + '.meta' + open(os.path.join(df._datadir, metadata_filename), 'w').close() + # call get_hashes and it should clean things up hashes = df_mgr.get_hashes('sda1', '0', [], policy) + data_filename = timestamp.internal if policy.policy_type == EC_POLICY: data_filename += '#%s' % df._frag_index data_filename += '.data' - metadata_filename = timestamp.internal + '.meta' - durable_filename = timestamp.internal + '.durable' if policy.policy_type == EC_POLICY: + durable_filename = timestamp.internal + '.durable' hasher = md5() hasher.update(metadata_filename) hasher.update(durable_filename) diff --git a/test/unit/obj/test_ssync_sender.py b/test/unit/obj/test_ssync_sender.py index 60c42855b9..b7286527fd 100644 --- a/test/unit/obj/test_ssync_sender.py +++ b/test/unit/obj/test_ssync_sender.py @@ -1499,15 +1499,16 @@ class TestSender(BaseTest): '%(body)s\r\n' % expected) def test_send_post(self): + ts_iter = 
make_timestamp_iter()
         # create .data file
         extra_metadata = {'X-Object-Meta-Foo': 'old_value',
                           'X-Object-Sysmeta-Test': 'test_sysmeta',
                           'Content-Type': 'test_content_type'}
-        ts_0 = next(make_timestamp_iter())
+        ts_0 = next(ts_iter)
         df = self._make_open_diskfile(extra_metadata=extra_metadata,
                                       timestamp=ts_0)
         # create .meta file
-        ts_1 = next(make_timestamp_iter())
+        ts_1 = next(ts_iter)
         newer_metadata = {'X-Object-Meta-Foo': 'new_value',
                           'X-Timestamp': ts_1.internal}
         df.write_metadata(newer_metadata)

From 2f4b79233e30d42140bbc07059417443bf7a0757 Mon Sep 17 00:00:00 2001
From: Alistair Coles
Date: Tue, 15 Dec 2015 15:49:42 +0000
Subject: [PATCH 10/52] Minor cleanup of repeated identical test assertions

assertDictContainsSubset is being called multiple times with the same
arguments in a loop. Since assertDictContainsSubset is deprecated as of
Python 3.2, replace it with checks on individual key/value pairs.

Change-Id: I7089487710147021f26bd77c36accf5751855d68
---
 test/unit/proxy/controllers/test_base.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py
index f865954fe8..85b3b305e2 100644
--- a/test/unit/proxy/controllers/test_base.py
+++ b/test/unit/proxy/controllers/test_base.py
@@ -725,7 +725,8 @@ class TestFuncs(unittest.TestCase):
         expected_headers = {'x-base-meta-size': '151M',
                             'connection': 'close'}
         for k, v in expected_headers.items():
-            self.assertDictContainsSubset(expected_headers, dst_headers)
+            self.assertIn(k, dst_headers)
+            self.assertEqual(v, dst_headers[k])
         self.assertEqual('', dst_headers['Referer'])

     def test_client_chunk_size(self):

From 9d7f71d5754c8b45f8e7c6ab80202de09933afb8 Mon Sep 17 00:00:00 2001
From: Richard Hawkins
Date: Fri, 7 Aug 2015 18:14:13 -0500
Subject: [PATCH 11/52] Modify functional tests to use ostestr/testr

Defcore uses Tempest, which uses Test Repository. This change makes it
easier for Defcore to pull functional tests from Swift and run them.
Additionally, using testr allows tests to be run in parallel.
Concurrency is set to 1 for now; >1 causes failures for reasons that
are still TBD.

With the switch to ostestr, all the server logs are sent to stdout,
which makes the output completely unreadable. The logs are therefore
suppressed by default, with a flag to enable them if desired.

Co-Authored-By: John Dickinson
Co-Authored-By: Robert Collins
Co-Authored-By: Matthew Oliver
Co-Authored-By: Ganesh Maharaj Mahalingam
Change-Id: I53ef4a116996a772cf1f3abc2eb0ad60047322d5
Related-Bug: 1177924
---
 .functests                          |  6 ++--
 .gitignore                          |  2 ++
 .testr.conf                         |  6 ++++
 test-requirements.txt               |  1 +
 test/functional/__init__.py         | 50 +++++++++++++++++++++++------
 test/functional/test_account.py     |  8 +++++
 test/functional/test_container.py   |  8 +++++
 test/functional/test_object.py      |  8 +++++
 test/functional/tests.py            |  8 +++++
 test/unit/account/test_backend.py   |  4 +--
 test/unit/container/test_backend.py |  4 +--
 tox.ini                             |  2 +-
 12 files changed, 91 insertions(+), 16 deletions(-)
 create mode 100644 .testr.conf

diff --git a/.functests b/.functests
index 65a9ea191c..af989f50ff 100755
--- a/.functests
+++ b/.functests
@@ -1,9 +1,11 @@
 #!/bin/bash

 SRC_DIR=$(python -c "import os; print os.path.dirname(os.path.realpath('$0'))")
+set -e

-cd ${SRC_DIR}/test/functional
-nosetests --exe $@
+cd ${SRC_DIR}
+export TESTS_DIR=${SRC_DIR}/test/functional
+ostestr --serial --pretty $@
 rvalue=$?
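+# NOTE: 'set -e' above aborts the script on a failing ostestr run, so
+# rvalue is only captured (as zero) when the tests pass.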
cd - diff --git a/.gitignore b/.gitignore index 9c4f1c6b10..580518daac 100644 --- a/.gitignore +++ b/.gitignore @@ -15,4 +15,6 @@ pycscope.* .idea MANIFEST +.testrepository/* +subunit.log test/probe/.noseids diff --git a/.testr.conf b/.testr.conf new file mode 100644 index 0000000000..293e07d22d --- /dev/null +++ b/.testr.conf @@ -0,0 +1,6 @@ +[DEFAULT] +test_command=SWIFT_TEST_DEBUG_LOGS=${SWIFT_TEST_DEBUG_LOGS} \ + ${PYTHON:-python} -m subunit.run \ + discover -t ./ ${TESTS_DIR:-./test/functional/} $LISTOPT $IDOPTION +test_id_option=--load-list $IDFILE +test_list_option=--list diff --git a/test-requirements.txt b/test-requirements.txt index 73ca508fe3..0c6e9fe2cc 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -10,6 +10,7 @@ nosexcover nosehtmloutput oslosphinx sphinx>=1.1.2,<1.2 +os-testr>=0.4.1 mock>=1.0 python-swiftclient python-keystoneclient>=1.3.0 diff --git a/test/functional/__init__.py b/test/functional/__init__.py index 242a4667a9..a2b422ec01 100644 --- a/test/functional/__init__.py +++ b/test/functional/__init__.py @@ -109,7 +109,7 @@ orig_hash_path_suff_pref = ('', '') orig_swift_conf_name = None in_process = False -_testdir = _test_servers = _test_coros = None +_testdir = _test_servers = _test_coros = _test_socks = None policy_specified = None @@ -290,6 +290,7 @@ def in_process_setup(the_object_server=object_server): _info('IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS') _info('Using object_server class: %s' % the_object_server.__name__) conf_src_dir = os.environ.get('SWIFT_TEST_IN_PROCESS_CONF_DIR') + show_debug_logs = os.environ.get('SWIFT_TEST_DEBUG_LOGS') if conf_src_dir is not None: if not os.path.isdir(conf_src_dir): @@ -339,10 +340,13 @@ def in_process_setup(the_object_server=object_server): orig_hash_path_suff_pref = utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX utils.validate_hash_conf() + global _test_socks + _test_socks = [] # We create the proxy server listening socket to get its port number so # that we can add it as the "auth_port" value for the functional test # clients. 
prolis = eventlet.listen(('localhost', 0)) + _test_socks.append(prolis) # The following set of configuration values is used both for the # functional test frame work and for the various proxy, account, container @@ -388,6 +392,7 @@ def in_process_setup(the_object_server=object_server): acc2lis = eventlet.listen(('localhost', 0)) con1lis = eventlet.listen(('localhost', 0)) con2lis = eventlet.listen(('localhost', 0)) + _test_socks += [acc1lis, acc2lis, con1lis, con2lis] + obj_sockets account_ring_path = os.path.join(_testdir, 'account.ring.gz') with closing(GzipFile(account_ring_path, 'wb')) as f: @@ -416,23 +421,30 @@ def in_process_setup(the_object_server=object_server): # Default to only 4 seconds for in-process functional test runs eventlet.wsgi.WRITE_TIMEOUT = 4 + def get_logger_name(name): + if show_debug_logs: + return debug_logger(name) + else: + return None + acc1srv = account_server.AccountController( - config, logger=debug_logger('acct1')) + config, logger=get_logger_name('acct1')) acc2srv = account_server.AccountController( - config, logger=debug_logger('acct2')) + config, logger=get_logger_name('acct2')) con1srv = container_server.ContainerController( - config, logger=debug_logger('cont1')) + config, logger=get_logger_name('cont1')) con2srv = container_server.ContainerController( - config, logger=debug_logger('cont2')) + config, logger=get_logger_name('cont2')) objsrvs = [ (obj_sockets[index], the_object_server.ObjectController( - config, logger=debug_logger('obj%d' % (index + 1)))) + config, logger=get_logger_name('obj%d' % (index + 1)))) for index in range(len(obj_sockets)) ] - logger = debug_logger('proxy') + if show_debug_logs: + logger = debug_logger('proxy') def get_logger(name, *args, **kwargs): return logger @@ -446,6 +458,8 @@ def in_process_setup(the_object_server=object_server): raise InProcessException(e) nl = utils.NullLogger() + global proxy_srv + proxy_srv = prolis prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl) acc1spa = eventlet.spawn(eventlet.wsgi.server, acc1lis, acc1srv, nl) acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl) @@ -487,6 +501,7 @@ def get_cluster_info(): # We'll update those constraints based on what the /info API provides, if # anything. global cluster_info + global config try: conn = Connection(config) conn.authenticate() @@ -536,6 +551,7 @@ def setup_package(): global in_process + global config if use_in_process: # Explicitly set to True, so barrel on ahead with in-process # functional test setup. 
@@ -722,7 +738,6 @@ def setup_package(): % policy_specified) raise Exception('Failed to find specified policy %s' % policy_specified) - get_cluster_info() @@ -731,16 +746,21 @@ def teardown_package(): locale.setlocale(locale.LC_COLLATE, orig_collate) # clean up containers and objects left behind after running tests + global config conn = Connection(config) conn.authenticate() account = Account(conn, config.get('account', config['username'])) account.delete_containers() global in_process + global _test_socks if in_process: try: - for server in _test_coros: + for i, server in enumerate(_test_coros): server.kill() + if not server.dead: + # kill it from the socket level + _test_socks[i].close() except Exception: pass try: @@ -751,6 +771,7 @@ def teardown_package(): orig_hash_path_suff_pref utils.SWIFT_CONF_FILE = orig_swift_conf_name constraints.reload_constraints() + reset_globals() class AuthError(Exception): @@ -768,6 +789,17 @@ parsed = [None, None, None, None, None] conn = [None, None, None, None, None] +def reset_globals(): + global url, token, service_token, parsed, conn, config + url = [None, None, None, None, None] + token = [None, None, None, None, None] + service_token = [None, None, None, None, None] + parsed = [None, None, None, None, None] + conn = [None, None, None, None, None] + if config: + config = {} + + def connection(url): if has_insecure: parsed_url, http_conn = http_connection(url, insecure=insecure) diff --git a/test/functional/test_account.py b/test/functional/test_account.py index e952c0923b..e2847f29be 100755 --- a/test/functional/test_account.py +++ b/test/functional/test_account.py @@ -29,6 +29,14 @@ from test.functional import check_response, retry, requires_acls, \ import test.functional as tf +def setUpModule(): + tf.setup_package() + + +def tearDownModule(): + tf.teardown_package() + + class TestAccount(unittest.TestCase): def setUp(self): diff --git a/test/functional/test_container.py b/test/functional/test_container.py index 345aa0aa84..c4a8a3fcdf 100755 --- a/test/functional/test_container.py +++ b/test/functional/test_container.py @@ -27,6 +27,14 @@ import test.functional as tf from six.moves import range +def setUpModule(): + tf.setup_package() + + +def tearDownModule(): + tf.teardown_package() + + class TestContainer(unittest.TestCase): def setUp(self): diff --git a/test/functional/test_object.py b/test/functional/test_object.py index 55868098be..b13d3f8fe7 100755 --- a/test/functional/test_object.py +++ b/test/functional/test_object.py @@ -27,6 +27,14 @@ from test.functional import check_response, retry, requires_acls, \ import test.functional as tf +def setUpModule(): + tf.setup_package() + + +def tearDownModule(): + tf.teardown_package() + + class TestObject(unittest.TestCase): def setUp(self): diff --git a/test/functional/tests.py b/test/functional/tests.py index 571a6b3473..fcc239c4c4 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -39,6 +39,14 @@ from test.functional.swift_test_client import Account, Connection, File, \ ResponseError +def setUpModule(): + tf.setup_package() + + +def tearDownModule(): + tf.teardown_package() + + class Utils(object): @classmethod def create_ascii_name(cls, length=None): diff --git a/test/unit/account/test_backend.py b/test/unit/account/test_backend.py index ca89736e5c..ebc0ebfca2 100644 --- a/test/unit/account/test_backend.py +++ b/test/unit/account/test_backend.py @@ -37,7 +37,7 @@ from test.unit import patch_policies, with_tempdir, make_timestamp_iter from swift.common.db import 
DatabaseConnectionError
 from swift.common.storage_policy import StoragePolicy, POLICIES

-from test.unit.common.test_db import TestExampleBroker
+from test.unit.common import test_db


 @patch_policies
@@ -979,7 +979,7 @@ def premetadata_create_account_stat_table(self, conn, put_timestamp):
                             put_timestamp))


-class TestCommonAccountBroker(TestExampleBroker):
+class TestCommonAccountBroker(test_db.TestExampleBroker):

     broker_class = AccountBroker

diff --git a/test/unit/container/test_backend.py b/test/unit/container/test_backend.py
index 332e161eef..721f0f9094 100644
--- a/test/unit/container/test_backend.py
+++ b/test/unit/container/test_backend.py
@@ -36,7 +36,7 @@ import mock

 from test.unit import (patch_policies, with_tempdir, make_timestamp_iter,
                        EMPTY_ETAG)
-from test.unit.common.test_db import TestExampleBroker
+from test.unit.common import test_db


 class TestContainerBroker(unittest.TestCase):
@@ -1680,7 +1680,7 @@ class TestContainerBroker(unittest.TestCase):
         self.assertEqual(broker.get_policy_stats(), expected)


-class TestCommonContainerBroker(TestExampleBroker):
+class TestCommonContainerBroker(test_db.TestExampleBroker):

     broker_class = ContainerBroker

diff --git a/tox.ini b/tox.ini
index ac22896de4..46e7b37494 100644
--- a/tox.ini
+++ b/tox.ini
@@ -45,7 +45,7 @@ commands = flake8 --filename=swift* bin

 [testenv:func]
-commands = nosetests {posargs:test/functional}
+commands = ./.functests {posargs}

 [testenv:venv]
 commands = {posargs}

From 1bb665331af92422290fb585de7cb6a2497236e6 Mon Sep 17 00:00:00 2001
From: Venkateswarlu Pallamala
Date: Mon, 9 Nov 2015 19:22:38 -0800
Subject: [PATCH 12/52] remove unused parameter in the method

Remove the unused start_response parameter from the X-Object-Manifest
validation helper, and make the helper private by following the
leading-underscore naming convention.

Change-Id: I73b9604f8d5a0e85d012aac42b7963b618f5ad97
---
 swift/common/middleware/dlo.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/swift/common/middleware/dlo.py b/swift/common/middleware/dlo.py
index 2bd1347327..80959f1149 100644
--- a/swift/common/middleware/dlo.py
+++ b/swift/common/middleware/dlo.py
@@ -416,13 +416,12 @@ class DynamicLargeObject(object):
             return GetContext(self, self.logger).\
                 handle_request(req, start_response)
         elif req.method == 'PUT':
-            error_response = self.validate_x_object_manifest_header(
-                req, start_response)
+            error_response = self._validate_x_object_manifest_header(req)
             if error_response:
                 return error_response(env, start_response)
         return self.app(env, start_response)

-    def validate_x_object_manifest_header(self, req, start_response):
+    def _validate_x_object_manifest_header(self, req):
         """
         Make sure that X-Object-Manifest is valid if present.
         """

From b68311db95860ac1cab585a5ab66bd3b3abb765e Mon Sep 17 00:00:00 2001
From: Kota Tsuyuzaki
Date: Tue, 15 Dec 2015 18:55:41 -0800
Subject: [PATCH 13/52] Fix reconciler test to calc lastmodified as UTC

The Swift reconciler calculates the last-modified date in UTC, but the
current test computes it in the local time zone, which makes the unit
test fail in non-UTC environments. This patch fixes the test to
calculate the last-modified date in UTC as well.
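
For illustration, a sketch of the difference (not part of the patch;
a UTC+9 local zone is assumed):

    from datetime import datetime

    ts = 0.0
    # local wall-clock time: '1970-01-01T09:00:00.000000' in JST
    datetime.fromtimestamp(ts).strftime('%Y-%m-%dT%H:%M:%S.%f')
    # UTC, matching what the reconciler reports: '1970-01-01T00:00:00.000000'
    datetime.utcfromtimestamp(ts).strftime('%Y-%m-%dT%H:%M:%S.%f')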
Change-Id: Ia0053f350daf2cb8c61ac01a933924b6e4b0cb37 Closes-Bug: #1526588 --- test/unit/container/test_reconciler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/unit/container/test_reconciler.py b/test/unit/container/test_reconciler.py index 974a35c7bc..771e9f83e3 100644 --- a/test/unit/container/test_reconciler.py +++ b/test/unit/container/test_reconciler.py @@ -36,7 +36,7 @@ from test.unit.common.middleware.helpers import FakeSwift def timestamp_to_last_modified(timestamp): - return datetime.fromtimestamp( + return datetime.utcfromtimestamp( float(Timestamp(timestamp))).strftime('%Y-%m-%dT%H:%M:%S.%f') From 169a7c7f9e12ebc9933bd9ca4592e13b0de8b47b Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 16 Dec 2015 15:28:25 +0000 Subject: [PATCH 14/52] Fix func test --until-failure and --no-discover options This patch changes functional test classes to subclass unittest2.TestCase rather than unittest.TestCase. This fixes errors when attempting to use tox -e func -- -n and tox -e func -- --until-failure Also migrate from using nose.SkipTest to unittest2.SkipTest Change-Id: I903033f5e01833550b2f2b945894edca4233c4a2 Closes-Bug: 1526725 Co-Authored-By: Ganesh Maharaj Mahalingam --- .testr.conf | 4 +--- test/functional/__init__.py | 2 +- test/functional/swift_test_client.py | 2 +- test/functional/test_account.py | 10 +++++----- test/functional/test_container.py | 10 +++++----- test/functional/test_object.py | 8 ++++---- test/functional/tests.py | 10 +++++----- 7 files changed, 22 insertions(+), 24 deletions(-) diff --git a/.testr.conf b/.testr.conf index 293e07d22d..d5ddc32969 100644 --- a/.testr.conf +++ b/.testr.conf @@ -1,6 +1,4 @@ [DEFAULT] -test_command=SWIFT_TEST_DEBUG_LOGS=${SWIFT_TEST_DEBUG_LOGS} \ - ${PYTHON:-python} -m subunit.run \ - discover -t ./ ${TESTS_DIR:-./test/functional/} $LISTOPT $IDOPTION +test_command=SWIFT_TEST_DEBUG_LOGS=${SWIFT_TEST_DEBUG_LOGS} ${PYTHON:-python} -m subunit.run discover -t ./ ${TESTS_DIR:-./test/functional/} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list diff --git a/test/functional/__init__.py b/test/functional/__init__.py index a2b422ec01..b458b180ee 100644 --- a/test/functional/__init__.py +++ b/test/functional/__init__.py @@ -27,11 +27,11 @@ import functools import random from time import time, sleep -from nose import SkipTest from contextlib import closing from gzip import GzipFile from shutil import rmtree from tempfile import mkdtemp +from unittest2 import SkipTest from six.moves.configparser import ConfigParser, NoSectionError from six.moves import http_client diff --git a/test/functional/swift_test_client.py b/test/functional/swift_test_client.py index 10ce6705eb..7d08e1f2ee 100644 --- a/test/functional/swift_test_client.py +++ b/test/functional/swift_test_client.py @@ -20,7 +20,7 @@ import random import socket import time -from nose import SkipTest +from unittest2 import SkipTest from xml.dom import minidom import six diff --git a/test/functional/test_account.py b/test/functional/test_account.py index e2847f29be..9688a5f493 100755 --- a/test/functional/test_account.py +++ b/test/functional/test_account.py @@ -15,10 +15,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import unittest +import unittest2 import json from uuid import uuid4 -from nose import SkipTest +from unittest2 import SkipTest from string import letters from six.moves import range @@ -37,7 +37,7 @@ def tearDownModule(): tf.teardown_package() -class TestAccount(unittest.TestCase): +class TestAccount(unittest2.TestCase): def setUp(self): self.max_meta_count = load_constraint('max_meta_count') @@ -862,7 +862,7 @@ class TestAccount(unittest.TestCase): self.assertEqual(resp.status, 400) -class TestAccountInNonDefaultDomain(unittest.TestCase): +class TestAccountInNonDefaultDomain(unittest2.TestCase): def setUp(self): if tf.skip or tf.skip2 or tf.skip_if_not_v3: raise SkipTest('AUTH VERSION 3 SPECIFIC TEST') @@ -891,4 +891,4 @@ class TestAccountInNonDefaultDomain(unittest.TestCase): if __name__ == '__main__': - unittest.main() + unittest2.main() diff --git a/test/functional/test_container.py b/test/functional/test_container.py index c4a8a3fcdf..49d3d4ac92 100755 --- a/test/functional/test_container.py +++ b/test/functional/test_container.py @@ -16,8 +16,8 @@ # limitations under the License. import json -import unittest -from nose import SkipTest +import unittest2 +from unittest2 import SkipTest from uuid import uuid4 from test.functional import check_response, retry, requires_acls, \ @@ -35,7 +35,7 @@ def tearDownModule(): tf.teardown_package() -class TestContainer(unittest.TestCase): +class TestContainer(unittest2.TestCase): def setUp(self): if tf.skip: @@ -1559,7 +1559,7 @@ class TestContainer(unittest.TestCase): policy['name']) -class BaseTestContainerACLs(unittest.TestCase): +class BaseTestContainerACLs(unittest2.TestCase): # subclasses can change the account in which container # is created/deleted by setUp/tearDown account = 1 @@ -1734,4 +1734,4 @@ class TestContainerACLsAccount4(BaseTestContainerACLs): if __name__ == '__main__': - unittest.main() + unittest2.main() diff --git a/test/functional/test_object.py b/test/functional/test_object.py index b13d3f8fe7..f559efb31a 100755 --- a/test/functional/test_object.py +++ b/test/functional/test_object.py @@ -16,8 +16,8 @@ # limitations under the License. 
 import json
-import unittest
-from nose import SkipTest
+import unittest2
+from unittest2 import SkipTest
 from uuid import uuid4

 from six.moves import range
@@ -35,7 +35,7 @@ def tearDownModule():
     tf.teardown_package()


-class TestObject(unittest.TestCase):
+class TestObject(unittest2.TestCase):

     def setUp(self):
         if tf.skip:
@@ -1254,4 +1254,4 @@ class TestObject(unittest.TestCase):


 if __name__ == '__main__':
-    unittest.main()
+    unittest2.main()
diff --git a/test/functional/tests.py b/test/functional/tests.py
index fcc239c4c4..def119ff35 100644
--- a/test/functional/tests.py
+++ b/test/functional/tests.py
@@ -25,11 +25,11 @@ import random
 import six
 from six.moves import urllib
 import time
-import unittest
+import unittest2
 import uuid
 from copy import deepcopy
 import eventlet
-from nose import SkipTest
+from unittest2 import SkipTest

 from swift.common.http import is_success, is_client_error
 from test.functional import normalized_urls, load_constraint, cluster_info
@@ -70,7 +70,7 @@ class Utils(object):
     create_name = create_ascii_name


-class Base(unittest.TestCase):
+class Base(unittest2.TestCase):
     def setUp(self):
         cls = type(self)
         if not cls.set_up:
@@ -4148,7 +4148,7 @@ class TestSloTempurlUTF8(Base2, TestSloTempurl):
     set_up = False


-class TestServiceToken(unittest.TestCase):
+class TestServiceToken(unittest2.TestCase):

     def setUp(self):
         if tf.skip_service_tokens:
@@ -4316,4 +4316,4 @@ class TestServiceToken(unittest.TestCase):


 if __name__ == '__main__':
-    unittest.main()
+    unittest2.main()

From e15960a5d86e00a7d420edc4af034b27da0af8fd Mon Sep 17 00:00:00 2001
From: Alistair Coles
Date: Thu, 17 Dec 2015 12:08:45 +0000
Subject: [PATCH 15/52] Fix incorrect kwarg in auth middleware example

When calling memcache_client.set(), the timeout keyword arg was
deprecated and has now been removed; use time instead.

Change-Id: Iedbd5b064853ef2b386963246f639fbcd3931cd3
---
 doc/source/development_auth.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/development_auth.rst b/doc/source/development_auth.rst
index bb00ca8fc7..50f21fb251 100644
--- a/doc/source/development_auth.rst
+++ b/doc/source/development_auth.rst
@@ -375,7 +375,7 @@ folks a start on their own code if they want to use repoze.what::
                 expiration = float(resp.getheader('x-auth-ttl'))
                 user = resp.getheader('x-auth-user')
                 memcache_client.set(key, (time(), expiration, user),
-                                    timeout=expiration)
+                                    time=expiration)
                 return user
         return None

From 87f7e907ee412f5847f1f9ffca7a566fb148c6b1 Mon Sep 17 00:00:00 2001
From: Matthew Oliver
Date: Wed, 16 Dec 2015 17:19:24 +1100
Subject: [PATCH 16/52] Pass HTTP_REFERER down to subrequests

Currently the HTTP_REFERER (Referer) header isn't passed down to
subrequests. This means *LO subrequests to segment containers return
a 403 on a *LO GET when access is granted via referer ACLs. Until now
the only way to allow referer access to *LOs was to make the segments
container world-readable.

This change makes sure the referer header is passed into subrequests,
so a segments container only needs to be locked down with the same
referer ACL as the *LO container.

This is a one-line code change, but it also adds one unit test and two
functional tests (one for DLO and one for SLO).
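
A sketch of the behaviour at the wsgi.make_env level (illustrative
values; the segment path and ACL are assumptions, not from the patch):

    from swift.common.wsgi import make_env

    env = {'REQUEST_METHOD': 'GET',
           'HTTP_REFERER': 'http://blah.example.com'}
    subenv = make_env(env, method='GET', path='/v1/AUTH_test/segs/seg1')
    # Before this change HTTP_REFERER was not copied into the subrequest
    # environment, so a referer ACL such as '.r:*.example.com' on the
    # segment container could never match; now the header is carried over.
    assert subenv['HTTP_REFERER'] == 'http://blah.example.com'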
Change-Id: I1fa5328979302d9c8133aa739787c8dae6084f54 Closes-Bug: #1526575 --- swift/common/wsgi.py | 3 +- test/functional/tests.py | 99 ++++++++++++++++++++++++++++++++--- test/unit/common/test_wsgi.py | 7 +++ 3 files changed, 102 insertions(+), 7 deletions(-) diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index 97e704228a..7ba97eefce 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -1095,7 +1095,8 @@ def make_env(env, method=None, path=None, agent='Swift', query_string=None, 'HTTP_ORIGIN', 'HTTP_ACCESS_CONTROL_REQUEST_METHOD', 'SERVER_PROTOCOL', 'swift.cache', 'swift.source', 'swift.trans_id', 'swift.authorize_override', - 'swift.authorize', 'HTTP_X_USER_ID', 'HTTP_X_PROJECT_ID'): + 'swift.authorize', 'HTTP_X_USER_ID', 'HTTP_X_PROJECT_ID', + 'HTTP_REFERER'): if name in env: newenv[name] = env[name] if method: diff --git a/test/functional/tests.py b/test/functional/tests.py index 571a6b3473..80ccfe7d94 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -2178,14 +2178,23 @@ class TestDloEnv(object): def setUp(cls): cls.conn = Connection(tf.config) cls.conn.authenticate() + + config2 = tf.config.copy() + config2['username'] = tf.config['username3'] + config2['password'] = tf.config['password3'] + cls.conn2 = Connection(config2) + cls.conn2.authenticate() + cls.account = Account(cls.conn, tf.config.get('account', tf.config['username'])) cls.account.delete_containers() cls.container = cls.account.container(Utils.create_name()) + cls.container2 = cls.account.container(Utils.create_name()) - if not cls.container.create(): - raise ResponseError(cls.conn.response) + for cont in (cls.container, cls.container2): + if not cont.create(): + raise ResponseError(cls.conn.response) # avoid getting a prefix that stops halfway through an encoded # character @@ -2199,13 +2208,18 @@ class TestDloEnv(object): file_item = cls.container.file("%s/seg_upper%s" % (prefix, letter)) file_item.write(letter.upper() * 10) + for letter in ('f', 'g', 'h', 'i', 'j'): + file_item = cls.container2.file("%s/seg_lower%s" % + (prefix, letter)) + file_item.write(letter * 10) + man1 = cls.container.file("man1") man1.write('man1-contents', hdrs={"X-Object-Manifest": "%s/%s/seg_lower" % (cls.container.name, prefix)}) - man1 = cls.container.file("man2") - man1.write('man2-contents', + man2 = cls.container.file("man2") + man2.write('man2-contents', hdrs={"X-Object-Manifest": "%s/%s/seg_upper" % (cls.container.name, prefix)}) @@ -2214,6 +2228,12 @@ class TestDloEnv(object): hdrs={"X-Object-Manifest": "%s/%s/seg" % (cls.container.name, prefix)}) + mancont2 = cls.container.file("mancont2") + mancont2.write( + 'mancont2-contents', + hdrs={"X-Object-Manifest": "%s/%s/seg_lower" % + (cls.container2.name, prefix)}) + class TestDlo(Base): env = TestDloEnv @@ -2375,6 +2395,31 @@ class TestDlo(Base): manifest.info(hdrs={'If-None-Match': "not-%s" % etag}) self.assert_status(200) + def test_dlo_referer_on_segment_container(self): + # First the account2 (test3) should fail + headers = {'X-Auth-Token': self.env.conn2.storage_token, + 'Referer': 'http://blah.example.com'} + dlo_file = self.env.container.file("mancont2") + self.assertRaises(ResponseError, dlo_file.read, + hdrs=headers) + self.assert_status(403) + + # Now set the referer on the dlo container only + referer_metadata = {'X-Container-Read': '.r:*.example.com,.rlistings'} + self.env.container.update_metadata(referer_metadata) + + self.assertRaises(ResponseError, dlo_file.read, + hdrs=headers) + self.assert_status(403) + + # Finally set the 
referer on the segment container + self.env.container2.update_metadata(referer_metadata) + + contents = dlo_file.read(hdrs=headers) + self.assertEqual( + contents, + "ffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj") + class TestDloUTF8(Base2, TestDlo): set_up = False @@ -2516,6 +2561,11 @@ class TestSloEnv(object): cls.conn2.authenticate() cls.account2 = cls.conn2.get_account() cls.account2.delete_containers() + config3 = tf.config.copy() + config3['username'] = tf.config['username3'] + config3['password'] = tf.config['password3'] + cls.conn3 = Connection(config3) + cls.conn3.authenticate() if cls.slo_enabled is None: cls.slo_enabled = 'slo' in cluster_info @@ -2527,9 +2577,11 @@ class TestSloEnv(object): cls.account.delete_containers() cls.container = cls.account.container(Utils.create_name()) + cls.container2 = cls.account.container(Utils.create_name()) - if not cls.container.create(): - raise ResponseError(cls.conn.response) + for cont in (cls.container, cls.container2): + if not cont.create(): + raise ResponseError(cls.conn.response) cls.seg_info = seg_info = {} for letter, size in (('a', 1024 * 1024), @@ -2552,6 +2604,14 @@ class TestSloEnv(object): seg_info['seg_e']]), parms={'multipart-manifest': 'put'}) + # Put the same manifest in the container2 + file_item = cls.container2.file("manifest-abcde") + file_item.write( + json.dumps([seg_info['seg_a'], seg_info['seg_b'], + seg_info['seg_c'], seg_info['seg_d'], + seg_info['seg_e']]), + parms={'multipart-manifest': 'put'}) + file_item = cls.container.file('manifest-cd') cd_json = json.dumps([seg_info['seg_c'], seg_info['seg_d']]) file_item.write(cd_json, parms={'multipart-manifest': 'put'}) @@ -3083,6 +3143,33 @@ class TestSlo(Base): manifest.info(hdrs={'If-None-Match': "not-%s" % etag}) self.assert_status(200) + def test_slo_referer_on_segment_container(self): + # First the account2 (test3) should fail + headers = {'X-Auth-Token': self.env.conn3.storage_token, + 'Referer': 'http://blah.example.com'} + slo_file = self.env.container2.file('manifest-abcde') + self.assertRaises(ResponseError, slo_file.read, + hdrs=headers) + self.assert_status(403) + + # Now set the referer on the slo container only + referer_metadata = {'X-Container-Read': '.r:*.example.com,.rlistings'} + self.env.container2.update_metadata(referer_metadata) + + self.assertRaises(ResponseError, slo_file.read, + hdrs=headers) + self.assert_status(409) + + # Finally set the referer on the segment container + self.env.container.update_metadata(referer_metadata) + contents = slo_file.read(hdrs=headers) + self.assertEqual(4 * 1024 * 1024 + 1, len(contents)) + self.assertEqual('a', contents[0]) + self.assertEqual('a', contents[1024 * 1024 - 1]) + self.assertEqual('b', contents[1024 * 1024]) + self.assertEqual('d', contents[-2]) + self.assertEqual('e', contents[-1]) + class TestSloUTF8(Base2, TestSlo): set_up = False diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py index 2212eb0b88..da297d74a7 100644 --- a/test/unit/common/test_wsgi.py +++ b/test/unit/common/test_wsgi.py @@ -825,6 +825,13 @@ class TestWSGI(unittest.TestCase): self.assertTrue('HTTP_X_PROJECT_ID' in newenv) self.assertEqual(newenv['HTTP_X_PROJECT_ID'], '5678') + def test_make_env_keeps_referer(self): + oldenv = {'HTTP_REFERER': 'http://blah.example.com'} + newenv = wsgi.make_env(oldenv) + + self.assertTrue('HTTP_REFERER' in newenv) + self.assertEqual(newenv['HTTP_REFERER'], 'http://blah.example.com') + class TestServersPerPortStrategy(unittest.TestCase): def setUp(self): From 
9fe0e25604dff35db7eab1bca312821a81db6c1d Mon Sep 17 00:00:00 2001
From: Kota Tsuyuzaki
Date: Tue, 8 Dec 2015 22:27:44 -0800
Subject: [PATCH 17/52] Sleep enough for trampoline

When running the unit test suite in a resource-poor local environment,
test/unit/proxy/test_server.py sometimes fails because the eventlet
thread is not given enough waiting time to trampoline. This patch
sleeps for up to 1 more second when there does not seem to have been
enough time for the trampoline to occur.

Change-Id: I0bbc8fc245919d3c0a071ff87ff6e20b8d58f9b8
---
 test/unit/proxy/test_server.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py
index c3e8fc6b20..685463ac14 100644
--- a/test/unit/proxy/test_server.py
+++ b/test/unit/proxy/test_server.py
@@ -1204,6 +1204,12 @@ class TestObjectController(unittest.TestCase):
                 pass
             self.assertEqual(res.status_int, expected)

+    def _sleep_enough(self, condition):
+        for sleeptime in (0.1, 1.0):
+            sleep(sleeptime)
+            if condition():
+                break
+
     @unpatch_policies
     def test_policy_IO(self):
         def check_file(policy, cont, devs, check_val):
@@ -5625,7 +5631,9 @@ class TestObjectController(unittest.TestCase):
         # read most of the object, and disconnect
         fd.read(10)
         sock.fd._sock.close()
-        sleep(0.1)
+        condition = \
+            lambda: _test_servers[0].logger.get_lines_for_level('warning')
+        self._sleep_enough(condition)

         # check for disconnect message!
         expected = ['Client disconnected on read'] * 2
@@ -5665,7 +5673,9 @@ class TestObjectController(unittest.TestCase):
         fd.close()
         sock.close()
         # sleep to trampoline enough
-        sleep(0.1)
+        condition = \
+            lambda: _test_servers[0].logger.get_lines_for_level('warning')
+        self._sleep_enough(condition)
         expected = ['Client disconnected without sending enough data']
         warns = _test_servers[0].logger.get_lines_for_level('warning')
         self.assertEqual(expected, warns)

From 84ba24a75640be4212e0f984c284faf4c894e7c6 Mon Sep 17 00:00:00 2001
From: Alistair Coles
Date: Fri, 18 Dec 2015 11:24:34 +0000
Subject: [PATCH 18/52] Fix rst errors so that html docs are complete

RST table format errors don't break the gate job, but they do cause
sections of the documents to go missing from the HTML output.

Change-Id: Ic8c9953c93d03dcdafd8f47b271d276c7b356dc3
---
 doc/source/deployment_guide.rst | 244 ++++++++++++++++----------------
 1 file changed, 123 insertions(+), 121 deletions(-)

diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst
index befa0d19f3..f06afc483b 100644
--- a/doc/source/deployment_guide.rst
+++ b/doc/source/deployment_guide.rst
@@ -510,9 +510,9 @@ container_update_timeout      1                      Time to wait while sending a contai

 [object-server]

-============================= ====================== =================================
+============================= ====================== ===============================================
 Option                        Default                Description
------------------------------ ---------------------- ---------------------------------
+----------------------------- ---------------------- -----------------------------------------------
 use                                                  paste.deploy entry point for the
                                                      object server. For most cases,
                                                      this should be
@@ -537,9 +537,9 @@ keep_cache_private            false                  Allow non-public objects t
                                                      in kernel's buffer cache
 allowed_headers               Content-Disposition,   Comma separated list of headers
                               Content-Encoding,      that can be set in metadata on an object.
- X-Delete-At, This list is in addition to X-Object-Meta-* headers and cannot include - X-Object-Manifest, Content-Type, etag, Content-Length, or deleted - X-Static-Large-Object + X-Delete-At, This list is in addition to + X-Object-Manifest, X-Object-Meta-* headers and cannot include + X-Static-Large-Object Content-Type, etag, Content-Length, or deleted auto_create_account_prefix . Prefix used when automatically creating accounts. threads_per_disk 0 Size of the per-disk thread pool @@ -596,98 +596,99 @@ splice no Use splice() for zero-copy will appear in the object server logs at startup, but your object servers should continue to function. -============================= ====================== ================================= +============================= ====================== =============================================== [object-replicator] -================== ======================== ================================ -Option Default Description ------------------- ------------------------ -------------------------------- -log_name object-replicator Label used when logging -log_facility LOG_LOCAL0 Syslog log facility -log_level INFO Logging level -log_address /dev/log Logging directory -daemonize yes Whether or not to run replication - as a daemon -interval 30 Time in seconds to wait between - replication passes -concurrency 1 Number of replication workers to - spawn -sync_method rsync The sync method to use; default is - rsync but you can use ssync to try the - EXPERIMENTAL all-swift-code-no-rsync-callouts - method. Once ssync is verified - as having performance comparable to, - or better than, rsync, we plan to - deprecate rsync so we can move on - with more features for replication. -rsync_timeout 900 Max duration of a partition rsync -rsync_bwlimit 0 Bandwidth limit for rsync in kB/s. - 0 means unlimited. -rsync_io_timeout 30 Timeout value sent to rsync - --timeout and --contimeout - options -rsync_compress no Allow rsync to compress data - which is transmitted to destination - node during sync. However, this - is applicable only when destination - node is in a different region - than the local one. - NOTE: Objects that are already - compressed (for example: .tar.gz, - .mp3) might slow down the syncing - process. -stats_interval 300 Interval in seconds between - logging replication statistics -reclaim_age 604800 Time elapsed in seconds before an - object can be reclaimed -handoffs_first false If set to True, partitions that - are not supposed to be on the - node will be replicated first. - The default setting should not be - changed, except for extreme - situations. -handoff_delete auto By default handoff partitions - will be removed when it has - successfully replicated to all - the canonical nodes. If set to an - integer n, it will remove the - partition if it is successfully - replicated to n nodes. The - default setting should not be - changed, except for extreme - situations. -node_timeout DEFAULT or 10 Request timeout to external - services. This uses what's set - here, or what's set in the - DEFAULT section, or 10 (though - other sections use 3 as the final - default). -http_timeout 60 Max duration of an http request. - This is for REPLICATE finalization - calls and so should be longer - than node_timeout. -lockup_timeout 1800 Attempts to kill all workers if - nothing replicates for - lockup_timeout seconds -rsync_module {replication_ip}::object Format of the rsync module where - the replicator will send data. 
-                                            The configuration value can
-                                            include some variables that will
-                                            be extracted from the ring.
-                                            Variables must follow the format
-                                            {NAME} where NAME is one of: ip,
-                                            port, replication_ip,
-                                            replication_port, region, zone,
-                                            device, meta. See
-                                            etc/rsyncd.conf-sample for some
-                                            examples.
-rsync_error_log_line_length 0              Limits how long rsync error log
-                                            lines are
-ring_check_interval 15                      Interval for checking new ring
-                                            file
-recon_cache_path   /var/cache/swift         Path to recon cache
-================== ======================== ================================
+=========================== ======================== ================================
+Option                      Default                  Description
+--------------------------- ------------------------ --------------------------------
+log_name                    object-replicator        Label used when logging
+log_facility                LOG_LOCAL0               Syslog log facility
+log_level                   INFO                     Logging level
+log_address                 /dev/log                 Logging directory
+daemonize                   yes                      Whether or not to run replication
+                                                     as a daemon
+interval                    30                       Time in seconds to wait between
+                                                     replication passes
+concurrency                 1                        Number of replication workers to
+                                                     spawn
+sync_method                 rsync                    The sync method to use; default
+                                                     is rsync but you can use ssync to
+                                                     try the EXPERIMENTAL
+                                                     all-swift-code-no-rsync-callouts
+                                                     method. Once ssync is verified as
+                                                     having performance comparable to,
+                                                     or better than, rsync, we plan to
+                                                     deprecate rsync so we can move on
+                                                     with more features for
+                                                     replication.
+rsync_timeout               900                      Max duration of a partition rsync
+rsync_bwlimit               0                        Bandwidth limit for rsync in kB/s.
+                                                     0 means unlimited.
+rsync_io_timeout            30                       Timeout value sent to rsync
+                                                     --timeout and --contimeout
+                                                     options
+rsync_compress              no                       Allow rsync to compress data
+                                                     which is transmitted to destination
+                                                     node during sync. However, this
+                                                     is applicable only when destination
+                                                     node is in a different region
+                                                     than the local one.
+                                                     NOTE: Objects that are already
+                                                     compressed (for example: .tar.gz,
+                                                     .mp3) might slow down the syncing
+                                                     process.
+stats_interval              300                      Interval in seconds between
+                                                     logging replication statistics
+reclaim_age                 604800                   Time elapsed in seconds before an
+                                                     object can be reclaimed
+handoffs_first              false                    If set to True, partitions that
+                                                     are not supposed to be on the
+                                                     node will be replicated first.
+                                                     The default setting should not be
+                                                     changed, except for extreme
+                                                     situations.
+handoff_delete              auto                     By default handoff partitions
+                                                     will be removed when it has
+                                                     successfully replicated to all
+                                                     the canonical nodes. If set to an
+                                                     integer n, it will remove the
+                                                     partition if it is successfully
+                                                     replicated to n nodes. The
+                                                     default setting should not be
+                                                     changed, except for extreme
+                                                     situations.
+node_timeout                DEFAULT or 10            Request timeout to external
+                                                     services. This uses what's set
+                                                     here, or what's set in the
+                                                     DEFAULT section, or 10 (though
+                                                     other sections use 3 as the final
+                                                     default).
+http_timeout                60                       Max duration of an http request.
+                                                     This is for REPLICATE finalization
+                                                     calls and so should be longer
+                                                     than node_timeout.
+lockup_timeout              1800                     Attempts to kill all workers if
+                                                     nothing replicates for
+                                                     lockup_timeout seconds
+rsync_module                {replication_ip}::object Format of the rsync module where
+                                                     the replicator will send data.
+                                                     The configuration value can
+                                                     include some variables that will
+                                                     be extracted from the ring.
+                                                     Variables must follow the format
+                                                     {NAME} where NAME is one of: ip,
+                                                     port, replication_ip,
+                                                     replication_port, region, zone,
+                                                     device, meta. See
+                                                     etc/rsyncd.conf-sample for some
+                                                     examples.
+rsync_error_log_line_length 0 Limits how long rsync error log + lines are +ring_check_interval 15 Interval for checking new ring + file +recon_cache_path /var/cache/swift Path to recon cache +=========================== ======================== ================================ [object-updater] @@ -822,7 +823,7 @@ set log_address /dev/log Logging directory node_timeout 3 Request timeout to external services conn_timeout 0.5 Connection timeout to external services allow_versions false Enable/Disable object versioning feature -auto_create_account_prefix . Prefix used when automatically +auto_create_account_prefix . Prefix used when automatically replication_server Configure parameter for creating specific server. To handle all verbs, including replication verbs, do not @@ -887,15 +888,15 @@ rsync_module {replication_ip}::container Format of the rsync module etc/rsyncd.conf-sample for some examples. rsync_compress no Allow rsync to compress data - which is transmitted to destination - node during sync. However, this - is applicable only when destination - node is in a different region - than the local one. - NOTE: Objects that are already - compressed (for example: .tar.gz, - .mp3) might slow down the syncing - process. + which is transmitted to + destination node during sync. + However, this is applicable + only when destination node is + in a different region than the + local one. NOTE: Objects that + are already compressed (for + example: .tar.gz, mp3) might + slow down the syncing process. recon_cache_path /var/cache/swift Path to recon cache ================== =========================== ============================= @@ -1090,15 +1091,15 @@ rsync_module {replication_ip}::account Format of the rsync module where etc/rsyncd.conf-sample for some examples. rsync_compress no Allow rsync to compress data - which is transmitted to destination - node during sync. However, this - is applicable only when destination - node is in a different region - than the local one. - NOTE: Objects that are already - compressed (for example: .tar.gz, - .mp3) might slow down the syncing - process. + which is transmitted to + destination node during sync. + However, this is applicable only + when destination node is in a + different region than the local + one. NOTE: Objects that are + already compressed (for example: + .tar.gz, mp3) might slow down + the syncing process. recon_cache_path /var/cache/swift Path to recon cache ================== ========================= =============================== @@ -1159,9 +1160,9 @@ The following configuration options are available: [DEFAULT] -==================================== ======================== ============================= +==================================== ======================== ======================================== Option Default Description ------------------------------------- ------------------------ ----------------------------- +------------------------------------ ------------------------ ---------------------------------------- bind_ip 0.0.0.0 IP Address for server to bind to bind_port 80 Port for server to bind to @@ -1205,11 +1206,12 @@ cors_allow_origin This is a list o strict_cors_mode True client_timeout 60 trans_id_suffix This optional suffix (default is empty) - that would be appended to the swift transaction - id allows one to easily figure out from - which cluster that X-Trans-Id belongs to. - This is very useful when one is managing - more than one swift cluster. 
+                                                              that would be appended to the swift
+                                                              transaction id allows one to easily
+                                                              figure out from which cluster that
+                                                              X-Trans-Id belongs to. This is very
+                                                              useful when one is managing more than
+                                                              one swift cluster.
 log_name                             swift                    Label used when logging
 log_facility                         LOG_LOCAL0               Syslog log facility
 log_level                            INFO                     Logging level
@@ -1246,7 +1248,7 @@ disallowed_sections swift.valid_api_versions Allows the abili
                                                               the dict level with a ".".
 expiring_objects_container_divisor   86400
 expiring_objects_account_name        expiring_objects
-==================================== ======================== =============================
+==================================== ======================== ========================================

 [proxy-server]

From 0bcd7fd50ec0763dcb366dbf43a9696ca3806f15 Mon Sep 17 00:00:00 2001
From: Bill Huber
Date: Fri, 20 Nov 2015 12:09:26 -0600
Subject: [PATCH 19/52] Update Erasure Coding Overview doc to remove Beta
 version

The major functionality of EC was released in Liberty, and the beta
designation has been removed from the code now that it is used in
production.

Change-Id: If60712045fb1af803093d6753fcd60434e637772
---
 doc/source/overview_erasure_code.rst | 14 --------------
 etc/object-server.conf-sample        |  5 +----
 2 files changed, 1 insertion(+), 18 deletions(-)

diff --git a/doc/source/overview_erasure_code.rst b/doc/source/overview_erasure_code.rst
index 0f9f00eb88..b09adcfbd3 100755
--- a/doc/source/overview_erasure_code.rst
+++ b/doc/source/overview_erasure_code.rst
@@ -2,20 +2,6 @@
 Erasure Code Support
 ====================

---------------------------
-Beta: Not production ready
---------------------------
-The erasure code support in Swift is considered "beta" at this point.
-Most major functionality is included, but it has not been tested or validated
-at large scale. This feature relies on ssync for durability. Deployers are
-urged to do extensive testing and not deploy production data using an
-erasure code storage policy.
-
-If any bugs are found during testing, please report them to
-https://bugs.launchpad.net/swift
-
-
--------------------------------
 History and Theory of Operation
 -------------------------------

diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample
index 84328b0c3f..815b63cc5d 100644
--- a/etc/object-server.conf-sample
+++ b/etc/object-server.conf-sample
@@ -172,10 +172,7 @@ use = egg:swift#recon
 # concurrency = 1
 # stats_interval = 300
 #
-# The sync method to use; default is rsync but you can use ssync to try the
-# EXPERIMENTAL all-swift-code-no-rsync-callouts method. Once ssync is verified
-# as having performance comparable to, or better than, rsync, we plan to
-# deprecate rsync so we can move on with more features for replication.
+# default is rsync, alternative is ssync
 # sync_method = rsync
 #
 # max duration of a partition rsync

From d0a026fcb8e8a9f5475699cc56e1998bdc4cd5ca Mon Sep 17 00:00:00 2001
From: Hisashi Osanai
Date: Wed, 16 Dec 2015 18:50:37 +0900
Subject: [PATCH 20/52] Fix duplication for headers in
 Access-Control-Expose-Headers

There are the following problems with Access-Control-Expose-Headers:

* Headers configured in X-Container-Meta-Access-Control-Expose-Headers
  are stored as case-sensitive strings. When a CORS request comes in,
  they are merged into Access-Control-Expose-Headers as case-sensitive
  strings, even when the same header is already present in a different
  case.

* Access-Control-Expose-Headers is built up as a list, so duplicates
  are not removed.
If X-Container/Object-Meta-XXX is configured in container/object and X-Container-Meta-Access-Control-Expose-Headers, same header is listed in Access-Control-Expose-Headers. This patch provides a fix for the problems. Change-Id: Ifc1c14eb3833ec6a851631cfc23008648463bd81 --- swift/proxy/controllers/base.py | 10 +++---- test/unit/proxy/test_server.py | 53 ++++++++++++++++++++++++++++++--- 2 files changed, 54 insertions(+), 9 deletions(-) diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index f6469192e5..1f98097c94 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -235,17 +235,17 @@ def cors_validation(func): # - headers provided by the user in # x-container-meta-access-control-expose-headers if 'Access-Control-Expose-Headers' not in resp.headers: - expose_headers = [ + expose_headers = set([ 'cache-control', 'content-language', 'content-type', 'expires', 'last-modified', 'pragma', 'etag', - 'x-timestamp', 'x-trans-id'] + 'x-timestamp', 'x-trans-id']) for header in resp.headers: if header.startswith('X-Container-Meta') or \ header.startswith('X-Object-Meta'): - expose_headers.append(header.lower()) + expose_headers.add(header.lower()) if cors_info.get('expose_headers'): - expose_headers.extend( - [header_line.strip() + expose_headers = expose_headers.union( + [header_line.strip().lower() for header_line in cors_info['expose_headers'].split(' ') if header_line.strip()]) diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index c3e8fc6b20..8dbd3e799a 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -5847,7 +5847,9 @@ class TestObjectController(unittest.TestCase): def stubContainerInfo(*args): return { 'cors': { - 'allow_origin': 'http://not.foo.bar' + 'allow_origin': 'http://not.foo.bar', + 'expose_headers': 'X-Object-Meta-Color ' + 'X-Object-Meta-Color-Ex' } } controller.container_info = stubContainerInfo @@ -5872,14 +5874,15 @@ class TestObjectController(unittest.TestCase): self.assertEqual('red', resp.headers['x-object-meta-color']) # X-Super-Secret is in the response, but not "exposed" self.assertEqual('hush', resp.headers['x-super-secret']) - self.assertTrue('access-control-expose-headers' in resp.headers) + self.assertIn('access-control-expose-headers', resp.headers) exposed = set( h.strip() for h in resp.headers['access-control-expose-headers'].split(',')) expected_exposed = set(['cache-control', 'content-language', 'content-type', 'expires', 'last-modified', 'pragma', 'etag', 'x-timestamp', - 'x-trans-id', 'x-object-meta-color']) + 'x-trans-id', 'x-object-meta-color', + 'x-object-meta-color-ex']) self.assertEqual(expected_exposed, exposed) controller.app.strict_cors_mode = True @@ -5891,7 +5894,49 @@ class TestObjectController(unittest.TestCase): resp = cors_validation(objectGET)(controller, req) self.assertEqual(200, resp.status_int) - self.assertTrue('access-control-allow-origin' not in resp.headers) + self.assertNotIn('access-control-expose-headers', resp.headers) + self.assertNotIn('access-control-allow-origin', resp.headers) + + controller.app.strict_cors_mode = False + + def stubContainerInfoWithAsteriskAllowOrigin(*args): + return { + 'cors': { + 'allow_origin': '*' + } + } + controller.container_info = \ + stubContainerInfoWithAsteriskAllowOrigin + + req = Request.blank( + '/v1/a/c/o.jpg', + {'REQUEST_METHOD': 'GET'}, + headers={'Origin': 'http://foo.bar'}) + + resp = cors_validation(objectGET)(controller, req) + + self.assertEqual(200, resp.status_int) + 
self.assertEqual('*', + resp.headers['access-control-allow-origin']) + + def stubContainerInfoWithEmptyAllowOrigin(*args): + return { + 'cors': { + 'allow_origin': '' + } + } + controller.container_info = stubContainerInfoWithEmptyAllowOrigin + + req = Request.blank( + '/v1/a/c/o.jpg', + {'REQUEST_METHOD': 'GET'}, + headers={'Origin': 'http://foo.bar'}) + + resp = cors_validation(objectGET)(controller, req) + + self.assertEqual(200, resp.status_int) + self.assertEqual('http://foo.bar', + resp.headers['access-control-allow-origin']) def test_CORS_valid_with_obj_headers(self): with save_globals(): From 684c4c04592278a280032002b5313b171ee7a4c0 Mon Sep 17 00:00:00 2001 From: janonymous Date: Sun, 2 Aug 2015 22:47:42 +0530 Subject: [PATCH 21/52] Python 3 deprecated the logger.warn method in favor of warning DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead Change-Id: I35df44374c4521b1f06be7a96c0b873e8c3674d8 --- swift/account/reaper.py | 4 ++-- swift/common/db_replicator.py | 2 +- swift/common/memcached.py | 1 - swift/common/middleware/keystoneauth.py | 6 +++--- swift/common/middleware/tempauth.py | 6 ++++-- swift/common/utils.py | 24 ++++++++++++------------ swift/container/updater.py | 2 +- swift/obj/diskfile.py | 14 +++++++------- swift/obj/reconstructor.py | 4 ++-- swift/obj/replicator.py | 22 ++++++++++++---------- swift/obj/updater.py | 9 +++++---- swift/proxy/controllers/base.py | 4 ++-- swift/proxy/controllers/obj.py | 12 ++++++------ swift/proxy/server.py | 7 ++++--- test/unit/__init__.py | 9 +++++++++ test/unit/common/test_utils.py | 8 ++++---- test/unit/obj/test_updater.py | 2 +- 17 files changed, 75 insertions(+), 61 deletions(-) diff --git a/swift/account/reaper.py b/swift/account/reaper.py index 5ac491cac9..696277ca2d 100644 --- a/swift/account/reaper.py +++ b/swift/account/reaper.py @@ -311,8 +311,8 @@ class AccountReaper(Daemon): delete_timestamp = Timestamp(info['delete_timestamp']) if self.stats_containers_remaining and \ begin - float(delete_timestamp) >= self.reap_not_done_after: - self.logger.warn(_('Account %s has not been reaped since %s') % - (account, delete_timestamp.isoformat)) + self.logger.warning(_('Account %s has not been reaped since %s') % + (account, delete_timestamp.isoformat)) return True def reap_container(self, account, account_partition, account_nodes, diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py index 616e742ca6..b67b71520e 100644 --- a/swift/common/db_replicator.py +++ b/swift/common/db_replicator.py @@ -632,7 +632,7 @@ class Replicator(Daemon): [(failure_dev['replication_ip'], failure_dev['device']) for failure_dev in self.ring.devs if failure_dev]) - self.logger.warn( + self.logger.warning( _('Skipping %(device)s as it is not mounted') % node) continue unlink_older_than( diff --git a/swift/common/memcached.py b/swift/common/memcached.py index 65e0da8afe..bb359539ae 100644 --- a/swift/common/memcached.py +++ b/swift/common/memcached.py @@ -357,7 +357,6 @@ class MemcacheRing(object): :returns: result of decrementing :raises MemcacheConnectionError: """ - return self.incr(key, delta=-delta, time=time) def delete(self, key): diff --git a/swift/common/middleware/keystoneauth.py b/swift/common/middleware/keystoneauth.py index 6a0b91bbc4..a00701c39b 100644 --- a/swift/common/middleware/keystoneauth.py +++ b/swift/common/middleware/keystoneauth.py @@ -325,9 +325,9 @@ class KeystoneAuth(object): # unknown domain, update if req confirms domain new_id = req_id or '' elif req_has_id and sysmeta_id != req_id: - 
self.logger.warn("Inconsistent project domain id: " + - "%s in token vs %s in account metadata." - % (req_id, sysmeta_id)) + self.logger.warning("Inconsistent project domain id: " + + "%s in token vs %s in account metadata." + % (req_id, sysmeta_id)) if new_id is not None: req.headers[PROJECT_DOMAIN_ID_SYSMETA_HEADER] = new_id diff --git a/swift/common/middleware/tempauth.py b/swift/common/middleware/tempauth.py index 48f791ada3..9eec784a6e 100644 --- a/swift/common/middleware/tempauth.py +++ b/swift/common/middleware/tempauth.py @@ -429,10 +429,12 @@ class TempAuth(object): try: acls = acls_from_account_info(info) except ValueError as e1: - self.logger.warn("Invalid ACL stored in metadata: %r" % e1) + self.logger.warning("Invalid ACL stored in metadata: %r" % e1) return None except NotImplementedError as e2: - self.logger.warn("ACL version exceeds middleware version: %r" % e2) + self.logger.warning( + "ACL version exceeds middleware version: %r" + % e2) return None return acls diff --git a/swift/common/utils.py b/swift/common/utils.py index ab80487a02..dd9377dbfb 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -389,8 +389,8 @@ def load_libc_function(func_name, log_error=True, if fail_if_missing: raise if log_error: - logging.warn(_("Unable to locate %s in libc. Leaving as a " - "no-op."), func_name) + logging.warning(_("Unable to locate %s in libc. Leaving as a " + "no-op."), func_name) return noop_libc_function @@ -580,8 +580,8 @@ class FallocateWrapper(object): if self.fallocate is not noop_libc_function: break if self.fallocate is noop_libc_function: - logging.warn(_("Unable to locate fallocate, posix_fallocate in " - "libc. Leaving as a no-op.")) + logging.warning(_("Unable to locate fallocate, posix_fallocate in " + "libc. Leaving as a no-op.")) def __call__(self, fd, mode, offset, length): """The length parameter must be a ctypes.c_uint64.""" @@ -664,8 +664,8 @@ def fsync_dir(dirpath): if err.errno == errno.ENOTDIR: # Raise error if someone calls fsync_dir on a non-directory raise - logging.warn(_("Unable to perform fsync() on directory %s: %s"), - dirpath, os.strerror(err.errno)) + logging.warning(_("Unable to perform fsync() on directory %s: %s"), + dirpath, os.strerror(err.errno)) finally: if dirfd: os.close(dirfd) @@ -686,9 +686,9 @@ def drop_buffer_cache(fd, offset, length): ret = _posix_fadvise(fd, ctypes.c_uint64(offset), ctypes.c_uint64(length), 4) if ret != 0: - logging.warn("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) " - "-> %(ret)s", {'fd': fd, 'offset': offset, - 'length': length, 'ret': ret}) + logging.warning("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) " + "-> %(ret)s", {'fd': fd, 'offset': offset, + 'length': length, 'ret': ret}) NORMAL_FORMAT = "%016.05f" @@ -1176,7 +1176,7 @@ class StatsdClient(object): return sock.sendto('|'.join(parts), self._target) except IOError as err: if self.logger: - self.logger.warn( + self.logger.warning( 'Error sending UDP message to %r: %s', self._target, err) @@ -1261,7 +1261,6 @@ class LogAdapter(logging.LoggerAdapter, object): def __init__(self, logger, server): logging.LoggerAdapter.__init__(self, logger, {}) self.server = server - setattr(self, 'warn', self.warning) @property def txn_id(self): @@ -3561,7 +3560,8 @@ def document_iters_to_http_response_body(ranges_iter, boundary, multipart, except StopIteration: pass else: - logger.warn("More than one part in a single-part response?") + logger.warning( + "More than one part in a single-part response?") return string_along(response_body_iter, 
ranges_iter, logger) diff --git a/swift/container/updater.py b/swift/container/updater.py index f070e5f570..3d79db2032 100644 --- a/swift/container/updater.py +++ b/swift/container/updater.py @@ -89,7 +89,7 @@ class ContainerUpdater(Daemon): for device in self._listdir(self.devices): dev_path = os.path.join(self.devices, device) if self.mount_check and not ismount(dev_path): - self.logger.warn(_('%s is not mounted'), device) + self.logger.warning(_('%s is not mounted'), device) continue con_path = os.path.join(dev_path, DATADIR) if not os.path.exists(con_path): diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index ebb849a9e9..e0ff11d330 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -303,8 +303,8 @@ def object_audit_location_generator(devices, mount_check=True, logger=None, base, policy = split_policy_string(dir_) except PolicyError as e: if logger: - logger.warn(_('Directory %r does not map ' - 'to a valid policy (%s)') % (dir_, e)) + logger.warning(_('Directory %r does not map ' + 'to a valid policy (%s)') % (dir_, e)) continue datadir_path = os.path.join(devices, device, dir_) partitions = listdir(datadir_path) @@ -420,7 +420,7 @@ class BaseDiskFileManager(object): # If the operator wants zero-copy with splice() but we don't have the # requisite kernel support, complain so they can go fix it. if conf_wants_splice and not splice.available: - self.logger.warn( + self.logger.warning( "Use of splice() requested (config says \"splice = %s\"), " "but the system does not support it. " "splice() will not be used." % conf.get('splice')) @@ -434,8 +434,8 @@ class BaseDiskFileManager(object): # AF_ALG support), we can't use zero-copy. if err.errno != errno.EAFNOSUPPORT: raise - self.logger.warn("MD5 sockets not supported. " - "splice() will not be used.") + self.logger.warning("MD5 sockets not supported. 
" + "splice() will not be used.") else: self.use_splice = True with open('/proc/sys/fs/pipe-max-size') as f: @@ -1404,7 +1404,7 @@ class BaseDiskFileReader(object): self._quarantined_dir = self._threadpool.run_in_thread( self.manager.quarantine_renamer, self._device_path, self._data_file) - self._logger.warn("Quarantined object %s: %s" % ( + self._logger.warning("Quarantined object %s: %s" % ( self._data_file, msg)) self._logger.increment('quarantines') self._quarantine_hook(msg) @@ -1674,7 +1674,7 @@ class BaseDiskFile(object): """ self._quarantined_dir = self._threadpool.run_in_thread( self.manager.quarantine_renamer, self._device_path, data_file) - self._logger.warn("Quarantined object %s: %s" % ( + self._logger.warning("Quarantined object %s: %s" % ( data_file, msg)) self._logger.increment('quarantines') return DiskFileQuarantined(msg) diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py index 9ead83b1ac..151c00c1e7 100644 --- a/swift/obj/reconstructor.py +++ b/swift/obj/reconstructor.py @@ -819,8 +819,8 @@ class ObjectReconstructor(Daemon): dev_path = self._df_router[policy].get_dev_path( local_dev['device']) if not dev_path: - self.logger.warn(_('%s is not mounted'), - local_dev['device']) + self.logger.warning(_('%s is not mounted'), + local_dev['device']) continue obj_path = join(dev_path, data_dir) tmp_path = join(dev_path, get_tmp_dir(int(policy))) diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py index aa9686133d..8daeb051f2 100644 --- a/swift/obj/replicator.py +++ b/swift/obj/replicator.py @@ -85,10 +85,11 @@ class ObjectReplicator(Daemon): if not self.rsync_module: self.rsync_module = '{replication_ip}::object' if config_true_value(conf.get('vm_test_mode', 'no')): - self.logger.warn('Option object-replicator/vm_test_mode is ' - 'deprecated and will be removed in a future ' - 'version. Update your configuration to use ' - 'option object-replicator/rsync_module.') + self.logger.warning('Option object-replicator/vm_test_mode ' + 'is deprecated and will be removed in a ' + 'future version. 
Update your ' + 'configuration to use option ' + 'object-replicator/rsync_module.') self.rsync_module += '{replication_port}' self.http_timeout = int(conf.get('http_timeout', 60)) self.lockup_timeout = int(conf.get('lockup_timeout', 1800)) @@ -109,10 +110,10 @@ class ObjectReplicator(Daemon): self.handoff_delete = config_auto_int_value( conf.get('handoff_delete', 'auto'), 0) if any((self.handoff_delete, self.handoffs_first)): - self.logger.warn('Handoff only mode is not intended for normal ' - 'operation, please disable handoffs_first and ' - 'handoff_delete before the next ' - 'normal rebalance') + self.logger.warning('Handoff only mode is not intended for normal ' + 'operation, please disable handoffs_first and ' + 'handoff_delete before the next ' + 'normal rebalance') self._diskfile_mgr = DiskFileManager(conf, self.logger) def _zero_stats(self): @@ -585,7 +586,8 @@ class ObjectReplicator(Daemon): failure_dev['device']) for failure_dev in policy.object_ring.devs if failure_dev]) - self.logger.warn(_('%s is not mounted'), local_dev['device']) + self.logger.warning( + _('%s is not mounted'), local_dev['device']) continue unlink_older_than(tmp_path, time.time() - self.reclaim_age) if not os.path.exists(obj_path): @@ -701,7 +703,7 @@ class ObjectReplicator(Daemon): self._add_failure_stats([(failure_dev['replication_ip'], failure_dev['device']) for failure_dev in job['nodes']]) - self.logger.warn(_('%s is not mounted'), job['device']) + self.logger.warning(_('%s is not mounted'), job['device']) continue if not self.check_ring(job['policy'].object_ring): self.logger.info(_("Ring change detected. Aborting " diff --git a/swift/obj/updater.py b/swift/obj/updater.py index 675c7c509f..e84ddfd466 100644 --- a/swift/obj/updater.py +++ b/swift/obj/updater.py @@ -84,7 +84,7 @@ class ObjectUpdater(Daemon): if self.mount_check and \ not ismount(os.path.join(self.devices, device)): self.logger.increment('errors') - self.logger.warn( + self.logger.warning( _('Skipping %s as it is not mounted'), device) continue while len(pids) >= self.concurrency: @@ -127,7 +127,7 @@ class ObjectUpdater(Daemon): if self.mount_check and \ not ismount(os.path.join(self.devices, device)): self.logger.increment('errors') - self.logger.warn( + self.logger.warning( _('Skipping %s as it is not mounted'), device) continue self.object_sweep(os.path.join(self.devices, device)) @@ -159,8 +159,9 @@ class ObjectUpdater(Daemon): try: base, policy = split_policy_string(asyncdir) except PolicyError as e: - self.logger.warn(_('Directory %r does not map ' - 'to a valid policy (%s)') % (asyncdir, e)) + self.logger.warning(_('Directory %r does not map ' + 'to a valid policy (%s)') % + (asyncdir, e)) continue for prefix in self._listdir(async_pending): prefix_path = os.path.join(async_pending, prefix) diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index 1f98097c94..7fc08a06e2 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -941,13 +941,13 @@ class ResumingGetter(object): _('Trying to read during GET')) raise except ChunkWriteTimeout: - self.app.logger.warn( + self.app.logger.warning( _('Client did not read from proxy within %ss') % self.app.client_timeout) self.app.logger.increment('client_timeouts') except GeneratorExit: if not req.environ.get('swift.non_client_disconnect'): - self.app.logger.warn(_('Client disconnected on read')) + self.app.logger.warning(_('Client disconnected on read')) except Exception: self.app.logger.exception(_('Trying to send to client')) raise 
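The rename is mechanical, but the deprecation it addresses is easy to see in isolation. A minimal sketch using only stock Python 3 logging (the logger name here is arbitrary):

    import logging
    import warnings

    logger = logging.getLogger('demo')

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        logger.warn('old spelling')      # deprecated alias on Python 3
        logger.warning('new spelling')   # the supported method

    # prints: The 'warn' method is deprecated, use 'warning' instead
    print(caught[0].message)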
diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index 8c6b6bbab3..e5910d312e 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -981,7 +981,7 @@ class ReplicatedObjectController(BaseObjectController): msg='Object PUT exceptions after last send, ' '%(conns)s/%(nodes)s required connections') except ChunkReadTimeout as err: - self.app.logger.warn( + self.app.logger.warning( _('ERROR Client read timeout (%ss)'), err.seconds) self.app.logger.increment('client_timeouts') raise HTTPRequestTimeout(request=req) @@ -989,7 +989,7 @@ class ReplicatedObjectController(BaseObjectController): raise except ChunkReadError: req.client_disconnect = True - self.app.logger.warn( + self.app.logger.warning( _('Client disconnected without sending last chunk')) self.app.logger.increment('client_disconnects') raise HTTPClientDisconnect(request=req) @@ -1004,7 +1004,7 @@ class ReplicatedObjectController(BaseObjectController): raise HTTPInternalServerError(request=req) if req.content_length and bytes_transferred < req.content_length: req.client_disconnect = True - self.app.logger.warn( + self.app.logger.warning( _('Client disconnected without sending enough data')) self.app.logger.increment('client_disconnects') raise HTTPClientDisconnect(request=req) @@ -2209,7 +2209,7 @@ class ECObjectController(BaseObjectController): if req.content_length and ( bytes_transferred < req.content_length): req.client_disconnect = True - self.app.logger.warn( + self.app.logger.warning( _('Client disconnected without sending enough data')) self.app.logger.increment('client_disconnects') raise HTTPClientDisconnect(request=req) @@ -2278,13 +2278,13 @@ class ECObjectController(BaseObjectController): for putter in putters: putter.wait() except ChunkReadTimeout as err: - self.app.logger.warn( + self.app.logger.warning( _('ERROR Client read timeout (%ss)'), err.seconds) self.app.logger.increment('client_timeouts') raise HTTPRequestTimeout(request=req) except ChunkReadError: req.client_disconnect = True - self.app.logger.warn( + self.app.logger.warning( _('Client disconnected without sending last chunk')) self.app.logger.increment('client_disconnects') raise HTTPClientDisconnect(request=req) diff --git a/swift/proxy/server.py b/swift/proxy/server.py index 0747a861bb..3ecf93dbe8 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -229,9 +229,10 @@ class Application(object): Check the configuration for possible errors """ if self._read_affinity and self.sorting_method != 'affinity': - self.logger.warn("sorting_method is set to '%s', not 'affinity'; " - "read_affinity setting will have no effect." % - self.sorting_method) + self.logger.warning( + "sorting_method is set to '%s', not 'affinity'; " + "read_affinity setting will have no effect." 
% + self.sorting_method) def get_object_ring(self, policy_idx): """ diff --git a/test/unit/__init__.py b/test/unit/__init__.py index 6ab3618780..ec6a2a0985 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -477,6 +477,12 @@ class UnmockTimeModule(object): logging.time = UnmockTimeModule() +class WARN_DEPRECATED(Exception): + def __init__(self, msg): + self.msg = msg + print(self.msg) + + class FakeLogger(logging.Logger, object): # a thread safe fake logger @@ -499,6 +505,9 @@ class FakeLogger(logging.Logger, object): NOTICE: 'notice', } + def warn(self, *args, **kwargs): + raise WARN_DEPRECATED("Deprecated Method warn use warning instead") + def notice(self, msg, *args, **kwargs): """ Convenience function for syslog priority LOG_NOTICE. The python diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index a336e78b60..dcc24042ba 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -1221,7 +1221,7 @@ class TestUtils(unittest.TestCase): logger = logging.getLogger('server') logger.addHandler(logging.StreamHandler(sio)) logger = utils.get_logger(None, 'server', log_route='server') - logger.warn('test1') + logger.warning('test1') self.assertEqual(sio.getvalue(), 'test1\n') logger.debug('test2') self.assertEqual(sio.getvalue(), 'test1\n') @@ -1233,7 +1233,7 @@ class TestUtils(unittest.TestCase): # way to syslog; but exercises the code. logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server', log_route='server') - logger.warn('test4') + logger.warning('test4') self.assertEqual(sio.getvalue(), 'test1\ntest3\ntest4\n') # make sure debug doesn't log by default @@ -1491,7 +1491,7 @@ class TestUtils(unittest.TestCase): self.assertTrue('12345' not in log_msg) # test txn already in message self.assertEqual(logger.txn_id, '12345') - logger.warn('test 12345 test') + logger.warning('test 12345 test') self.assertEqual(strip_value(sio), 'test 12345 test\n') # Test multi line collapsing logger.error('my\nerror\nmessage') @@ -1517,7 +1517,7 @@ class TestUtils(unittest.TestCase): self.assertTrue('1.2.3.4' not in log_msg) # test client_ip (and txn) already in message self.assertEqual(logger.client_ip, '1.2.3.4') - logger.warn('test 1.2.3.4 test 12345') + logger.warning('test 1.2.3.4 test 12345') self.assertEqual(strip_value(sio), 'test 1.2.3.4 test 12345\n') finally: logger.logger.removeHandler(handler) diff --git a/test/unit/obj/test_updater.py b/test/unit/obj/test_updater.py index 3900bf4944..db6541e956 100644 --- a/test/unit/obj/test_updater.py +++ b/test/unit/obj/test_updater.py @@ -183,7 +183,7 @@ class TestObjectUpdater(unittest.TestCase): 'node_timeout': '5'}) cu.logger = mock_logger = mock.MagicMock() cu.object_sweep(self.sda1) - self.assertEqual(mock_logger.warn.call_count, warn) + self.assertEqual(mock_logger.warning.call_count, warn) self.assertTrue( os.path.exists(os.path.join(self.sda1, 'not_a_dir'))) if should_skip: From 79222e327f9df6335b58e17a6c8dd0dc44b86c17 Mon Sep 17 00:00:00 2001 From: "ChangBo Guo(gcb)" Date: Sat, 26 Dec 2015 13:13:37 +0800 Subject: [PATCH 22/52] Fix AttributeError for LogAdapter LogAdapter object has no attribute 'warn' but has attribute 'warning'. 
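The crash is easy to reproduce against the stdlib adapter that Swift's LogAdapter extends; a minimal sketch, assuming Python 2.7, where logging.LoggerAdapter defines warning() but never had a warn alias:

    import logging

    adapter = logging.LoggerAdapter(logging.getLogger('demo'), {})
    adapter.warning('fine')  # defined by LoggerAdapter
    adapter.warn('boom')     # AttributeError under Python 2.7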
Closes-Bug: #1529321
Change-Id: I0e0bd0a3dbc4bb5c1f0b343a8809e53491a1da5f
---
 swift/common/db_replicator.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py
index b67b71520e..d4abf25efe 100644
--- a/swift/common/db_replicator.py
+++ b/swift/common/db_replicator.py
@@ -174,11 +174,12 @@ class Replicator(Daemon):
         if not self.rsync_module:
             self.rsync_module = '{replication_ip}::%s' % self.server_type
         if config_true_value(conf.get('vm_test_mode', 'no')):
-            self.logger.warn('Option %(type)s-replicator/vm_test_mode is '
-                             'deprecated and will be removed in a future '
-                             'version. Update your configuration to use '
-                             'option %(type)s-replicator/rsync_module.'
-                             % {'type': self.server_type})
+            self.logger.warning('Option %(type)s-replicator/vm_test_mode '
+                                'is deprecated and will be removed in a '
+                                'future version. Update your configuration'
+                                ' to use option %(type)s-replicator/'
+                                'rsync_module.'
+                                % {'type': self.server_type})
         self.rsync_module += '{replication_port}'
         self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
         swift.common.db.DB_PREALLOCATION = \

From 3b1591f235f4b85796917507be5e7fd80365ff9e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?=
Date: Wed, 30 Sep 2015 19:08:09 +0200
Subject: [PATCH 23/52] swift-init: New option kill-after-timeout

This option sends SIGKILL to the daemon after the kill_wait period.

When a daemon hangs and doesn't respond to SIGTERM/SIGHUP, there is
currently no way to stop it using swift-init. Classic init scripts on
Linux kill a hung process after a grace period, and this patch adds the
same behaviour.

This is most useful when running "restart" on a hung daemon.

Change-Id: I8c932b673a0f51e52132df87ea2f4396f4bba9d8
---
 bin/swift-init                   |  5 ++
 doc/manpages/swift-init.1        |  1 +
 swift/common/manager.py          | 30 ++++++++++-
 test/unit/common/test_manager.py | 90 ++++++++++++++++++++++++++++++--
 4 files changed, 120 insertions(+), 6 deletions(-)

diff --git a/bin/swift-init b/bin/swift-init
index 3fe18cdaa6..0fcbff5708 100755
--- a/bin/swift-init
+++ b/bin/swift-init
@@ -74,6 +74,11 @@ def main():
                       help="Return zero status code even if some config is "
                            "missing. Default mode if any server is a glob or "
                            "one of aliases `all`, `main` or `rest`.")
+    # SIGKILL daemon after kill_wait period
+    parser.add_option('--kill-after-timeout', dest='kill_after_timeout',
+                      action='store_true',
+                      help="Kill daemon and all children after kill-wait "
+                           "period.")

    options, args = parser.parse_args()

diff --git a/doc/manpages/swift-init.1 b/doc/manpages/swift-init.1
index 3a0e112659..7a1ac42ab5 100644
--- a/doc/manpages/swift-init.1
+++ b/doc/manpages/swift-init.1
@@ -111,6 +111,7 @@ allows one to use the keywords such as "all", "main" and "rest" for the
 .IP "-r RUN_DIR, --run-dir=RUN_DIR directory where the pids will be stored (default /var/run/swift)
 .IP "--strict return non-zero status code if some config is missing. Default mode if server is explicitly named."
 .IP "--non-strict return zero status code even if some config is missing. Default mode if server is one of aliases `all`, `main` or `rest`."
+.IP "--kill-after-timeout kill daemon and all children after kill-wait period."
.PD .RE diff --git a/swift/common/manager.py b/swift/common/manager.py index 03eb0479e9..e67f8a32f7 100644 --- a/swift/common/manager.py +++ b/swift/common/manager.py @@ -162,6 +162,16 @@ def safe_kill(pid, sig, name): os.kill(pid, sig) +def kill_group(pid, sig): + """Send signal to process group + + : param pid: process id + : param sig: signal to send + """ + # Negative PID means process group + os.kill(-pid, sig) + + class UnknownCommandError(Exception): pass @@ -285,11 +295,27 @@ class Manager(object): return 0 # reached interval n watch_pids w/o killing all servers + kill_after_timeout = kwargs.get('kill_after_timeout', False) for server, pids in server_pids.items(): if not killed_pids.issuperset(pids): # some pids of this server were not killed - print(_('Waited %s seconds for %s to die; giving up') % ( - kill_wait, server)) + if kill_after_timeout: + print(_('Waited %s seconds for %s to die; killing') % ( + kill_wait, server)) + # Send SIGKILL to all remaining pids + for pid in set(pids.keys()) - killed_pids: + print(_('Signal %s pid: %s signal: %s') % ( + server, pid, signal.SIGKILL)) + # Send SIGKILL to process group + try: + kill_group(pid, signal.SIGKILL) + except OSError as e: + # PID died before kill_group can take action? + if e.errno != errno.ESRCH: + raise e + else: + print(_('Waited %s seconds for %s to die; giving up') % ( + kill_wait, server)) return 1 @command diff --git a/test/unit/common/test_manager.py b/test/unit/common/test_manager.py index e0d1bdb924..3280e444f0 100644 --- a/test/unit/common/test_manager.py +++ b/test/unit/common/test_manager.py @@ -1916,13 +1916,18 @@ class TestManager(unittest.TestCase): continue yield server, pid + def mock_kill_group(pid, sig): + self.fail('kill_group should not be called') + _orig_server = manager.Server _orig_watch_server_pids = manager.watch_server_pids + _orig_kill_group = manager.kill_group try: manager.watch_server_pids = mock_watch_server_pids + manager.kill_group = mock_kill_group # test stop one server server_pids = { - 'test': [1] + 'test': {1: "dummy.pid"} } manager.Server = MockServerFactory(server_pids) m = manager.Manager(['test']) @@ -1930,7 +1935,7 @@ class TestManager(unittest.TestCase): self.assertEqual(status, 0) # test not running server_pids = { - 'test': [] + 'test': {} } manager.Server = MockServerFactory(server_pids) m = manager.Manager(['test']) @@ -1938,7 +1943,7 @@ class TestManager(unittest.TestCase): self.assertEqual(status, 1) # test kill not running server_pids = { - 'test': [] + 'test': {} } manager.Server = MockServerFactory(server_pids) m = manager.Manager(['test']) @@ -1946,7 +1951,7 @@ class TestManager(unittest.TestCase): self.assertEqual(status, 0) # test won't die server_pids = { - 'test': [None] + 'test': {None: None} } manager.Server = MockServerFactory(server_pids) m = manager.Manager(['test']) @@ -1956,6 +1961,83 @@ class TestManager(unittest.TestCase): finally: manager.Server = _orig_server manager.watch_server_pids = _orig_watch_server_pids + manager.kill_group = _orig_kill_group + + def test_stop_kill_after_timeout(self): + class MockServerFactory(object): + class MockServer(object): + def __init__(self, pids, run_dir=manager.RUN_DIR): + self.pids = pids + + def stop(self, **kwargs): + return self.pids + + def status(self, **kwargs): + return not self.pids + + def __init__(self, server_pids, run_dir=manager.RUN_DIR): + self.server_pids = server_pids + + def __call__(self, server, run_dir=manager.RUN_DIR): + return MockServerFactory.MockServer(self.server_pids[server]) + + def 
mock_watch_server_pids(server_pids, **kwargs): + for server, pids in server_pids.items(): + for pid in pids: + if pid is None: + continue + yield server, pid + + mock_kill_group_called = [] + + def mock_kill_group(*args): + mock_kill_group_called.append(args) + + def mock_kill_group_oserr(*args): + raise OSError() + + def mock_kill_group_oserr_ESRCH(*args): + raise OSError(errno.ESRCH, 'No such process') + + _orig_server = manager.Server + _orig_watch_server_pids = manager.watch_server_pids + _orig_kill_group = manager.kill_group + try: + manager.watch_server_pids = mock_watch_server_pids + manager.kill_group = mock_kill_group + # test stop one server + server_pids = { + 'test': {None: None} + } + manager.Server = MockServerFactory(server_pids) + m = manager.Manager(['test']) + status = m.stop(kill_after_timeout=True) + self.assertEqual(status, 1) + self.assertEqual(mock_kill_group_called, [(None, 9)]) + + manager.kill_group = mock_kill_group_oserr + # test stop one server - OSError + server_pids = { + 'test': {None: None} + } + manager.Server = MockServerFactory(server_pids) + m = manager.Manager(['test']) + with self.assertRaises(OSError): + status = m.stop(kill_after_timeout=True) + + manager.kill_group = mock_kill_group_oserr_ESRCH + # test stop one server - OSError: No such process + server_pids = { + 'test': {None: None} + } + manager.Server = MockServerFactory(server_pids) + m = manager.Manager(['test']) + status = m.stop(kill_after_timeout=True) + self.assertEqual(status, 1) + finally: + manager.Server = _orig_server + manager.watch_server_pids = _orig_watch_server_pids + manager.kill_group = _orig_kill_group # TODO(clayg): more tests def test_shutdown(self): From f53cf1043d078451c4b9957027bf3af378aa0166 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?= Date: Tue, 5 Jan 2016 20:20:15 +0100 Subject: [PATCH 24/52] Fixed few misspellings in comments Change-Id: I8479c85cb8821c48b5da197cac37c80e5c1c7f05 --- swift/common/middleware/tempurl.py | 4 ++-- swift/common/storage_policy.py | 4 ++-- swift/common/utils.py | 2 +- test/unit/obj/test_reconstructor.py | 2 +- test/unit/obj/test_server.py | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/swift/common/middleware/tempurl.py b/swift/common/middleware/tempurl.py index dfa264bf43..b71df51c35 100644 --- a/swift/common/middleware/tempurl.py +++ b/swift/common/middleware/tempurl.py @@ -375,7 +375,7 @@ class TempURL(object): break if not is_valid_hmac: return self._invalid(env, start_response) - # disallowed headers prevent accidently allowing upload of a pointer + # disallowed headers prevent accidentally allowing upload of a pointer # to data that the PUT tempurl would not otherwise allow access for. # It should be safe to provide a GET tempurl for data that an # untrusted client just uploaded with a PUT tempurl. @@ -540,7 +540,7 @@ class TempURL(object): def _clean_disallowed_headers(self, env, start_response): """ - Validate the absense of disallowed headers for "unsafe" operations. + Validate the absence of disallowed headers for "unsafe" operations. :returns: None for safe operations or swob.HTTPBadResponse if the request includes disallowed headers. diff --git a/swift/common/storage_policy.py b/swift/common/storage_policy.py index 90fcedd661..52eb5a9d19 100755 --- a/swift/common/storage_policy.py +++ b/swift/common/storage_policy.py @@ -324,7 +324,7 @@ class BaseStoragePolicy(object): Removes an alias name from the storage policy. 
Shouldn't be called
        directly from the storage policy but instead through the storage
        policy collection class, so lookups by name resolve correctly. If
-        the name removed is the primary name then the next availiable alias
+        the name removed is the primary name then the next available alias
         will be adopted as the new primary name.

        :param name: a name assigned to the storage policy
@@ -776,7 +776,7 @@ class StoragePolicyCollection(object):
     def remove_policy_alias(self, *aliases):
         """
         Removes a name or names from a policy. If the name removed is the
-        primary name then the next availiable alias will be adopted
+        primary name then the next available alias will be adopted
         as the new primary name.

         :param *aliases: arbitrary number of existing policy names to remove.
diff --git a/swift/common/utils.py b/swift/common/utils.py
index 6dadca3264..831f651ff1 100644
--- a/swift/common/utils.py
+++ b/swift/common/utils.py
@@ -424,7 +424,7 @@ def get_log_line(req, res, trans_time, additional_info):
     :param trans_time: the time the request took to complete, a float.
     :param additional_info: a string to log at the end of the line

-    :returns: a properly formated line for logging.
+    :returns: a properly formatted line for logging.
     """

     policy_index = get_policy_index(req.headers, res.headers)
diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py
index 7baa1342af..a093a80213 100755
--- a/test/unit/obj/test_reconstructor.py
+++ b/test/unit/obj/test_reconstructor.py
@@ -221,7 +221,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):

         def part_2(set):
             # this part is a handoff in our config (always)
-            # so lets do a set with indicies from different nodes
+            # so lets do a set with indices from different nodes
             if set == 0:
                 return (local_id + 1) % 3
             else:
diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py
index 232f0091b2..ef32f29b06 100755
--- a/test/unit/obj/test_server.py
+++ b/test/unit/obj/test_server.py
@@ -5558,7 +5558,7 @@ class TestObjectServer(unittest.TestCase):
             object-metadata (e.g. X-Backend-Obj-Content-Length) is generally
             expected tomatch the test_doc)
         :param finish_body: boolean, if true send "0\r\n\r\n" after test_doc
-            and wait for 100-continue before yeilding context
+            and wait for 100-continue before yielding context
         """
         test_data = 'obj data'
         footer_meta = {

From e75888b281d59df0889f28d0b32241dac3a34aa2 Mon Sep 17 00:00:00 2001
From: HugoKuo
Date: Wed, 6 Jan 2016 14:33:23 +0800
Subject: [PATCH 25/52] Add more description for write_affinity_node_count
 parameter in the doc.

Change-Id: Iad410a2be4f9a2cd5c53e860b9f91993aa7f2369
Closes-Bug: #1531173
---
 doc/source/admin_guide.rst | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/doc/source/admin_guide.rst b/doc/source/admin_guide.rst
index cb6532b4be..2577c2aac5 100644
--- a/doc/source/admin_guide.rst
+++ b/doc/source/admin_guide.rst
@@ -463,7 +463,12 @@ Example::

 Assuming 3 replicas, this configuration will make object PUTs try
 storing the object's replicas on up to 6 disks ("2 * replicas") in
-region 1 ("r1").
+region 1 ("r1"). The proxy server tries to find 3 devices on which to
+store the object. If one of those devices is unavailable, it queries the
+ring for a 4th device, and so on, up to a 6th. If the 6th device is
+still unavailable, the last replica will be sent to another region. This
+does not mean the cluster will
+ You should be aware that, if you have data coming into SF faster than your link to NY can transfer it, then your cluster's data distribution From 85a0a6a28e166bc076cf8786de2b46248d8786a2 Mon Sep 17 00:00:00 2001 From: Eran Rom Date: Sun, 26 Jul 2015 13:31:17 +0300 Subject: [PATCH 26/52] Container-Sync to iterate only over synced containers This change introduces a sync_store which holds only containers that are enabled for sync. The store is implemented using a directory structure that resembles that of the containers directory, but has entries only for containers enabled for sync. The store is maintained in two ways: 1. Preemptively by the container server when processing PUT/POST/DELETE operations targeted at containers with x-container-sync-key / x-container-sync-to 2. In the background using the containers replicator whenever it processes a container set up for sync The change updates [1] [1] http://docs.openstack.org/developer/swift/overview_container_sync.html Change-Id: I9ae4d4c7ff6336611df4122b7c753cc4fa46c0ff Closes-Bug: #1476623 --- doc/source/overview_container_sync.rst | 71 +++-- swift/container/replicator.py | 18 ++ swift/container/server.py | 15 + swift/container/sync.py | 40 ++- swift/container/sync_store.py | 177 ++++++++++++ test/probe/test_container_sync.py | 78 +++++- test/unit/container/test_replicator.py | 133 ++++++++- test/unit/container/test_server.py | 69 +++++ test/unit/container/test_sync.py | 182 +++++++----- test/unit/container/test_sync_store.py | 367 +++++++++++++++++++++++++ 10 files changed, 1034 insertions(+), 116 deletions(-) create mode 100644 swift/container/sync_store.py create mode 100644 test/unit/container/test_sync_store.py diff --git a/doc/source/overview_container_sync.rst b/doc/source/overview_container_sync.rst index 8f03bf8174..c1255acaff 100644 --- a/doc/source/overview_container_sync.rst +++ b/doc/source/overview_container_sync.rst @@ -29,7 +29,7 @@ synchronization key. Configuring Container Sync -------------------------- -Create a container-sync-realms.conf file specifying the allowable clusters +Create a ``container-sync-realms.conf`` file specifying the allowable clusters and their information:: [realm1] @@ -50,18 +50,18 @@ clusters that have agreed to allow container syncing with each other. Realm names will be considered case insensitive. The key is the overall cluster-to-cluster key used in combination with the -external users' key that they set on their containers' X-Container-Sync-Key -metadata header values. These keys will be used to sign each request the -container sync daemon makes and used to validate each incoming container sync -request. +external users' key that they set on their containers' +``X-Container-Sync-Key`` metadata header values. These keys will be used to +sign each request the container sync daemon makes and used to validate each +incoming container sync request. The key2 is optional and is an additional key incoming requests will be checked against. This is so you can rotate keys if you wish; you move the existing key to key2 and make a new key value. 
-Any values in the realm section whose names begin with cluster\_ will indicate -the name and endpoint of a cluster and will be used by external users in -their containers' X-Container-Sync-To metadata header values with the format +Any values in the realm section whose names begin with ``cluster_`` will +indicate the name and endpoint of a cluster and will be used by external users in +their containers' ``X-Container-Sync-To`` metadata header values with the format "//realm_name/cluster_name/account_name/container_name". Realm and cluster names are considered case insensitive. @@ -71,7 +71,7 @@ container servers, since that is where the container sync daemon runs. Note that the endpoint ends with /v1/ and that the container sync daemon will then add the account/container/obj name after that. -Distribute this container-sync-realms.conf file to all your proxy servers +Distribute this ``container-sync-realms.conf`` file to all your proxy servers and container servers. You also need to add the container_sync middleware to your proxy pipeline. It @@ -95,7 +95,7 @@ section, Configuring Container Sync, for the new-style. With the old-style, the Swift cluster operator must allow synchronization with a set of hosts before the user can enable container synchronization. First, the backend container server needs to be given this list of hosts in the -container-server.conf file:: +``container-server.conf`` file:: [DEFAULT] # This is a comma separated list of hosts allowed in the @@ -170,8 +170,8 @@ we'll make next:: The ``-t`` indicates the cluster to sync to, which is the realm name of the section from container-sync-realms.conf, followed by the cluster name from -that section (without the cluster\_ prefix), followed by the account and container names we want to sync to. -The ``-k`` specifies the secret key the two containers will share for +that section (without the cluster\_ prefix), followed by the account and container +names we want to sync to. The ``-k`` specifies the secret key the two containers will share for synchronization; this is the user key, the cluster key in container-sync-realms.conf will also be used behind the scenes. @@ -195,8 +195,18 @@ as it gets synchronized over to the second:: list container2 [Nothing there yet, so we wait a bit...] - [If you're an operator running SAIO and just testing, you may need to - run 'swift-init container-sync once' to perform a sync scan.] + +.. note:: + + If you're an operator running SAIO and just testing, each time you + configure a container for synchronization and place objects in the + source container you will need to ensure that container-sync runs + before attempting to retrieve objects from the target container. + That is, you need to run:: + + swift-init container-sync once + +Now expect to see objects copied from the first container to the second:: $ swift -A http://cluster2/auth/v1.0 -U test2:tester2 -K testing2 \ list container2 @@ -340,13 +350,34 @@ synchronize to the second, we could have used this curl command:: What's going on behind the scenes, in the cluster? -------------------------------------------------- -The swift-container-sync does the job of sending updates to the remote -container. +Container ring devices have a directory called ``containers``, where container +databases reside. In addition to ``containers``, each container ring device +also has a directory called ``sync-containers``. 
``sync-containers`` holds +symlinks to container databases that were configured for container sync using +``x-container-sync-to`` and ``x-container-sync-key`` metadata keys. -This is done by scanning the local devices for container databases and -checking for x-container-sync-to and x-container-sync-key metadata values. -If they exist, newer rows since the last sync will trigger PUTs or DELETEs -to the other container. +The swift-container-sync process does the job of sending updates to the remote +container. This is done by scanning ``sync-containers`` for container +databases. For each container db found, newer rows since the last sync will +trigger PUTs or DELETEs to the other container. + +``sync-containers`` is maintained as follows: +Whenever the container-server processes a PUT or a POST request that carries +``x-container-sync-to`` and ``x-container-sync-key`` metadata keys the server +creates a symlink to the container database in ``sync-containers``. Whenever +the container server deletes a synced container, the appropriate symlink +is deleted from ``sync-containers``. + +In addition to the container-server, the container-replicator process does the +job of identifying containers that should be synchronized. This is done by +scanning the local devices for container databases and checking for +x-container-sync-to and x-container-sync-key metadata values. If they exist +then a symlink to the container database is created in a sync-containers +sub-directory on the same device. + +Similarly, when the container sync metadata keys are deleted, the container +server and container-replicator would take care of deleting the symlinks +from ``sync-containers``. .. note:: diff --git a/swift/container/replicator.py b/swift/container/replicator.py index 810c7db600..b428086bdd 100644 --- a/swift/container/replicator.py +++ b/swift/container/replicator.py @@ -20,6 +20,7 @@ import time from collections import defaultdict from eventlet import Timeout +from swift.container.sync_store import ContainerSyncStore from swift.container.backend import ContainerBroker, DATADIR from swift.container.reconciler import ( MISPLACED_OBJECTS_ACCOUNT, incorrect_policy_index, @@ -189,6 +190,13 @@ class ContainerReplicator(db_replicator.Replicator): def _post_replicate_hook(self, broker, info, responses): if info['account'] == MISPLACED_OBJECTS_ACCOUNT: return + + try: + self.sync_store.update_sync_store(broker) + except Exception: + self.logger.exception('Failed to update sync_store %s' % + broker.db_file) + point = broker.get_reconciler_sync() if not broker.has_multiple_policies() and info['max_row'] != point: broker.update_reconciler_sync(info['max_row']) @@ -210,6 +218,13 @@ class ContainerReplicator(db_replicator.Replicator): # this container shouldn't be here, make sure it's cleaned up self.reconciler_cleanups[broker.container] = broker return + try: + # DB is going to get deleted. 
Be preemptive about it + self.sync_store.remove_synced_container(broker) + except Exception: + self.logger.exception('Failed to remove sync_store entry %s' % + broker.db_file) + return super(ContainerReplicator, self).delete_db(broker) def replicate_reconcilers(self): @@ -237,6 +252,9 @@ class ContainerReplicator(db_replicator.Replicator): def run_once(self, *args, **kwargs): self.reconciler_containers = {} self.reconciler_cleanups = {} + self.sync_store = ContainerSyncStore(self.root, + self.logger, + self.mount_check) rv = super(ContainerReplicator, self).run_once(*args, **kwargs) if any([self.reconciler_containers, self.reconciler_cleanups]): self.replicate_reconcilers() diff --git a/swift/container/server.py b/swift/container/server.py index 5f571ef9f2..0a09f57615 100644 --- a/swift/container/server.py +++ b/swift/container/server.py @@ -23,6 +23,7 @@ from xml.etree.cElementTree import Element, SubElement, tostring from eventlet import Timeout import swift.common.db +from swift.container.sync_store import ContainerSyncStore from swift.container.backend import ContainerBroker, DATADIR from swift.container.replicator import ContainerReplicatorRpc from swift.common.db import DatabaseAlreadyExists @@ -110,6 +111,9 @@ class ContainerController(BaseStorageServer): self.save_headers.append('x-versions-location') swift.common.db.DB_PREALLOCATION = \ config_true_value(conf.get('db_preallocation', 'f')) + self.sync_store = ContainerSyncStore(self.root, + self.logger, + self.mount_check) def _get_container_broker(self, drive, part, account, container, **kwargs): """ @@ -242,6 +246,13 @@ class ContainerController(BaseStorageServer): else: return None + def _update_sync_store(self, broker, method): + try: + self.sync_store.update_sync_store(broker) + except Exception: + self.logger.exception('Failed to update sync_store %s during %s' % + broker.db_file, method) + @public @timing_stats() def DELETE(self, req): @@ -276,6 +287,7 @@ class ContainerController(BaseStorageServer): broker.delete_db(req_timestamp.internal) if not broker.is_deleted(): return HTTPConflict(request=req) + self._update_sync_store(broker, 'DELETE') resp = self.account_update(req, account, container, broker) if resp: return resp @@ -381,6 +393,8 @@ class ContainerController(BaseStorageServer): broker.metadata['X-Container-Sync-To'][0]: broker.set_x_container_sync_points(-1, -1) broker.update_metadata(metadata, validate_metadata=True) + if metadata: + self._update_sync_store(broker, 'PUT') resp = self.account_update(req, account, container, broker) if resp: return resp @@ -564,6 +578,7 @@ class ContainerController(BaseStorageServer): broker.metadata['X-Container-Sync-To'][0]: broker.set_x_container_sync_points(-1, -1) broker.update_metadata(metadata, validate_metadata=True) + self._update_sync_store(broker, 'POST') return HTTPNoContent(request=req) def __call__(self, env, start_response): diff --git a/swift/container/sync.py b/swift/container/sync.py index 089c9a7481..ef9543883a 100644 --- a/swift/container/sync.py +++ b/swift/container/sync.py @@ -24,7 +24,9 @@ from struct import unpack_from from eventlet import sleep, Timeout import swift.common.db -from swift.container.backend import ContainerBroker, DATADIR +from swift.common.db import DatabaseConnectionError +from swift.container.backend import ContainerBroker +from swift.container.sync_store import ContainerSyncStore from swift.common.container_sync_realms import ContainerSyncRealms from swift.common.internal_client import ( delete_object, put_object, InternalClient, 
UnexpectedResponse) @@ -32,7 +34,7 @@ from swift.common.exceptions import ClientException from swift.common.ring import Ring from swift.common.ring.utils import is_local_device from swift.common.utils import ( - audit_location_generator, clean_content_type, config_true_value, + clean_content_type, config_true_value, FileLikeIter, get_logger, hash_path, quote, urlparse, validate_sync_to, whataremyips, Timestamp) from swift.common.daemon import Daemon @@ -187,6 +189,10 @@ class ContainerSync(Daemon): a.strip() for a in conf.get('sync_proxy', '').split(',') if a.strip()] + #: ContainerSyncStore instance for iterating over synced containers + self.sync_store = ContainerSyncStore(self.devices, + self.logger, + self.mount_check) #: Number of containers with sync turned on that were successfully #: synced. self.container_syncs = 0 @@ -194,7 +200,8 @@ class ContainerSync(Daemon): self.container_deletes = 0 #: Number of successful PUTs triggered. self.container_puts = 0 - #: Number of containers that didn't have sync turned on. + #: Number of containers whose sync has been turned off, but + #: are not yet cleared from the sync store. self.container_skips = 0 #: Number of containers that had a failure of some type. self.container_failures = 0 @@ -247,10 +254,7 @@ class ContainerSync(Daemon): sleep(random() * self.interval) while True: begin = time() - all_locs = audit_location_generator(self.devices, DATADIR, '.db', - mount_check=self.mount_check, - logger=self.logger) - for path, device, partition in all_locs: + for path in self.sync_store.synced_containers_generator(): self.container_sync(path) if time() - self.reported >= 3600: # once an hour self.report() @@ -264,10 +268,7 @@ class ContainerSync(Daemon): """ self.logger.info(_('Begin container sync "once" mode')) begin = time() - all_locs = audit_location_generator(self.devices, DATADIR, '.db', - mount_check=self.mount_check, - logger=self.logger) - for path, device, partition in all_locs: + for path in self.sync_store.synced_containers_generator(): self.container_sync(path) if time() - self.reported >= 3600: # once an hour self.report() @@ -308,7 +309,20 @@ class ContainerSync(Daemon): broker = None try: broker = ContainerBroker(path) - info = broker.get_info() + # The path we pass to the ContainerBroker is a real path of + # a container DB. If we get here, however, it means that this + # path is linked from the sync_containers dir. In rare cases + # of race or processes failures the link can be stale and + # the get_info below will raise a DB doesn't exist exception + # In this case we remove the stale link and raise an error + # since in most cases the db should be there. 
+ try: + info = broker.get_info() + except DatabaseConnectionError as db_err: + if str(db_err).endswith("DB doesn't exist"): + self.sync_store.remove_synced_container(broker) + raise + x, nodes = self.container_ring.get_nodes(info['account'], info['container']) for ordinal, node in enumerate(nodes): @@ -388,7 +402,7 @@ class ContainerSync(Daemon): broker.set_x_container_sync_points(sync_point1, None) self.container_syncs += 1 self.logger.increment('syncs') - except (Exception, Timeout) as err: + except (Exception, Timeout): self.container_failures += 1 self.logger.increment('failures') self.logger.exception(_('ERROR Syncing %s'), diff --git a/swift/container/sync_store.py b/swift/container/sync_store.py new file mode 100644 index 0000000000..729eaee093 --- /dev/null +++ b/swift/container/sync_store.py @@ -0,0 +1,177 @@ +# Copyright (c) 2010-2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import errno + +from swift.common.utils import audit_location_generator, mkdirs +from swift.container.backend import DATADIR + +SYNC_DATADIR = 'sync_containers' + + +class ContainerSyncStore(object): + """ + Filesystem based store for local containers that needs to be synced. + + The store holds a list of containers that need to be synced by the + container sync daemon. The store is local to the container server node, + that is, only containers whose databases are kept locally on the node are + listed. + """ + def __init__(self, devices, logger, mount_check): + self.devices = os.path.normpath(os.path.join('/', devices)) + '/' + self.logger = logger + self.mount_check = mount_check + + def _container_to_synced_container_path(self, path): + # path is assumed to be of the form: + # /srv/node/sdb/containers/part/.../*.db + # or more generally: + # devices/device/containers/part/.../*.db + # Below we split the path to the following parts: + # devices, device, rest + devices = self.devices + path = os.path.normpath(path) + device = path[len(devices):path.rfind(DATADIR)] + rest = path[path.rfind(DATADIR) + len(DATADIR) + 1:] + + return os.path.join(devices, device, SYNC_DATADIR, rest) + + def _synced_container_to_container_path(self, path): + # synced path is assumed to be of the form: + # /srv/node/sdb/sync_containers/part/.../*.db + # or more generally: + # devices/device/sync_containers/part/.../*.db + # Below we split the path to the following parts: + # devices, device, rest + devices = self.devices + path = os.path.normpath(path) + device = path[len(devices):path.rfind(SYNC_DATADIR)] + rest = path[path.rfind(SYNC_DATADIR) + len(SYNC_DATADIR) + 1:] + + return os.path.join(devices, device, DATADIR, rest) + + def add_synced_container(self, broker): + """ + Adds the container db represented by broker to the list of synced + containers. + + :param broker: An instance of ContainerBroker representing the + container to add. 
+        """
+        sync_file = self._container_to_synced_container_path(broker.db_file)
+        stat = None
+        try:
+            stat = os.stat(sync_file)
+        except OSError as oserr:
+            if oserr.errno != errno.ENOENT:
+                raise oserr
+
+        if stat is not None:
+            return
+
+        sync_path = os.path.dirname(sync_file)
+        mkdirs(sync_path)
+
+        try:
+            os.symlink(broker.db_file, sync_file)
+        except OSError as oserr:
+            if (oserr.errno != errno.EEXIST or
+                    not os.path.islink(sync_file)):
+                raise oserr
+
+    def remove_synced_container(self, broker):
+        """
+        Removes the container db represented by broker from the list of
+        synced containers.
+
+        :param broker: An instance of ContainerBroker representing the
+                       container to remove.
+        """
+        sync_file = broker.db_file
+        sync_file = self._container_to_synced_container_path(sync_file)
+        try:
+            os.unlink(sync_file)
+            os.removedirs(os.path.dirname(sync_file))
+        except OSError as oserr:
+            if oserr.errno != errno.ENOENT:
+                raise oserr
+
+    def update_sync_store(self, broker):
+        """
+        Add or remove a symlink to/from the sync-containers directory
+        according to the broker's metadata.
+
+        Decide according to the broker's x-container-sync-to and
+        x-container-sync-key whether a symlink needs to be added or
+        removed.
+
+        Note that if neither metadata item appears at all, the container
+        has never been set up for sync (at least not within reclaim_age),
+        in which case we do nothing. This is important, as this method is
+        called for ALL containers by the container replicator.
+
+        Once we realize that we do need to do something, we check whether
+        the container is marked for delete, in which case we want to
+        remove the symlink.
+
+        A symlink is added only when both x-container-sync-to and
+        x-container-sync-key exist and are valid, that is, are not empty.
+
+        If we reach this point, we know we need to do something, the
+        container is not marked for delete, and the condition for adding
+        a symlink is not met; the conclusion is that we need to remove
+        the symlink.
+
+        :param broker: An instance of ContainerBroker
+        """
+        # If the broker metadata does not have both x-container-sync-to
+        # and x-container-sync-key it has *never* been set. Make sure
+        # we do nothing in this case
+        if ('X-Container-Sync-To' not in broker.metadata and
+                'X-Container-Sync-Key' not in broker.metadata):
+            return
+
+        if broker.is_deleted():
+            self.remove_synced_container(broker)
+            return
+
+        # If both x-container-sync-to and x-container-sync-key
+        # exist and are valid, add the symlink
+        sync_to = sync_key = None
+        if 'X-Container-Sync-To' in broker.metadata:
+            sync_to = broker.metadata['X-Container-Sync-To'][0]
+        if 'X-Container-Sync-Key' in broker.metadata:
+            sync_key = broker.metadata['X-Container-Sync-Key'][0]
+        if sync_to and sync_key:
+            self.add_synced_container(broker)
+            return
+
+        self.remove_synced_container(broker)
+
+    def synced_containers_generator(self):
+        """
+        Iterates over the list of synced containers,
+        yielding the path of the container db
+        """
+        all_locs = audit_location_generator(self.devices, SYNC_DATADIR, '.db',
+                                            mount_check=self.mount_check,
+                                            logger=self.logger)
+        for path, device, partition in all_locs:
+            # What we want to yield is the real path as it's being used for
+            # initiating a container broker. The broker would break if not
+            # given the db's real path, as it e.g.
assumes the existence of + # .pending in the same path + yield self._synced_container_to_container_path(path) diff --git a/test/probe/test_container_sync.py b/test/probe/test_container_sync.py index 4288dd4644..763b2d3159 100644 --- a/test/probe/test_container_sync.py +++ b/test/probe/test_container_sync.py @@ -18,8 +18,9 @@ from nose import SkipTest import unittest from six.moves.urllib.parse import urlparse -from swiftclient import client +from swiftclient import client, ClientException +from swift.common.http import HTTP_NOT_FOUND from swift.common.manager import Manager from test.probe.common import ReplProbeTest, ENABLED_POLICIES @@ -49,25 +50,27 @@ class TestContainerSync(ReplProbeTest): super(TestContainerSync, self).setUp() self.realm, self.cluster = get_current_realm_cluster(self.url) - def test_sync(self): - base_headers = {'X-Container-Sync-Key': 'secret'} - + def _setup_synced_containers(self, skey='secret', dkey='secret'): # setup dest container dest_container = 'dest-container-%s' % uuid.uuid4() - dest_headers = base_headers.copy() + dest_headers = {} dest_policy = None if len(ENABLED_POLICIES) > 1: dest_policy = random.choice(ENABLED_POLICIES) dest_headers['X-Storage-Policy'] = dest_policy.name + if dkey is not None: + dest_headers['X-Container-Sync-Key'] = dkey client.put_container(self.url, self.token, dest_container, headers=dest_headers) # setup source container source_container = 'source-container-%s' % uuid.uuid4() - source_headers = base_headers.copy() + source_headers = {} sync_to = '//%s/%s/%s/%s' % (self.realm, self.cluster, self.account, dest_container) source_headers['X-Container-Sync-To'] = sync_to + if skey is not None: + source_headers['X-Container-Sync-Key'] = skey if dest_policy: source_policy = random.choice([p for p in ENABLED_POLICIES if p is not dest_policy]) @@ -75,6 +78,11 @@ class TestContainerSync(ReplProbeTest): client.put_container(self.url, self.token, source_container, headers=source_headers) + return source_container, dest_container + + def test_sync(self): + source_container, dest_container = self._setup_synced_containers() + # upload to source object_name = 'object-%s' % uuid.uuid4() client.put_object(self.url, self.token, source_container, object_name, @@ -83,11 +91,63 @@ class TestContainerSync(ReplProbeTest): # cycle container-sync Manager(['container-sync']).once() - # retrieve from sync'd container - headers, body = client.get_object(self.url, self.token, - dest_container, object_name) + _junk, body = client.get_object(self.url, self.token, + dest_container, object_name) self.assertEqual(body, 'test-body') + def test_sync_lazy_skey(self): + # Create synced containers, but with no key at source + source_container, dest_container =\ + self._setup_synced_containers(None, 'secret') + + # upload to source + object_name = 'object-%s' % uuid.uuid4() + client.put_object(self.url, self.token, source_container, object_name, + 'test-body') + + # cycle container-sync, nothing should happen + Manager(['container-sync']).once() + with self.assertRaises(ClientException) as err: + _junk, body = client.get_object(self.url, self.token, + dest_container, object_name) + self.assertEqual(err.exception.http_status, HTTP_NOT_FOUND) + + # amend source key + source_headers = {'X-Container-Sync-Key': 'secret'} + client.put_container(self.url, self.token, source_container, + headers=source_headers) + # cycle container-sync, should replicate + Manager(['container-sync']).once() + _junk, body = client.get_object(self.url, self.token, + dest_container, object_name) + 
self.assertEqual(body, 'test-body') + + def test_sync_lazy_dkey(self): + # Create synced containers, but with no key at dest + source_container, dest_container =\ + self._setup_synced_containers('secret', None) + + # upload to source + object_name = 'object-%s' % uuid.uuid4() + client.put_object(self.url, self.token, source_container, object_name, + 'test-body') + + # cycle container-sync, nothing should happen + Manager(['container-sync']).once() + with self.assertRaises(ClientException) as err: + _junk, body = client.get_object(self.url, self.token, + dest_container, object_name) + self.assertEqual(err.exception.http_status, HTTP_NOT_FOUND) + + # amend dest key + dest_headers = {'X-Container-Sync-Key': 'secret'} + client.put_container(self.url, self.token, dest_container, + headers=dest_headers) + # cycle container-sync, should replicate + Manager(['container-sync']).once() + _junk, body = client.get_object(self.url, self.token, + dest_container, object_name) + self.assertEqual(body, 'test-body') if __name__ == "__main__": unittest.main() diff --git a/test/unit/container/test_replicator.py b/test/unit/container/test_replicator.py index 9216fed5ed..bdfe481d15 100644 --- a/test/unit/container/test_replicator.py +++ b/test/unit/container/test_replicator.py @@ -23,14 +23,14 @@ import random import sqlite3 from swift.common import db_replicator -from swift.container import replicator, backend, server +from swift.container import replicator, backend, server, sync_store from swift.container.reconciler import ( MISPLACED_OBJECTS_ACCOUNT, get_reconciler_container_name) from swift.common.utils import Timestamp from swift.common.storage_policy import POLICIES from test.unit.common import test_db_replicator -from test.unit import patch_policies, make_timestamp_iter +from test.unit import patch_policies, make_timestamp_iter, FakeLogger from contextlib import contextmanager @@ -998,6 +998,135 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): daemon._post_replicate_hook(broker, info, []) self.assertEqual(0, len(calls)) + def test_update_sync_store_exception(self): + class FakeContainerSyncStore(object): + def update_sync_store(self, broker): + raise OSError(1, '1') + + logger = FakeLogger() + daemon = replicator.ContainerReplicator({}, logger) + daemon.sync_store = FakeContainerSyncStore() + ts_iter = make_timestamp_iter() + broker = self._get_broker('a', 'c', node_index=0) + timestamp = next(ts_iter) + broker.initialize(timestamp.internal, POLICIES.default.idx) + info = broker.get_replication_info() + daemon._post_replicate_hook(broker, info, []) + log_lines = logger.get_lines_for_level('error') + self.assertEqual(1, len(log_lines)) + self.assertIn('Failed to update sync_store', log_lines[0]) + + def test_update_sync_store(self): + klass = 'swift.container.sync_store.ContainerSyncStore' + daemon = replicator.ContainerReplicator({}) + daemon.sync_store = sync_store.ContainerSyncStore( + daemon.root, daemon.logger, daemon.mount_check) + ts_iter = make_timestamp_iter() + broker = self._get_broker('a', 'c', node_index=0) + timestamp = next(ts_iter) + broker.initialize(timestamp.internal, POLICIES.default.idx) + info = broker.get_replication_info() + with mock.patch(klass + '.remove_synced_container') as mock_remove: + with mock.patch(klass + '.add_synced_container') as mock_add: + daemon._post_replicate_hook(broker, info, []) + self.assertEqual(0, mock_remove.call_count) + self.assertEqual(0, mock_add.call_count) + + timestamp = next(ts_iter) + # sync-to and sync-key empty - remove from 
store + broker.update_metadata( + {'X-Container-Sync-To': ('', timestamp.internal), + 'X-Container-Sync-Key': ('', timestamp.internal)}) + with mock.patch(klass + '.remove_synced_container') as mock_remove: + with mock.patch(klass + '.add_synced_container') as mock_add: + daemon._post_replicate_hook(broker, info, []) + self.assertEqual(0, mock_add.call_count) + mock_remove.assert_called_once_with(broker) + + timestamp = next(ts_iter) + # sync-to is not empty sync-key is empty - remove from store + broker.update_metadata( + {'X-Container-Sync-To': ('a', timestamp.internal)}) + with mock.patch(klass + '.remove_synced_container') as mock_remove: + with mock.patch(klass + '.add_synced_container') as mock_add: + daemon._post_replicate_hook(broker, info, []) + self.assertEqual(0, mock_add.call_count) + mock_remove.assert_called_once_with(broker) + + timestamp = next(ts_iter) + # sync-to is empty sync-key is not empty - remove from store + broker.update_metadata( + {'X-Container-Sync-To': ('', timestamp.internal), + 'X-Container-Sync-Key': ('secret', timestamp.internal)}) + with mock.patch(klass + '.remove_synced_container') as mock_remove: + with mock.patch(klass + '.add_synced_container') as mock_add: + daemon._post_replicate_hook(broker, info, []) + self.assertEqual(0, mock_add.call_count) + mock_remove.assert_called_once_with(broker) + + timestamp = next(ts_iter) + # sync-to, sync-key both not empty - add to store + broker.update_metadata( + {'X-Container-Sync-To': ('a', timestamp.internal), + 'X-Container-Sync-Key': ('secret', timestamp.internal)}) + with mock.patch(klass + '.remove_synced_container') as mock_remove: + with mock.patch(klass + '.add_synced_container') as mock_add: + daemon._post_replicate_hook(broker, info, []) + mock_add.assert_called_once_with(broker) + self.assertEqual(0, mock_remove.call_count) + + timestamp = next(ts_iter) + # container is removed - need to remove from store + broker.delete_db(timestamp.internal) + broker.update_metadata( + {'X-Container-Sync-To': ('a', timestamp.internal), + 'X-Container-Sync-Key': ('secret', timestamp.internal)}) + with mock.patch(klass + '.remove_synced_container') as mock_remove: + with mock.patch(klass + '.add_synced_container') as mock_add: + daemon._post_replicate_hook(broker, info, []) + self.assertEqual(0, mock_add.call_count) + mock_remove.assert_called_once_with(broker) + + def test_sync_triggers_sync_store_update(self): + klass = 'swift.container.sync_store.ContainerSyncStore' + ts_iter = make_timestamp_iter() + # Create two containers as follows: + # broker_1 which is not set for sync + # broker_2 which is set for sync and then unset + # test that while replicating both we see no activity + # for broker_1, and the anticipated activity for broker_2 + broker_1 = self._get_broker('a', 'c', node_index=0) + broker_1.initialize(next(ts_iter).internal, POLICIES.default.idx) + broker_2 = self._get_broker('b', 'd', node_index=0) + broker_2.initialize(next(ts_iter).internal, POLICIES.default.idx) + broker_2.update_metadata( + {'X-Container-Sync-To': ('a', next(ts_iter).internal), + 'X-Container-Sync-Key': ('secret', next(ts_iter).internal)}) + + # replicate once according to broker_1 + # relying on the fact that FakeRing would place both + # in the same partition. 
+ part, node = self._get_broker_part_node(broker_1) + with mock.patch(klass + '.remove_synced_container') as mock_remove: + with mock.patch(klass + '.add_synced_container') as mock_add: + self._run_once(node) + self.assertEqual(1, mock_add.call_count) + self.assertEqual(broker_2.db_file, mock_add.call_args[0][0].db_file) + self.assertEqual(0, mock_remove.call_count) + + broker_2.update_metadata( + {'X-Container-Sync-To': ('', next(ts_iter).internal)}) + # replicate once this time according to broker_2 + # relying on the fact that FakeRing would place both + # in the same partition. + part, node = self._get_broker_part_node(broker_2) + with mock.patch(klass + '.remove_synced_container') as mock_remove: + with mock.patch(klass + '.add_synced_container') as mock_add: + self._run_once(node) + self.assertEqual(0, mock_add.call_count) + self.assertEqual(1, mock_remove.call_count) + self.assertEqual(broker_2.db_file, mock_remove.call_args[0][0].db_file) + if __name__ == '__main__': unittest.main() diff --git a/test/unit/container/test_server.py b/test/unit/container/test_server.py index fb414207d5..22e0f00c41 100644 --- a/test/unit/container/test_server.py +++ b/test/unit/container/test_server.py @@ -1153,6 +1153,75 @@ class TestContainerController(unittest.TestCase): self.assertEqual(info['x_container_sync_point1'], -1) self.assertEqual(info['x_container_sync_point2'], -1) + def test_update_sync_store_on_PUT(self): + # Create a synced container and validate a link is created + self._create_synced_container_and_validate_sync_store('PUT') + # remove the sync using PUT and validate the link is deleted + self._remove_sync_and_validate_sync_store('PUT') + + def test_update_sync_store_on_POST(self): + # Create a container and validate a link is not created + self._create_container_and_validate_sync_store() + # Update the container to be synced and validate a link is created + self._create_synced_container_and_validate_sync_store('POST') + # remove the sync using POST and validate the link is deleted + self._remove_sync_and_validate_sync_store('POST') + + def test_update_sync_store_on_DELETE(self): + # Create a synced container and validate a link is created + self._create_synced_container_and_validate_sync_store('PUT') + # Remove the container and validate the link is deleted + self._remove_sync_and_validate_sync_store('DELETE') + + def _create_container_and_validate_sync_store(self): + req = Request.blank( + '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, + headers={'x-timestamp': '0'}) + req.get_response(self.controller) + db = self.controller._get_container_broker('sda1', 'p', 'a', 'c') + sync_store = self.controller.sync_store + db_path = db.db_file + db_link = sync_store._container_to_synced_container_path(db_path) + self.assertFalse(os.path.exists(db_link)) + sync_containers = [c for c in sync_store.synced_containers_generator()] + self.assertFalse(sync_containers) + + def _create_synced_container_and_validate_sync_store(self, method): + req = Request.blank( + '/sda1/p/a/c', environ={'REQUEST_METHOD': method}, + headers={'x-timestamp': '1', + 'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c', + 'x-container-sync-key': '1234'}) + req.get_response(self.controller) + db = self.controller._get_container_broker('sda1', 'p', 'a', 'c') + sync_store = self.controller.sync_store + db_path = db.db_file + db_link = sync_store._container_to_synced_container_path(db_path) + self.assertTrue(os.path.exists(db_link)) + sync_containers = [c for c in sync_store.synced_containers_generator()] + 
self.assertEqual(1, len(sync_containers)) + self.assertEqual(db_path, sync_containers[0]) + + def _remove_sync_and_validate_sync_store(self, method): + if method == 'DELETE': + headers = {'x-timestamp': '2'} + else: + headers = {'x-timestamp': '2', + 'x-container-sync-to': '', + 'x-container-sync-key': '1234'} + + req = Request.blank( + '/sda1/p/a/c', environ={'REQUEST_METHOD': method}, + headers=headers) + req.get_response(self.controller) + db = self.controller._get_container_broker('sda1', 'p', 'a', 'c') + sync_store = self.controller.sync_store + db_path = db.db_file + db_link = sync_store._container_to_synced_container_path(db_path) + self.assertFalse(os.path.exists(db_link)) + sync_containers = [c for c in sync_store.synced_containers_generator()] + self.assertFalse(sync_containers) + def test_REPLICATE_insufficient_storage(self): conf = {'devices': self.testdir, 'mount_check': 'true'} self.container_controller = container_server.ContainerController( diff --git a/test/unit/container/test_sync.py b/test/unit/container/test_sync.py index bc69fcffa5..60d606110d 100644 --- a/test/unit/container/test_sync.py +++ b/test/unit/container/test_sync.py @@ -19,8 +19,10 @@ import unittest from textwrap import dedent import mock +import errno from test.unit import debug_logger from swift.container import sync +from swift.common.db import DatabaseConnectionError from swift.common import utils from swift.common.wsgi import ConfigString from swift.common.exceptions import ClientException @@ -47,6 +49,7 @@ class FakeContainerBroker(object): def __init__(self, path, metadata=None, info=None, deleted=False, items_since=None): self.db_file = path + self.db_dir = os.path.dirname(path) self.metadata = metadata if metadata else {} self.info = info if info else {} self.deleted = deleted @@ -157,7 +160,6 @@ class TestContainerSync(unittest.TestCase): # interval sleep. time_calls = [0] sleep_calls = [] - audit_location_generator_calls = [0] def fake_time(): time_calls[0] += 1 @@ -176,48 +178,36 @@ class TestContainerSync(unittest.TestCase): def fake_sleep(amount): sleep_calls.append(amount) - def fake_audit_location_generator(*args, **kwargs): - audit_location_generator_calls[0] += 1 - # Makes .container_sync() short-circuit - yield 'container.db', 'device', 'partition' - return + gen_func = ('swift.container.sync_store.' 
+ 'ContainerSyncStore.synced_containers_generator') + with mock.patch('swift.container.sync.InternalClient'), \ + mock.patch('swift.container.sync.time', fake_time), \ + mock.patch('swift.container.sync.sleep', fake_sleep), \ + mock.patch(gen_func) as fake_generator, \ + mock.patch('swift.container.sync.ContainerBroker', + lambda p: FakeContainerBroker(p, info={ + 'account': 'a', 'container': 'c', + 'storage_policy_index': 0})): + fake_generator.side_effect = [iter(['container.db']), + iter(['container.db'])] + cs = sync.ContainerSync({}, container_ring=FakeRing()) + try: + cs.run_forever() + except Exception as err: + if str(err) != 'we are now done': + raise - orig_time = sync.time - orig_sleep = sync.sleep - orig_ContainerBroker = sync.ContainerBroker - orig_audit_location_generator = sync.audit_location_generator - try: - sync.ContainerBroker = lambda p: FakeContainerBroker( - p, info={'account': 'a', 'container': 'c', - 'storage_policy_index': 0}) - sync.time = fake_time - sync.sleep = fake_sleep - - with mock.patch('swift.container.sync.InternalClient'): - cs = sync.ContainerSync({}, container_ring=FakeRing()) - sync.audit_location_generator = fake_audit_location_generator - cs.run_forever(1, 2, a=3, b=4, verbose=True) - except Exception as err: - if str(err) != 'we are now done': - raise - finally: - sync.time = orig_time - sync.sleep = orig_sleep - sync.audit_location_generator = orig_audit_location_generator - sync.ContainerBroker = orig_ContainerBroker - - self.assertEqual(time_calls, [9]) - self.assertEqual(len(sleep_calls), 2) - self.assertTrue(sleep_calls[0] <= cs.interval) - self.assertTrue(sleep_calls[1] == cs.interval - 1) - self.assertEqual(audit_location_generator_calls, [2]) - self.assertEqual(cs.reported, 3602) + self.assertEqual(time_calls, [9]) + self.assertEqual(len(sleep_calls), 2) + self.assertLessEqual(sleep_calls[0], cs.interval) + self.assertEqual(cs.interval - 1, sleep_calls[1]) + self.assertEqual(2, fake_generator.call_count) + self.assertEqual(cs.reported, 3602) def test_run_once(self): # This runs runs_once with fakes twice, the first causing an interim # report, the second with no interim report. time_calls = [0] - audit_location_generator_calls = [0] def fake_time(): time_calls[0] += 1 @@ -235,40 +225,31 @@ class TestContainerSync(unittest.TestCase): raise Exception('we are now done') return returns[time_calls[0] - 1] - def fake_audit_location_generator(*args, **kwargs): - audit_location_generator_calls[0] += 1 - # Makes .container_sync() short-circuit - yield 'container.db', 'device', 'partition' - return + gen_func = ('swift.container.sync_store.' 
+ 'ContainerSyncStore.synced_containers_generator') + with mock.patch('swift.container.sync.InternalClient'), \ + mock.patch('swift.container.sync.time', fake_time), \ + mock.patch(gen_func) as fake_generator, \ + mock.patch('swift.container.sync.ContainerBroker', + lambda p: FakeContainerBroker(p, info={ + 'account': 'a', 'container': 'c', + 'storage_policy_index': 0})): + fake_generator.side_effect = [iter(['container.db']), + iter(['container.db'])] + cs = sync.ContainerSync({}, container_ring=FakeRing()) + try: + cs.run_once() + self.assertEqual(time_calls, [6]) + self.assertEqual(1, fake_generator.call_count) + self.assertEqual(cs.reported, 3602) + cs.run_once() + except Exception as err: + if str(err) != 'we are now done': + raise - orig_time = sync.time - orig_audit_location_generator = sync.audit_location_generator - orig_ContainerBroker = sync.ContainerBroker - try: - sync.ContainerBroker = lambda p: FakeContainerBroker( - p, info={'account': 'a', 'container': 'c', - 'storage_policy_index': 0}) - sync.time = fake_time - - with mock.patch('swift.container.sync.InternalClient'): - cs = sync.ContainerSync({}, container_ring=FakeRing()) - sync.audit_location_generator = fake_audit_location_generator - cs.run_once(1, 2, a=3, b=4, verbose=True) - self.assertEqual(time_calls, [6]) - self.assertEqual(audit_location_generator_calls, [1]) - self.assertEqual(cs.reported, 3602) - cs.run_once() - except Exception as err: - if str(err) != 'we are now done': - raise - finally: - sync.time = orig_time - sync.audit_location_generator = orig_audit_location_generator - sync.ContainerBroker = orig_ContainerBroker - - self.assertEqual(time_calls, [10]) - self.assertEqual(audit_location_generator_calls, [2]) - self.assertEqual(cs.reported, 3604) + self.assertEqual(time_calls, [10]) + self.assertEqual(2, fake_generator.call_count) + self.assertEqual(cs.reported, 3604) def test_container_sync_not_db(self): cring = FakeRing() @@ -280,8 +261,65 @@ class TestContainerSync(unittest.TestCase): cring = FakeRing() with mock.patch('swift.container.sync.InternalClient'): cs = sync.ContainerSync({}, container_ring=cring) - cs.container_sync('isa.db') - self.assertEqual(cs.container_failures, 1) + + broker = 'swift.container.backend.ContainerBroker' + store = 'swift.container.sync_store.ContainerSyncStore' + + # In this test we call the container_sync instance several + # times with a missing db in various combinations. + # Since we use the same ContainerSync instance for all tests + # its failures counter increases by one with each call. + + # Test the case where get_info returns DatabaseConnectionError + # with DB does not exist, and we succeed in deleting it. + with mock.patch(broker + '.get_info') as fake_get_info: + with mock.patch(store + '.remove_synced_container') as fake_remove: + fake_get_info.side_effect = DatabaseConnectionError( + 'a', + "DB doesn't exist") + cs.container_sync('isa.db') + self.assertEqual(cs.container_failures, 1) + self.assertEqual(cs.container_skips, 0) + self.assertEqual(1, fake_remove.call_count) + self.assertEqual('isa.db', fake_remove.call_args[0][0].db_file) + + # Test the case where get_info returns DatabaseConnectionError + # with DB does not exist, and we fail to delete it. 
+ with mock.patch(broker + '.get_info') as fake_get_info: + with mock.patch(store + '.remove_synced_container') as fake_remove: + fake_get_info.side_effect = DatabaseConnectionError( + 'a', + "DB doesn't exist") + fake_remove.side_effect = OSError('1') + cs.container_sync('isa.db') + self.assertEqual(cs.container_failures, 2) + self.assertEqual(cs.container_skips, 0) + self.assertEqual(1, fake_remove.call_count) + self.assertEqual('isa.db', fake_remove.call_args[0][0].db_file) + + # Test the case where get_info returns DatabaseConnectionError + # with DB does not exist, and it returns an error != ENOENT. + with mock.patch(broker + '.get_info') as fake_get_info: + with mock.patch(store + '.remove_synced_container') as fake_remove: + fake_get_info.side_effect = DatabaseConnectionError( + 'a', + "DB doesn't exist") + fake_remove.side_effect = OSError(errno.EPERM, 'a') + cs.container_sync('isa.db') + self.assertEqual(cs.container_failures, 3) + self.assertEqual(cs.container_skips, 0) + self.assertEqual(1, fake_remove.call_count) + self.assertEqual('isa.db', fake_remove.call_args[0][0].db_file) + + # Test the case where get_info returns DatabaseConnectionError + # error different than DB does not exist + with mock.patch(broker + '.get_info') as fake_get_info: + with mock.patch(store + '.remove_synced_container') as fake_remove: + fake_get_info.side_effect = DatabaseConnectionError('a', 'a') + cs.container_sync('isa.db') + self.assertEqual(cs.container_failures, 4) + self.assertEqual(cs.container_skips, 0) + self.assertEqual(0, fake_remove.call_count) def test_container_sync_not_my_db(self): # Db could be there due to handoff replication so test that we ignore diff --git a/test/unit/container/test_sync_store.py b/test/unit/container/test_sync_store.py new file mode 100644 index 0000000000..d38d4bc3e5 --- /dev/null +++ b/test/unit/container/test_sync_store.py @@ -0,0 +1,367 @@ +# Copyright (c) 2010-2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import os
+import errno
+import mock
+import random
+import logging
+import unittest
+import tempfile
+from shutil import rmtree
+from test.unit import debug_logger
+
+from swift.container.backend import DATADIR
+from swift.container import sync_store
+
+
+class FakeContainerBroker(object):
+
+    def __init__(self, path):
+        self.db_file = path
+        self.db_dir = os.path.dirname(path)
+        self.metadata = dict()
+        self._is_deleted = False
+
+    def is_deleted(self):
+        return self._is_deleted
+
+
+class TestContainerSyncStore(unittest.TestCase):
+
+    def setUp(self):
+        self.logger = debug_logger('test-container-sync-store')
+        self.logger.level = logging.DEBUG
+        self.test_dir_prefix = tempfile.mkdtemp()
+        self.devices_dir = os.path.join(self.test_dir_prefix, 'srv/node/')
+        os.makedirs(self.devices_dir)
+        # Create dummy container dbs
+        self.devices = ['sdax', 'sdb', 'sdc']
+        self.partitions = ['21765', '38965', '13234']
+        self.suffixes = ['312', '435']
+        self.hashes = ['f19ed', '53ef', '0ab5', '9c3a']
+        for device in self.devices:
+            data_dir_path = os.path.join(self.devices_dir,
+                                         device,
+                                         DATADIR)
+            os.makedirs(data_dir_path)
+            for part in self.partitions:
+                for suffix in self.suffixes:
+                    for hsh in self.hashes:
+                        db_dir = os.path.join(data_dir_path,
+                                              part,
+                                              suffix,
+                                              hsh)
+                        os.makedirs(db_dir)
+                        db_file = os.path.join(db_dir, '%s.db' % hsh)
+                        with open(db_file, 'w') as outfile:
+                            outfile.write('%s' % db_file)
+
+    def tearDown(self):
+        rmtree(self.test_dir_prefix)
+
+    def pick_dbfile(self):
+        hsh = random.choice(self.hashes)
+        return os.path.join(self.devices_dir,
+                            random.choice(self.devices),
+                            DATADIR,
+                            random.choice(self.partitions),
+                            random.choice(self.suffixes),
+                            hsh,
+                            '%s.db' % hsh)
+
+    # Path conversion tests
+    # container path is of the form:
+    # /srv/node/sdb/containers/part/.../*.db
+    # or more generally:
+    # devices/device/DATADIR/part/.../*.db
+    # synced container path is assumed to be of the form:
+    # /srv/node/sdb/sync_containers/part/.../*.db
+    # or more generally:
+    # devices/device/SYNC_DATADIR/part/.../*.db
+    # Indeed the ONLY DIFFERENCE is DATADIR <-> SYNC_DATADIR
+    # Since, however, the strings represented by the constants
+    # DATADIR or SYNC_DATADIR
+    # can appear in the devices or the device part, the conversion
+    # function between the two is a bit more subtle than a mere replacement.
+
+    # This function tests the conversion between a container path
+    # and a synced container path
+    def test_container_to_synced_container_path_conversion(self):
+        # The conversion functions are oblivious to the suffix
+        # so we just pick a constant one.
+        db_path_suffix = self._db_path_suffix()
+
+        # We build various container paths, putting both
+        # DATADIR and SYNC_DATADIR strings in the
+        # device and devices parts.
+        for devices, device in self._container_path_elements_generator():
+            path = os.path.join(devices, device, DATADIR, db_path_suffix)
+            # Call the conversion function
+            sds = sync_store.ContainerSyncStore(devices, self.logger, False)
+            path = sds._container_to_synced_container_path(path)
+            # Validate that ONLY the DATADIR part was replaced with
+            # sync_store.SYNC_DATADIR
+            self._validate_container_path_parts(path, devices, device,
+                                                sync_store.SYNC_DATADIR,
+                                                db_path_suffix)
+
+    # This function tests the conversion between a synced container path
+    # and a container path
+    def test_synced_container_to_container_path_conversion(self):
+        # The conversion functions are oblivious to the suffix
+        # so we just pick a constant one.
+        db_path_suffix = ('133791/625/82a7f5a2c43281b0eab3597e35bb9625/'
+                          '82a7f5a2c43281b0eab3597e35bb9625.db')
+
+        # We build various synced container paths, putting both
+        # DATADIR and SYNC_DATADIR strings in the
+        # device and devices parts.
+        for devices, device in self._container_path_elements_generator():
+            path = os.path.join(devices, device,
+                                sync_store.SYNC_DATADIR, db_path_suffix)
+            # Call the conversion function
+            sds = sync_store.ContainerSyncStore(devices, self.logger, False)
+            path = sds._synced_container_to_container_path(path)
+            # Validate that ONLY the SYNC_DATADIR part was replaced with
+            # DATADIR
+            self._validate_container_path_parts(path, devices, device,
+                                                DATADIR,
+                                                db_path_suffix)
+
+    # Constructs a db path suffix of the form:
+    # 133791/625/82...25/82...25.db
+    def _db_path_suffix(self):
+        def random_hexa_string(length):
+            # Return a zero-padded hexadecimal string of the given length
+            return '%0*x' % (length, random.randrange(16 ** length))
+
+        db = random_hexa_string(32)
+        return '%s/%s/%s/%s.db' % (random_hexa_string(5),
+                                   random_hexa_string(3),
+                                   db, db)
+
+    def _container_path_elements_generator(self):
+        # We build various container path elements, putting both
+        # DATADIR and SYNC_DATADIR strings in the
+        # device and devices parts.
+        for devices in ['/srv/node', '/srv/node/',
+                        '/srv/node/dev',
+                        '/srv/node/%s' % DATADIR,
+                        '/srv/node/%s' % sync_store.SYNC_DATADIR]:
+            for device in ['sdf1', 'sdf1/sdf2',
+                           'sdf1/%s' % DATADIR,
+                           'sdf1/%s' % sync_store.SYNC_DATADIR,
+                           '%s/sda' % DATADIR,
+                           '%s/sda' % sync_store.SYNC_DATADIR]:
+                yield devices, device
+
+    def _validate_container_path_parts(self, path, devices,
+                                       device, target, suffix):
+        # Recall that the path is of the form:
+        # devices/device/target/suffix
+        # where each of the sub path elements (e.g. devices)
+        # has a path structure containing path elements separated by '/'
+        # We thus validate by splitting the path according to '/',
+        # traversing all of its path elements, making sure that the
+        # first elements are those of devices,
+        # the second are those of device
+        # etc.
+        spath = path.split('/')
+        spath.reverse()
+        self.assertEqual(spath.pop(), '')
+        # Validate path against 'devices'
+        for p in [p for p in devices.split('/') if p]:
+            self.assertEqual(spath.pop(), p)
+        # Validate path against 'device'
+        for p in [p for p in device.split('/') if p]:
+            self.assertEqual(spath.pop(), p)
+        # Validate path against target
+        self.assertEqual(spath.pop(), target)
+        # Validate path against suffix
+        for p in [p for p in suffix.split('/') if p]:
+            self.assertEqual(spath.pop(), p)
+
+    def test_add_synced_container(self):
+        # Add non-existing and existing synced containers
+        sds = sync_store.ContainerSyncStore(self.devices_dir,
+                                            self.logger,
+                                            False)
+        cfile = self.pick_dbfile()
+        broker = FakeContainerBroker(cfile)
+        for i in range(2):
+            sds.add_synced_container(broker)
+            scpath = sds._container_to_synced_container_path(cfile)
+            with open(scpath, 'r') as infile:
+                self.assertEqual(infile.read(), cfile)
+
+        iterated_synced_containers = list()
+        for db_path in sds.synced_containers_generator():
+            iterated_synced_containers.append(db_path)
+
+        self.assertEqual(len(iterated_synced_containers), 1)
+
+    def test_remove_synced_container(self):
+        # Add a synced container to remove
+        sds = sync_store.ContainerSyncStore(self.devices_dir,
+                                            self.logger,
+                                            False)
+        cfile = self.pick_dbfile()
+        # We keep here the link file so as to validate its deletion later
+        lfile = sds._container_to_synced_container_path(cfile)
+        broker = FakeContainerBroker(cfile)
+        sds.add_synced_container(broker)
+
+        # Remove existing and non-existing synced containers
+        for i in range(2):
+            sds.remove_synced_container(broker)
+
+        iterated_synced_containers = list()
+        for db_path in sds.synced_containers_generator():
+            iterated_synced_containers.append(db_path)
+
+        self.assertEqual(len(iterated_synced_containers), 0)
+
+        # Make sure the whole link path gets deleted
+        # recall that the path has the following suffix:
+        # <part>/<suffix>/
+        # <hash>/<hash>.db
+        # and we expect the .db as well as all path elements
+        # to get deleted
+        self.assertFalse(os.path.exists(lfile))
+        lfile = os.path.dirname(lfile)
+        for i in range(3):
+            self.assertFalse(os.path.exists(os.path.dirname(lfile)))
+            lfile = os.path.dirname(lfile)
+
+    def test_iterate_synced_containers(self):
+        # populate sync container db
+        sds = sync_store.ContainerSyncStore(self.devices_dir,
+                                            self.logger,
+                                            False)
+        containers = list()
+        for i in range(10):
+            cfile = self.pick_dbfile()
+            broker = FakeContainerBroker(cfile)
+            sds.add_synced_container(broker)
+            containers.append(cfile)
+
+        iterated_synced_containers = list()
+        for db_path in sds.synced_containers_generator():
+            iterated_synced_containers.append(db_path)
+
+        self.assertEqual(
+            set(containers), set(iterated_synced_containers))
+
+    def test_unhandled_exceptions_in_add_remove(self):
+        sds = sync_store.ContainerSyncStore(self.devices_dir,
+                                            self.logger,
+                                            False)
+        cfile = self.pick_dbfile()
+        broker = FakeContainerBroker(cfile)
+
+        with mock.patch(
+                'swift.container.sync_store.os.stat',
+                side_effect=OSError(errno.EPERM, 'permission denied')):
+            with self.assertRaises(OSError) as cm:
+                sds.add_synced_container(broker)
+            self.assertEqual(errno.EPERM, cm.exception.errno)
+
+        with mock.patch(
+                'swift.container.sync_store.os.makedirs',
+                side_effect=OSError(errno.EPERM, 'permission denied')):
+            with self.assertRaises(OSError) as cm:
+                sds.add_synced_container(broker)
+            self.assertEqual(errno.EPERM, cm.exception.errno)
+
+        with mock.patch(
+                'swift.container.sync_store.os.symlink',
+                side_effect=OSError(errno.EPERM,
'permission denied')): + with self.assertRaises(OSError) as cm: + sds.add_synced_container(broker) + self.assertEqual(errno.EPERM, cm.exception.errno) + + with mock.patch( + 'swift.container.sync_store.os.unlink', + side_effect=OSError(errno.EPERM, 'permission denied')): + with self.assertRaises(OSError) as cm: + sds.remove_synced_container(broker) + self.assertEqual(errno.EPERM, cm.exception.errno) + + def test_update_sync_store_according_to_metadata_and_deleted(self): + # This function tests the update_sync_store 'logics' + # with respect to various combinations of the + # sync-to and sync-key metadata items and whether + # the database is marked for delete. + # The table below summarizes the expected result + # for the various combinations, e.g.: + # If metadata items exist and the database + # is not marked for delete then add should be called. + + results_list = [ + [False, 'a', 'b', 'add'], + [False, 'a', '', 'remove'], + [False, 'a', None, 'remove'], + [False, '', 'b', 'remove'], + [False, '', '', 'remove'], + [False, '', None, 'remove'], + [False, None, 'b', 'remove'], + [False, None, '', 'remove'], + [False, None, None, 'none'], + [True, 'a', 'b', 'remove'], + [True, 'a', '', 'remove'], + [True, 'a', None, 'remove'], + [True, '', 'b', 'remove'], + [True, '', '', 'remove'], + [True, '', None, 'remove'], + [True, None, 'b', 'remove'], + [True, None, '', 'remove'], + [True, None, None, 'none'], + ] + + store = 'swift.container.sync_store.ContainerSyncStore' + with mock.patch(store + '.add_synced_container') as add_container: + with mock.patch( + store + '.remove_synced_container') as remove_container: + sds = sync_store.ContainerSyncStore(self.devices_dir, + self.logger, + False) + add_calls = 0 + remove_calls = 0 + # We now iterate over the list of combinations + # Validating that add and removed are called as + # expected + for deleted, sync_to, sync_key, expected_op in results_list: + cfile = self.pick_dbfile() + broker = FakeContainerBroker(cfile) + broker._is_deleted = deleted + if sync_to is not None: + broker.metadata['X-Container-Sync-To'] = [ + sync_to, 1] + if sync_key is not None: + broker.metadata['X-Container-Sync-Key'] = [ + sync_key, 1] + sds.update_sync_store(broker) + if expected_op == 'add': + add_calls += 1 + if expected_op == 'remove': + remove_calls += 1 + self.assertEqual(add_container.call_count, + add_calls) + self.assertEqual(remove_container.call_count, + remove_calls) + + +if __name__ == '__main__': + unittest.main() From 6786cdf036b4faabe3928c3d0dd9615d94834801 Mon Sep 17 00:00:00 2001 From: Harshada Mangesh Kakad Date: Thu, 31 Dec 2015 01:44:00 -0800 Subject: [PATCH 27/52] Fixing the deprecated library function. os.popen() is deprecated since version 2.6. Resolved with use of subprocess module. Change-Id: I4409cdd9edbc1a26d6f99c125c9100fadda5d758 Partial-Bug: #1529836 --- doc/source/conf.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 5d2fbd304d..7dd61d33b8 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -13,9 +13,10 @@ # All configuration values have a default; values that are commented out # serve to show the default. -import sys -import os import datetime +import os +import subprocess +import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the @@ -144,8 +145,10 @@ modindex_common_prefix = ['swift.'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' -git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1" -html_last_updated_fmt = os.popen(git_cmd).read() +git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", + "-n1"] +html_last_updated_fmt = subprocess.Popen( + git_cmd, stdout=subprocess.PIPE).communicate()[0] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. From 23c7a58f8f1412c28b3a16b79be09c224c9f7d55 Mon Sep 17 00:00:00 2001 From: Hisashi Osanai Date: Fri, 11 Dec 2015 18:26:34 +0900 Subject: [PATCH 28/52] Fix ClientException handling in Container Sync swift/container/sync.py uses swift.common.internal_client.delete_object and put_object and expected these methods raise ClientException. But delete_object and put_object never raise the exception so this patch raises ClientException when urllib2 library raises HTTPError. Co-Authored-By: Eran Rom Closes-Bug: #1419901 Change-Id: I58cbf77988979a07998a46d9d81be84d29b0d9bf --- bin/swift-dispersion-report | 5 ++--- swift/common/internal_client.py | 12 +++++++++--- test/unit/common/test_internal_client.py | 9 ++++----- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/bin/swift-dispersion-report b/bin/swift-dispersion-report index a1b5fdaab0..48dff80a89 100755 --- a/bin/swift-dispersion-report +++ b/bin/swift-dispersion-report @@ -23,7 +23,6 @@ from time import time from eventlet import GreenPool, hubs, patcher, Timeout from eventlet.pools import Pool -from eventlet.green import urllib2 from swift.common import direct_client try: @@ -174,8 +173,8 @@ def object_dispersion_report(coropool, connpool, account, object_ring, try: objects = [o['name'] for o in conn.get_container( container, prefix='dispersion_', full_listing=True)[1]] - except urllib2.HTTPError as err: - if err.getcode() != 404: + except ClientException as err: + if err.http_status != 404: raise print >>stderr, 'No objects to query. 
Has ' \ diff --git a/swift/common/internal_client.py b/swift/common/internal_client.py index 7dceda8427..8aaff72eb6 100644 --- a/swift/common/internal_client.py +++ b/swift/common/internal_client.py @@ -26,9 +26,10 @@ from swift import gettext_ as _ from time import gmtime, strftime, time from zlib import compressobj -from swift.common.utils import quote +from swift.common.exceptions import ClientException from swift.common.http import HTTP_NOT_FOUND, HTTP_MULTIPLE_CHOICES from swift.common.swob import Request +from swift.common.utils import quote from swift.common.wsgi import loadapp, pipeline_property @@ -807,9 +808,14 @@ class SimpleClient(object): self.attempts += 1 try: return self.base_request(method, **kwargs) - except (socket.error, httplib.HTTPException, urllib2.URLError): + except (socket.error, httplib.HTTPException, urllib2.URLError) \ + as err: if self.attempts > retries: - raise + if isinstance(err, urllib2.HTTPError): + raise ClientException('Raise too many retries', + http_status=err.getcode()) + else: + raise sleep(backoff) backoff = min(backoff * 2, self.max_backoff) diff --git a/test/unit/common/test_internal_client.py b/test/unit/common/test_internal_client.py index d2ef735324..1dfc31b409 100644 --- a/test/unit/common/test_internal_client.py +++ b/test/unit/common/test_internal_client.py @@ -26,8 +26,7 @@ from six.moves import range from six.moves.urllib.parse import quote from test.unit import FakeLogger from eventlet.green import urllib2 -from swift.common import internal_client -from swift.common import swob +from swift.common import exceptions, internal_client, swob from swift.common.storage_policy import StoragePolicy from test.unit import with_tempdir, write_fake_ring, patch_policies @@ -1329,7 +1328,7 @@ class TestSimpleClient(unittest.TestCase): mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5) with mock.patch('swift.common.internal_client.sleep') \ as mock_sleep: - self.assertRaises(urllib2.HTTPError, + self.assertRaises(exceptions.ClientException, c.retry_request, request_method, retries=1) self.assertEqual(mock_sleep.call_count, 1) self.assertEqual(mock_urlopen.call_count, 2) @@ -1347,7 +1346,7 @@ class TestSimpleClient(unittest.TestCase): mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5) with mock.patch('swift.common.internal_client.sleep') \ as mock_sleep: - self.assertRaises(urllib2.HTTPError, + self.assertRaises(exceptions.ClientException, c.retry_request, request_method, container='con', retries=1) self.assertEqual(mock_sleep.call_count, 1) @@ -1366,7 +1365,7 @@ class TestSimpleClient(unittest.TestCase): mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5) with mock.patch('swift.common.internal_client.sleep') \ as mock_sleep: - self.assertRaises(urllib2.HTTPError, + self.assertRaises(exceptions.ClientException, c.retry_request, request_method, container='con', name='obj', retries=1) self.assertEqual(mock_sleep.call_count, 1) From b35f3c90bde8a7ccb50440bda5800cbb8274a5a1 Mon Sep 17 00:00:00 2001 From: Kota Tsuyuzaki Date: Fri, 8 Jan 2016 01:29:11 -0800 Subject: [PATCH 29/52] Add note COPY with conditional headers Swift now supports Range header for COPY (or PUT with X-Copy-From) to make a partial copied object. This patch adds the note to show it obviously supported in Swift community. 
Change-Id: I6bf28f0932c90e7b305cd61aabce4ed028ae691e
Partial-Bug: #1532126
---
 swift/proxy/controllers/obj.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py
index e5910d312e..1a0d1f187a 100644
--- a/swift/proxy/controllers/obj.py
+++ b/swift/proxy/controllers/obj.py
@@ -416,6 +416,11 @@ class BaseObjectController(Controller):
         This method handles copying objects based on values set in the
         headers 'X-Copy-From' and 'X-Copy-From-Account'
 
+        Note that if the incoming request has some conditional headers (e.g.
+        'Range', 'If-Match'), the *source* object will be evaluated for
+        these headers, i.e. if PUT with both 'X-Copy-From' and 'Range',
+        Swift will make a partial copy as a new object.
+
         This method was added as part of the refactoring of the PUT method and
         the functionality is expected to be moved to middleware
         """
From fb6751d8ba133c57e1ebb76be71a96f2f120b8ca Mon Sep 17 00:00:00 2001
From: Paul Dardeau
Date: Fri, 8 Jan 2016 22:49:05 +0000
Subject: [PATCH 30/52] Look for device holes that can be reused when adding
 new device.

Change-Id: I1980ebdd9dc89848173d8ca2fe2afb74029dcfa2
Closes-Bug: 1532276
---
 swift/common/ring/builder.py          |  5 +-
 test/unit/common/ring/test_builder.py | 66 +++++++++++++++++++++++++++
 2 files changed, 70 insertions(+), 1 deletion(-)

diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py
index 7629bbb900..a1a57603f1 100644
--- a/swift/common/ring/builder.py
+++ b/swift/common/ring/builder.py
@@ -336,7 +336,10 @@ class RingBuilder(object):
         if 'id' not in dev:
             dev['id'] = 0
             if self.devs:
-                dev['id'] = max(d['id'] for d in self.devs if d) + 1
+                try:
+                    dev['id'] = self.devs.index(None)
+                except ValueError:
+                    dev['id'] = len(self.devs)
         if dev['id'] < len(self.devs) and self.devs[dev['id']] is not None:
             raise exceptions.DuplicateDeviceError(
                 'Duplicate device id: %d' % dev['id'])
diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py
index 99348d445e..57f0ee8649 100644
--- a/test/unit/common/ring/test_builder.py
+++ b/test/unit/common/ring/test_builder.py
@@ -2318,6 +2318,72 @@ class TestRingBuilder(unittest.TestCase):
             msg = 'Replica count of 3 requires more than 2 devices'
             self.assertIn(msg, str(e.exception))
 
+    def _add_dev_delete_first_n(self, add_dev_count, n):
+        rb = ring.RingBuilder(8, 3, 1)
+        dev_names = ['sda', 'sdb', 'sdc', 'sdd', 'sde', 'sdf']
+        for i in range(add_dev_count):
+            if i < len(dev_names):
+                dev_name = dev_names[i]
+            else:
+                dev_name = 'sda'
+            rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
+                        'port': 6000, 'weight': 1.0, 'device': dev_name})
+        rb.rebalance()
+        if (n > 0):
+            rb.pretend_min_part_hours_passed()
+            # remove first n
+            for i in range(n):
+                rb.remove_dev(i)
+            rb.pretend_min_part_hours_passed()
+            rb.rebalance()
+        return rb
+
+    def test_reuse_of_dev_holes_without_id(self):
+        # try with contiguous holes at beginning
+        add_dev_count = 6
+        rb = self._add_dev_delete_first_n(add_dev_count, add_dev_count - 3)
+        new_dev_id = rb.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
+                                 'port': 6000, 'weight': 1.0,
+                                 'device': 'sda'})
+        self.assertTrue(new_dev_id < add_dev_count)
+
+        # try with non-contiguous holes
+        # [0, 1, None, 3, 4, None]
+        rb2 = ring.RingBuilder(8, 3, 1)
+        for i in range(6):
+            rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
+                         'port': 6000, 'weight': 1.0, 'device': 'sda'})
+        rb2.rebalance()
+        rb2.pretend_min_part_hours_passed()
+        rb2.remove_dev(2)
+        rb2.remove_dev(5)
+
rb2.pretend_min_part_hours_passed()
+        rb2.rebalance()
+        first = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
+                             'port': 6000, 'weight': 1.0, 'device': 'sda'})
+        second = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
+                              'port': 6000, 'weight': 1.0, 'device': 'sda'})
+        # add a new one (without reusing a hole)
+        third = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
+                             'port': 6000, 'weight': 1.0, 'device': 'sda'})
+        self.assertEqual(first, 2)
+        self.assertEqual(second, 5)
+        self.assertEqual(third, 6)
+
+    def test_reuse_of_dev_holes_with_id(self):
+        add_dev_count = 6
+        rb = self._add_dev_delete_first_n(add_dev_count, add_dev_count - 3)
+        # add specifying id
+        exp_new_dev_id = 2
+        # [dev, dev, None, dev, dev, None]
+        try:
+            new_dev_id = rb.add_dev({'id': exp_new_dev_id, 'region': 0,
+                                     'zone': 0, 'ip': '127.0.0.1',
+                                     'port': 6000, 'weight': 1.0,
+                                     'device': 'sda'})
+            self.assertEqual(new_dev_id, exp_new_dev_id)
+        except exceptions.DuplicateDeviceError:
+            self.fail("device hole not reused")
+
 
 class TestGetRequiredOverload(unittest.TestCase):
From 167bb5eeb82886d67c1b382417fb22b8ea85f0d3 Mon Sep 17 00:00:00 2001
From: Timur Alperovich
Date: Wed, 16 Dec 2015 12:07:27 -0800
Subject: [PATCH 31/52] Fix IPv6 handling in MemcacheConnPool.

The patch removes the assumption of IPv4-only addresses in the
MemcacheConnPool. The changes are around address handling. Namely, if a
server is specified with an address [address
]:port (port is optional), it is assumed to be an IPv6 address [1]. If an
IPv6 address is specified without "[]", an exception is raised as it is
impossible to parse such addresses correctly.

For testing, memcache can be configured to listen on the link-local,
unique-local, or ::1 (equivalent to 127.0.0.1) addresses. Link-local
addresses are assigned by default to each interface and are of the form
"fe80::dead:beef". These addresses require a scope ID, which would look
like "fe80::dead:beef%eth0" (replacing eth0 with the correct interface).

Unique-local addresses are any addresses in the fc00::/7 subnet. To add
a ULA to an interface use the "ip" utility. For example:
"ip -6 address add fc01::dead:beef dev eth0".

Lastly, and probably simplest, memcache can be configured to listen on
"::1". The same address would be used in the swift configuration, e.g.
"[::1]:11211".

Note: only memcached version 1.4.25 or greater supports binding to an
IPv6 address.

Fixes #1526570

[1] IPv6 host literals: https://tools.ietf.org/html/rfc3986#section-3.2.2

Change-Id: I8408143c1d47d24e70df56a08167c529825276a2
---
 swift/common/memcached.py          |  51 +++++++++--
 test/unit/common/test_memcached.py | 141 +++++++++++++++++++++++++++++
 2 files changed, 184 insertions(+), 8 deletions(-)

diff --git a/swift/common/memcached.py b/swift/common/memcached.py
index bb359539ae..db73e7b455 100644
--- a/swift/common/memcached.py
+++ b/swift/common/memcached.py
@@ -47,6 +47,7 @@ http://github.com/memcached/memcached/blob/1.4.2/doc/protocol.txt
 import six.moves.cPickle as pickle
 import json
 import logging
+import re
 import time
 from bisect import bisect
 from swift import gettext_ as _
@@ -101,23 +102,57 @@ class MemcachePoolTimeout(Timeout):
 
 
 class MemcacheConnPool(Pool):
-    """Connection pool for Memcache Connections"""
+    """
+    Connection pool for Memcache Connections
+
+    The *server* parameter can be a hostname, an IPv4 address, or an IPv6
+    address with an optional port. If an IPv6 address is specified it **must**
+    be enclosed in [], like *[::1]* or *[::1]:11211*. This follows the
+    accepted prescription for IPv6 host literals:
+    https://tools.ietf.org/html/rfc3986#section-3.2.2.
+
+    Examples:
+
+        * memcache.local:11211
+        * 127.0.0.1:11211
+        * [::1]:11211
+        * [::1]
+
+    """
+    IPV6_RE = re.compile("^\[(?P<address>
.*)\](:(?P<port>[0-9]+))?$")
 
     def __init__(self, server, size, connect_timeout):
         Pool.__init__(self, max_size=size)
         self.server = server
         self._connect_timeout = connect_timeout
 
-    def create(self):
-        if ':' in self.server:
-            host, port = self.server.split(':')
+    def _get_addr(self):
+        port = DEFAULT_MEMCACHED_PORT
+        # IPv6 addresses must be between '[]'
+        if self.server.startswith('['):
+            match = MemcacheConnPool.IPV6_RE.match(self.server)
+            if not match:
+                raise ValueError("Invalid IPv6 address: %s" % self.server)
+            host = match.group('address')
+            port = match.group('port') or port
         else:
-            host = self.server
-            port = DEFAULT_MEMCACHED_PORT
-        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            if ':' in self.server:
+                tokens = self.server.split(':')
+                if len(tokens) > 2:
+                    raise ValueError("IPv6 addresses must be between '[]'")
+                host, port = tokens
+            else:
+                host = self.server
+        return (host, port)
+
+    def create(self):
+        host, port = self._get_addr()
+        addrs = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
+                                   socket.SOCK_STREAM)
+        family, socktype, proto, canonname, sockaddr = addrs[0]
+        sock = socket.socket(family, socket.SOCK_STREAM)
         sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
         with Timeout(self._connect_timeout):
-            sock.connect((host, int(port)))
+            sock.connect(sockaddr)
         return (sock.makefile(), sock)
 
     def get(self):
diff --git a/test/unit/common/test_memcached.py b/test/unit/common/test_memcached.py
index 1490c02852..d226bd4c9c 100644
--- a/test/unit/common/test_memcached.py
+++ b/test/unit/common/test_memcached.py
@@ -182,9 +182,121 @@ class TestMemcached(unittest.TestCase):
                     one = False
                 if peeripport == sock2ipport:
                     two = False
+            self.assertEqual(len(memcache_client._errors[sock1ipport]), 0)
+            self.assertEqual(len(memcache_client._errors[sock2ip]), 0)
         finally:
             memcached.DEFAULT_MEMCACHED_PORT = orig_port
 
+    def test_get_conns_v6(self):
+        if not socket.has_ipv6:
+            return
+        try:
+            sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+            sock.bind(('::1', 0, 0, 0))
+            sock.listen(1)
+            sock_addr = sock.getsockname()
+            server_socket = '[%s]:%s' % (sock_addr[0], sock_addr[1])
+            memcache_client = memcached.MemcacheRing([server_socket])
+            key = uuid4().hex
+            for conn in memcache_client._get_conns(key):
+                peer_sockaddr = conn[2].getpeername()
+                peer_socket = '[%s]:%s' % (peer_sockaddr[0], peer_sockaddr[1])
+                self.assertEqual(peer_socket, server_socket)
+            self.assertEqual(len(memcache_client._errors[server_socket]), 0)
+        finally:
+            sock.close()
+
+    def test_get_conns_v6_default(self):
+        if not socket.has_ipv6:
+            return
+        try:
+            sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+            sock.bind(('::1', 0))
+            sock.listen(1)
+            sock_addr = sock.getsockname()
+            server_socket = '[%s]:%s' % (sock_addr[0], sock_addr[1])
+            server_host = '[%s]' % sock_addr[0]
+            memcached.DEFAULT_MEMCACHED_PORT = sock_addr[1]
+            memcache_client = memcached.MemcacheRing([server_host])
+            key = uuid4().hex
+            for conn in memcache_client._get_conns(key):
+                peer_sockaddr = conn[2].getpeername()
+                peer_socket = '[%s]:%s' % (peer_sockaddr[0], peer_sockaddr[1])
+                self.assertEqual(peer_socket, server_socket)
+            self.assertEqual(len(memcache_client._errors[server_host]), 0)
+        finally:
+            sock.close()
+
+    def test_get_conns_bad_v6(self):
+        if not socket.has_ipv6:
+            return
+        try:
+            sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+            sock.bind(('::1', 0))
+            sock.listen(1)
+            sock_addr = sock.getsockname()
+            # IPv6 address with missing [] is invalid
+            server_socket = '%s:%s' % (sock_addr[0], sock_addr[1])
+
memcache_client = memcached.MemcacheRing([server_socket]) + key = uuid4().hex + for conn in memcache_client._get_conns(key): + peer_sockaddr = conn[2].getpeername() + peer_socket = '[%s]:%s' % (peer_sockaddr[0], peer_sockaddr[1]) + self.assertEqual(peer_socket, server_socket) + # Expect a parsing error when creating the socket + self.assertEqual(len(memcache_client._errors[server_socket]), 1) + finally: + sock.close() + + def test_get_conns_hostname(self): + with patch('swift.common.memcached.socket.getaddrinfo') as addrinfo: + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.bind(('127.0.0.1', 0)) + sock.listen(1) + sock_addr = sock.getsockname() + fqdn = socket.getfqdn() + server_socket = '%s:%s' % (fqdn, sock_addr[1]) + addrinfo.return_value = [(socket.AF_INET, + socket.SOCK_STREAM, 0, '', + ('127.0.0.1', sock_addr[1]))] + memcache_client = memcached.MemcacheRing([server_socket]) + key = uuid4().hex + for conn in memcache_client._get_conns(key): + peer_sockaddr = conn[2].getpeername() + peer_socket = '%s:%s' % (peer_sockaddr[0], + peer_sockaddr[1]) + self.assertEqual(peer_socket, + '127.0.0.1:%d' % sock_addr[1]) + self.assertEqual(len(memcache_client._errors[server_socket]), + 0) + finally: + sock.close() + + def test_get_conns_hostname6(self): + with patch('swift.common.memcached.socket.getaddrinfo') as addrinfo: + try: + sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) + sock.bind(('::1', 0)) + sock.listen(1) + sock_addr = sock.getsockname() + fqdn = socket.getfqdn() + server_socket = '%s:%s' % (fqdn, sock_addr[1]) + addrinfo.return_value = [(socket.AF_INET6, + socket.SOCK_STREAM, 0, '', + ('::1', sock_addr[1]))] + memcache_client = memcached.MemcacheRing([server_socket]) + key = uuid4().hex + for conn in memcache_client._get_conns(key): + peer_sockaddr = conn[2].getpeername() + peer_socket = '[%s]:%s' % (peer_sockaddr[0], + peer_sockaddr[1]) + self.assertEqual(peer_socket, '[::1]:%d' % sock_addr[1]) + self.assertEqual(len(memcache_client._errors[server_socket]), + 0) + finally: + sock.close() + def test_set_get(self): memcache_client = memcached.MemcacheRing(['1.2.3.4:11211']) mock = MockMemcached() @@ -349,6 +461,13 @@ class TestMemcached(unittest.TestCase): def test_connection_pooling(self): with patch('swift.common.memcached.socket') as mock_module: + def mock_getaddrinfo(host, port, family=socket.AF_INET, + socktype=socket.SOCK_STREAM, proto=0, + flags=0): + return [(family, socktype, proto, '', (host, port))] + + mock_module.getaddrinfo = mock_getaddrinfo + # patch socket, stub socket.socket, mock sock mock_sock = mock_module.socket.return_value @@ -462,5 +581,27 @@ class TestMemcached(unittest.TestCase): finally: memcached.MemcacheConnPool = orig_conn_pool + def test_connection_pool_parser(self): + default = memcached.DEFAULT_MEMCACHED_PORT + addrs = [('1.2.3.4', '1.2.3.4', default), + ('1.2.3.4:5000', '1.2.3.4', 5000), + ('[dead:beef::1]', 'dead:beef::1', default), + ('[dead:beef::1]:5000', 'dead:beef::1', 5000), + ('example.com', 'example.com', default), + ('example.com:5000', 'example.com', 5000), + ('foo.1-2-3.bar.com:5000', 'foo.1-2-3.bar.com', 5000), + ('1.2.3.4:10:20', None, None), + ('dead:beef::1:5000', None, None)] + + for addr, expected_host, expected_port in addrs: + pool = memcached.MemcacheConnPool(addr, 1, 0) + if expected_host: + host, port = pool._get_addr() + self.assertEqual(expected_host, host) + self.assertEqual(expected_port, int(port)) + else: + with self.assertRaises(ValueError): + pool._get_addr() + if __name__ == 
'__main__': unittest.main() From 1f3304c5153e01988b8f4493875b6489e93f76d0 Mon Sep 17 00:00:00 2001 From: Ben Martin Date: Mon, 14 Dec 2015 15:28:17 -0600 Subject: [PATCH 32/52] Print min_part_hours lockout time remaining swift-ring-builder currently displays only min_part_hours, not the amount of time remaining before a rebalance can occur. This information is readily available and is now displayed as a quality-of-life improvement. Additionally, this change fixes a bug where the time since the last rebalance was updated whenever rebalance was called, regardless of whether any partitions were actually reassigned. That bug could leave partitions unable to be reassigned, since they never aged according to the time since the last rebalance. Change-Id: Ie0e2b5e25140cbac7465f31a26a4998beb3892e9 Closes-Bug: #1526017 --- swift/cli/ringbuilder.py | 16 ++++++-- swift/common/ring/builder.py | 13 ++++++- test/unit/cli/test_ringbuilder.py | 61 ++++++++++++++++++++++++++++++- 3 files changed, 84 insertions(+), 6 deletions(-) diff --git a/swift/cli/ringbuilder.py b/swift/cli/ringbuilder.py index 192a788518..072be037ad 100755 --- a/swift/cli/ringbuilder.py +++ b/swift/cli/ringbuilder.py @@ -25,6 +25,7 @@ from os.path import basename, abspath, dirname, exists, join as pathjoin from sys import argv as sys_argv, exit, stderr, stdout from textwrap import wrap from time import time +from datetime import timedelta import optparse import math @@ -444,7 +445,9 @@ swift-ring-builder builder.parts, builder.replicas, regions, zones, dev_count, balance, dispersion_trailer)) print('The minimum number of hours before a partition can be ' - 'reassigned is %s' % builder.min_part_hours) + 'reassigned is %s (%s remaining)' % ( + builder.min_part_hours, + timedelta(seconds=builder.min_part_seconds_left))) print('The overload factor is %0.2f%% (%.6f)' % ( builder.overload * 100, builder.overload)) if builder.devs: @@ -787,6 +790,14 @@ swift-ring-builder rebalance [options] handler.setFormatter(formatter) logger.addHandler(handler) + if builder.min_part_seconds_left > 0 and not options.force: + print('No partitions could be reassigned.') + print('The time between rebalances must be at least ' + 'min_part_hours: %s hours (%s remaining)' % ( + builder.min_part_hours, + timedelta(seconds=builder.min_part_seconds_left))) + exit(EXIT_WARNING) + devs_changed = builder.devs_changed try: last_balance = builder.get_balance() @@ -802,8 +813,7 @@ swift-ring-builder rebalance [options] exit(EXIT_ERROR) if not (parts or options.force or removed_devs): print('No partitions could be reassigned.') - print('Either none need to be or none can be due to ' - 'min_part_hours [%s].'
% builder.min_part_hours) + print('There is no need to do so at this time') exit(EXIT_WARNING) # If we set device's weight to zero, currently balance will be set # special value(MAX_BALANCE) until zero weighted device return all diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py index 7629bbb900..193302d6e8 100644 --- a/swift/common/ring/builder.py +++ b/swift/common/ring/builder.py @@ -139,6 +139,12 @@ class RingBuilder(object): finally: self.logger.disabled = True + @property + def min_part_seconds_left(self): + """Get the total seconds until a rebalance can be performed""" + elapsed_seconds = int(time() - self._last_part_moves_epoch) + return max((self.min_part_hours * 3600) - elapsed_seconds, 0) + def weight_of_one_part(self): """ Returns the weight of each partition as calculated from the @@ -729,11 +735,12 @@ class RingBuilder(object): def pretend_min_part_hours_passed(self): """ Override min_part_hours by marking all partitions as having been moved - 255 hours ago. This can be used to force a full rebalance on the next - call to rebalance. + 255 hours ago and last move epoch to 'the beginning of time'. This can + be used to force a full rebalance on the next call to rebalance. """ for part in range(self.parts): self._last_part_moves[part] = 0xff + self._last_part_moves_epoch = 0 def get_part_devices(self, part): """ @@ -835,6 +842,8 @@ class RingBuilder(object): more recently than min_part_hours. """ elapsed_hours = int(time() - self._last_part_moves_epoch) / 3600 + if elapsed_hours <= 0: + return for part in range(self.parts): # The "min(self._last_part_moves[part] + elapsed_hours, 0xff)" # which was here showed up in profiling, so it got inlined. diff --git a/test/unit/cli/test_ringbuilder.py b/test/unit/cli/test_ringbuilder.py index 25200b35a9..88e081ee85 100644 --- a/test/unit/cli/test_ringbuilder.py +++ b/test/unit/cli/test_ringbuilder.py @@ -1739,7 +1739,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "64 partitions, 3.000000 replicas, 4 regions, 4 zones, " \ "4 devices, 100.00 balance, 0.00 dispersion\n" \ "The minimum number of hours before a partition can be " \ - "reassigned is 1\n" \ + "reassigned is 1 (0:00:00 remaining)\n" \ "The overload factor is 0.00%% (0.000000)\n" \ "Devices: id region zone ip address port " \ "replication ip replication port name weight " \ @@ -1796,6 +1796,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ring = RingBuilder.load(self.tmpfile) ring.set_dev_weight(3, 0.0) ring.rebalance() + ring.pretend_min_part_hours_passed() ring.remove_dev(3) ring.save(self.tmpfile) @@ -1806,6 +1807,64 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.assertTrue(ring.validate()) self.assertEqual(ring.devs[3], None) + def test_rebalance_resets_time_remaining(self): + self.create_sample_ring() + ring = RingBuilder.load(self.tmpfile) + + time_path = 'swift.common.ring.builder.time' + argv = ["", self.tmpfile, "rebalance", "3"] + time = 0 + + # first rebalance, should have 1 hour left before next rebalance + time += 3600 + with mock.patch(time_path, return_value=time): + self.assertEqual(ring.min_part_seconds_left, 0) + self.assertRaises(SystemExit, ringbuilder.main, argv) + ring = RingBuilder.load(self.tmpfile) + self.assertEqual(ring.min_part_seconds_left, 3600) + + # min part hours passed, change ring and save for rebalance + ring.set_dev_weight(0, ring.devs[0]['weight'] * 2) + ring.save(self.tmpfile) + + # second rebalance, should have 1 hour left + time += 3600 + with 
mock.patch(time_path, return_value=time): + self.assertEqual(ring.min_part_seconds_left, 0) + self.assertRaises(SystemExit, ringbuilder.main, argv) + ring = RingBuilder.load(self.tmpfile) + self.assertEqual(ring.min_part_seconds_left, 3600) + + def test_rebalance_failure_does_not_reset_last_moves_epoch(self): + ring = RingBuilder(8, 3, 1) + ring.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '127.0.0.1', 'port': 6010, 'device': 'sda1'}) + ring.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '127.0.0.1', 'port': 6020, 'device': 'sdb1'}) + ring.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '127.0.0.1', 'port': 6030, 'device': 'sdc1'}) + + time_path = 'swift.common.ring.builder.time' + argv = ["", self.tmpfile, "rebalance", "3"] + + with mock.patch(time_path, return_value=0): + ring.rebalance() + ring.save(self.tmpfile) + + # min part hours not passed + with mock.patch(time_path, return_value=(3600 * 0.6)): + self.assertRaises(SystemExit, ringbuilder.main, argv) + ring = RingBuilder.load(self.tmpfile) + self.assertEqual(ring.min_part_seconds_left, 3600 * 0.4) + + ring.save(self.tmpfile) + + # min part hours passed, no partitions need to be moved + with mock.patch(time_path, return_value=(3600 * 1.5)): + self.assertRaises(SystemExit, ringbuilder.main, argv) + ring = RingBuilder.load(self.tmpfile) + self.assertEqual(ring.min_part_seconds_left, 0) + def test_rebalance_with_seed(self): self.create_sample_ring() # Test rebalance using explicit seed parameter From e6194113a3c81563590eabf8f761ccb988bb917c Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Fri, 8 Jan 2016 16:38:31 -0800 Subject: [PATCH 33/52] Validate X-Timestamps Previously, attempting to PUT a new object with an X-Timestamp header less than or equal to zero (i.e., for a timestamp on or before 1970-01-01 00:00:00) would cause the object-server to 500. While we're at it, cap X-Timestamp at 9999999999 (2286-11-20 17:46:40) so we don't get an eleventh digit before the decimal point.
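In effect, Timestamp construction now enforces both bounds up front. A condensed sketch of just the added checks (the real Timestamp class in swift/common/utils.py also handles offsets, string formats, and arithmetic; this cut-down version only illustrates the new behavior):

    class Timestamp(object):
        # condensed illustration -- not the full implementation
        def __init__(self, timestamp):
            self.timestamp = float(timestamp)
            if self.timestamp < 0:
                raise ValueError('timestamp cannot be negative')
            if self.timestamp >= 10000000000:
                raise ValueError('timestamp too large')

    Timestamp(1452557187.03610)  # fine
    # Timestamp(-1) and Timestamp(10000000000) both raise ValueError,
    # which the object-server now surfaces as a 400 instead of a 500.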
Closes-Bug: 1532471 Change-Id: I23666ec8a067d829eaf9bfe54bd086c320b3429e --- swift/common/utils.py | 4 ++++ swift/obj/server.py | 2 +- test/unit/obj/test_server.py | 39 ++++++++++++++++++++++++++++++++++++ 3 files changed, 44 insertions(+), 1 deletion(-) diff --git a/swift/common/utils.py b/swift/common/utils.py index 831f651ff1..e36122a098 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -778,6 +778,10 @@ class Timestamp(object): raise ValueError( 'delta must be greater than %d' % (-1 * self.raw)) self.timestamp = float(self.raw * PRECISION) + if self.timestamp < 0: + raise ValueError('timestamp cannot be negative') + if self.timestamp >= 10000000000: + raise ValueError('timestamp too large') def __repr__(self): return INTERNAL_FORMAT % (self.timestamp, self.offset) diff --git a/swift/obj/server.py b/swift/obj/server.py index 2944bccca7..06fb1f564f 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -558,7 +558,7 @@ class ObjectController(BaseStorageServer): return HTTPInsufficientStorage(drive=device, request=request) except (DiskFileNotExist, DiskFileQuarantined): orig_metadata = {} - orig_timestamp = 0 + orig_timestamp = Timestamp(0) # Checks for If-None-Match if request.if_none_match is not None and orig_metadata: diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index ef32f29b06..adc4941fd7 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -762,6 +762,45 @@ class TestObjectController(unittest.TestCase): self.assertEqual(resp.status_int, 409) self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp) + def test_PUT_new_object_really_old_timestamp(self): + req = Request.blank( + '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': '-1', # 1969-12-31 23:59:59 + 'Content-Length': '6', + 'Content-Type': 'application/octet-stream'}) + req.body = 'VERIFY' + resp = req.get_response(self.object_controller) + self.assertEqual(resp.status_int, 400) + + req = Request.blank( + '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': '1', # 1970-01-01 00:00:01 + 'Content-Length': '6', + 'Content-Type': 'application/octet-stream'}) + req.body = 'VERIFY' + resp = req.get_response(self.object_controller) + self.assertEqual(resp.status_int, 201) + + def test_PUT_object_really_new_timestamp(self): + req = Request.blank( + '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': '9999999999', # 2286-11-20 17:46:40 + 'Content-Length': '6', + 'Content-Type': 'application/octet-stream'}) + req.body = 'VERIFY' + resp = req.get_response(self.object_controller) + self.assertEqual(resp.status_int, 201) + + # roll over to 11 digits before the decimal + req = Request.blank( + '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': '10000000000', + 'Content-Length': '6', + 'Content-Type': 'application/octet-stream'}) + req.body = 'VERIFY' + resp = req.get_response(self.object_controller) + self.assertEqual(resp.status_int, 400) + def test_PUT_no_etag(self): req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, From 4ffc4ba411f67c8407ba38d082a3a51a96ad7e04 Mon Sep 17 00:00:00 2001 From: Jonathan Hinson Date: Tue, 12 Jan 2016 11:46:21 -0600 Subject: [PATCH 34/52] Functional tests for if-match with multiple etags Multiple etags can be provided on an if-match or if-none-match request. This is currently being tested in the unit tests, but not in the functional tests. 
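For reference, the request shape at issue looks like the following sketch -- raw urllib2 against a placeholder endpoint, token, and etag, whereas the functional tests below drive the same thing through the test framework's file helpers:

    import urllib2

    etag = 'd41d8cd98f00b204e9800998ecf8427e'  # the object's md5 (placeholder)
    req = urllib2.Request('http://127.0.0.1:8080/v1/AUTH_test/c/o')
    req.add_header('X-Auth-Token', 'AUTH_tk_placeholder')
    # If-Match may list several etags; the GET succeeds if any one of
    # them matches the object, and urllib2 raises HTTPError(412) if none do.
    req.add_header('If-Match', '"bogus1", "%s", "bogus2"' % etag)
    resp = urllib2.urlopen(req)

If-None-Match is symmetrical: the GET comes back 304 Not Modified as soon as any listed etag matches.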
Since these etags can be modified by middleware, we need functional tests to assert multiple-etag requests are handled correctly. Change-Id: Idc409c85e8aa82b59dc2bc28af6ca2617de82699 --- test/functional/tests.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/test/functional/tests.py b/test/functional/tests.py index c55133eb70..2ec182253a 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -2478,6 +2478,15 @@ class TestFileComparison(Base): self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) self.assert_status(412) + def testIfMatchMultipleEtags(self): + for file_item in self.env.files: + hdrs = {'If-Match': '"bogus1", "%s", "bogus2"' % file_item.md5} + self.assertTrue(file_item.read(hdrs=hdrs)) + + hdrs = {'If-Match': '"bogus1", "bogus2", "bogus3"'} + self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) + self.assert_status(412) + def testIfNoneMatch(self): for file_item in self.env.files: hdrs = {'If-None-Match': 'bogus'} @@ -2487,6 +2496,16 @@ class TestFileComparison(Base): self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) self.assert_status(304) + def testIfNoneMatchMultipleEtags(self): + for file_item in self.env.files: + hdrs = {'If-None-Match': '"bogus1", "bogus2", "bogus3"'} + self.assertTrue(file_item.read(hdrs=hdrs)) + + hdrs = {'If-None-Match': + '"bogus1", "bogus2", "%s"' % file_item.md5} + self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) + self.assert_status(304) + def testIfModifiedSince(self): for file_item in self.env.files: hdrs = {'If-Modified-Since': self.env.time_old_f1} From d5ff5447be30b44bf4acc8b912b6241a44f710be Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Tue, 12 Jan 2016 16:42:06 -0800 Subject: [PATCH 35/52] Install liberasurecode packages in SAIO. 
Change-Id: If673afa2b61a3e388612debf4860d561960963a3 --- doc/source/development_saio.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/source/development_saio.rst b/doc/source/development_saio.rst index 1a282983b6..2dfd0ffbcb 100644 --- a/doc/source/development_saio.rst +++ b/doc/source/development_saio.rst @@ -37,7 +37,8 @@ Installing dependencies sudo apt-get update sudo apt-get install curl gcc memcached rsync sqlite3 xfsprogs \ - git-core libffi-dev python-setuptools + git-core libffi-dev python-setuptools \ + liberasurecode-dev sudo apt-get install python-coverage python-dev python-nose \ python-xattr python-eventlet \ python-greenlet python-pastedeploy \ @@ -48,7 +49,8 @@ Installing dependencies sudo yum update sudo yum install curl gcc memcached rsync sqlite xfsprogs git-core \ - libffi-devel xinetd python-setuptools \ + libffi-devel xinetd liberasurecode-devel \ + python-setuptools \ python-coverage python-devel python-nose \ pyxattr python-eventlet \ python-greenlet python-paste-deploy \ From 33476460239c9cdb08dd8065d22d84a4717da7be Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Fri, 8 Jan 2016 16:15:54 -0800 Subject: [PATCH 36/52] fixups for ipv6 memcache_servers docs Change-Id: I20d91c1e276014eaf210fa9eb43788bc17f4e8df --- doc/manpages/proxy-server.conf.5 | 3 ++- doc/source/deployment_guide.rst | 3 ++- etc/memcache.conf-sample | 1 + etc/proxy-server.conf-sample | 3 ++- swift/common/memcached.py | 15 ++++++++------- 5 files changed, 15 insertions(+), 10 deletions(-) diff --git a/doc/manpages/proxy-server.conf.5 b/doc/manpages/proxy-server.conf.5 index a606c6bcff..fe63165f56 100644 --- a/doc/manpages/proxy-server.conf.5 +++ b/doc/manpages/proxy-server.conf.5 @@ -384,7 +384,8 @@ Sets the maximum number of connections to each memcached server per worker. If not set in the configuration file, the value for memcache_servers will be read from /etc/swift/memcache.conf (see memcache.conf-sample) or lacking that file, it will default to 127.0.0.1:11211. You can specify multiple servers -separated with commas, as in: 10.1.2.3:11211,10.1.2.4:11211. +separated with commas, as in: 10.1.2.3:11211,10.1.2.4:11211. (IPv6 +addresses must follow rfc3986 section-3.2.2, i.e. [::1]:11211) .IP \fBmemcache_serialization_support\fR This sets how memcache values are serialized and deserialized: .RE diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index f06afc483b..81db76a23e 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -1278,7 +1278,8 @@ object_chunk_size 65536 Chunk size to read from client_chunk_size 65536 Chunk size to read from clients memcache_servers 127.0.0.1:11211 Comma separated list of - memcached servers ip:port + memcached servers + ip:port or [ipv6addr]:port memcache_max_connections 2 Max number of connections to each memcached server per worker diff --git a/etc/memcache.conf-sample b/etc/memcache.conf-sample index 7ec55f100f..813ecf9edb 100644 --- a/etc/memcache.conf-sample +++ b/etc/memcache.conf-sample @@ -2,6 +2,7 @@ # You can use this single conf file instead of having memcache_servers set in # several other conf files under [filter:cache] for example. You can specify # multiple servers separated with commas, as in: 10.1.2.3:11211,10.1.2.4:11211 +# (IPv6 addresses must follow rfc3986 section-3.2.2, i.e. 
[::1]:11211) # memcache_servers = 127.0.0.1:11211 # # Sets how memcache values are serialized and deserialized: diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index 125e4b3a3c..f3506476af 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -388,7 +388,8 @@ use = egg:swift#memcache # If not set here, the value for memcache_servers will be read from # memcache.conf (see memcache.conf-sample) or lacking that file, it will # default to the value below. You can specify multiple servers separated with -# commas, as in: 10.1.2.3:11211,10.1.2.4:11211 +# commas, as in: 10.1.2.3:11211,10.1.2.4:11211 (IPv6 addresses must +# follow rfc3986 section-3.2.2, i.e. [::1]:11211) # memcache_servers = 127.0.0.1:11211 # # Sets how memcache values are serialized and deserialized: diff --git a/swift/common/memcached.py b/swift/common/memcached.py index db73e7b455..1bfa424d02 100644 --- a/swift/common/memcached.py +++ b/swift/common/memcached.py @@ -108,15 +108,16 @@ class MemcacheConnPool(Pool): The *server* parameter can be a hostname, an IPv4 address, or an IPv6 address with an optional port. If an IPv6 address is specified it **must** be enclosed in [], like *[::1]* or *[::1]:11211*. This follows the accepted - prescription for IPv6 host literals: - https://tools.ietf.org/html/rfc3986#section-3.2.2. + prescription for `IPv6 host literals`_. - Examples: + Examples:: - * memcache.local:11211 - * 127.0.0.1:11211 - * [::1]:11211 - * [::1] + memcache.local:11211 + 127.0.0.1:11211 + [::1]:11211 + [::1] + + .. _IPv6 host literals: https://tools.ietf.org/html/rfc3986#section-3.2.2 """ IPV6_RE = re.compile("^\[(?P
.*)\](:(?P[0-9]+))?$") From fa5b32d27964478dfcccf71155d2aaa946c561f0 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Tue, 12 Jan 2016 14:18:30 -0800 Subject: [PATCH 37/52] Make object-auditor storage-policy-aware Previously, the object-auditor would always use a (replication) DiskFileManager when walking through AuditLocations, which would cause it to skip EC fragment archives with a warning like: Unexpected file /1452557187.03610#3.data: Invalid Timestamp value in filename '1452557187.03610#3.data' Now, the AuditLocation's policy will be used to find an appropriate manager to get the diskfile. Additionally, several .commit()s were added to the auditor tests so the .durable will be written, which is required when auditing EC fragment archives. Change-Id: I960e7d696fd9ad704ca1872b4ac821f9078838c7 Closes-Bug: 1533002 --- swift/obj/auditor.py | 21 ++++++-- test/unit/obj/test_auditor.py | 99 +++++++++++++++++++++++------------ 2 files changed, 82 insertions(+), 38 deletions(-) diff --git a/swift/obj/auditor.py b/swift/obj/auditor.py index 38fef209e1..705926a8df 100644 --- a/swift/obj/auditor.py +++ b/swift/obj/auditor.py @@ -28,6 +28,7 @@ from swift.common.utils import get_logger, ratelimit_sleep, dump_recon_cache, \ list_from_csv, listdir from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist from swift.common.daemon import Daemon +from swift.common.storage_policy import POLICIES SLEEP_BETWEEN_AUDITS = 30 @@ -39,7 +40,7 @@ class AuditorWorker(object): self.conf = conf self.logger = logger self.devices = devices - self.diskfile_mgr = diskfile.DiskFileManager(conf, self.logger) + self.diskfile_router = diskfile.DiskFileRouter(conf, self.logger) self.max_files_per_second = float(conf.get('files_per_second', 20)) self.max_bytes_per_second = float(conf.get('bytes_per_second', 10000000)) @@ -87,8 +88,16 @@ class AuditorWorker(object): total_quarantines = 0 total_errors = 0 time_auditing = 0 - all_locs = self.diskfile_mgr.object_audit_location_generator( - device_dirs=device_dirs) + # TODO: we should move audit-location generation to the storage policy, + # as we may (conceivably) have a different filesystem layout for each. + # We'd still need to generate the policies to audit from the actual + # directories found on-disk, and have appropriate error reporting if we + # find a directory that doesn't correspond to any known policy. This + # will require a sizable refactor, but currently all diskfile managers + # can find all diskfile locations regardless of policy -- so for now + # just use Policy-0's manager. 
+ all_locs = (self.diskfile_router[POLICIES[0]] + .object_audit_location_generator(device_dirs=device_dirs)) for location in all_locs: loop_time = time.time() self.failsafe_object_audit(location) @@ -187,8 +196,9 @@ class AuditorWorker(object): def raise_dfq(msg): raise DiskFileQuarantined(msg) + diskfile_mgr = self.diskfile_router[location.policy] try: - df = self.diskfile_mgr.get_diskfile_from_audit_location(location) + df = diskfile_mgr.get_diskfile_from_audit_location(location) with df.open(): metadata = df.get_metadata() obj_size = int(metadata['Content-Length']) @@ -261,7 +271,8 @@ class ObjectAuditor(Daemon): try: self.run_audit(**kwargs) except Exception as e: - self.logger.error(_("ERROR: Unable to run auditing: %s") % e) + self.logger.exception( + _("ERROR: Unable to run auditing: %s") % e) finally: sys.exit() diff --git a/test/unit/obj/test_auditor.py b/test/unit/obj/test_auditor.py index 3de4cf239d..23d6a2e19b 100644 --- a/test/unit/obj/test_auditor.py +++ b/test/unit/obj/test_auditor.py @@ -22,16 +22,22 @@ import string from shutil import rmtree from hashlib import md5 from tempfile import mkdtemp -from test.unit import FakeLogger, patch_policies, make_timestamp_iter +from test.unit import FakeLogger, patch_policies, make_timestamp_iter, \ + DEFAULT_TEST_EC_TYPE from swift.obj import auditor from swift.obj.diskfile import DiskFile, write_metadata, invalidate_hash, \ - get_data_dir, DiskFileManager, AuditLocation + get_data_dir, DiskFileManager, ECDiskFileManager, AuditLocation from swift.common.utils import mkdirs, normalize_timestamp, Timestamp -from swift.common.storage_policy import StoragePolicy, POLICIES +from swift.common.storage_policy import ECStoragePolicy, StoragePolicy, \ + POLICIES -_mocked_policies = [StoragePolicy(0, 'zero', False), - StoragePolicy(1, 'one', True)] +_mocked_policies = [ + StoragePolicy(0, 'zero', False), + StoragePolicy(1, 'one', True), + ECStoragePolicy(2, 'two', ec_type=DEFAULT_TEST_EC_TYPE, + ec_ndata=2, ec_nparity=1, ec_segment_size=4096), +] @patch_policies(_mocked_policies) @@ -58,25 +64,38 @@ class TestAuditor(unittest.TestCase): self.objects_2_p1 = os.path.join(self.devices, 'sdb', get_data_dir(POLICIES[1])) os.mkdir(self.objects_p1) + # policy 2 + self.objects_p2 = os.path.join(self.devices, 'sda', + get_data_dir(POLICIES[2])) + self.objects_2_p2 = os.path.join(self.devices, 'sdb', + get_data_dir(POLICIES[2])) + os.mkdir(self.objects_p2) - self.parts = self.parts_p1 = {} + self.parts = {} + self.parts_p1 = {} + self.parts_p2 = {} for part in ['0', '1', '2', '3']: self.parts[part] = os.path.join(self.objects, part) self.parts_p1[part] = os.path.join(self.objects_p1, part) + self.parts_p2[part] = os.path.join(self.objects_p2, part) os.mkdir(os.path.join(self.objects, part)) os.mkdir(os.path.join(self.objects_p1, part)) + os.mkdir(os.path.join(self.objects_p2, part)) self.conf = dict( devices=self.devices, mount_check='false', object_size_stats='10,100,1024,10240') self.df_mgr = DiskFileManager(self.conf, self.logger) + self.ec_df_mgr = ECDiskFileManager(self.conf, self.logger) - # diskfiles for policy 0, 1 + # diskfiles for policy 0, 1, 2 self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o', policy=POLICIES[0]) self.disk_file_p1 = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o', policy=POLICIES[1]) + self.disk_file_ec = self.ec_df_mgr.get_diskfile( + 'sda', '0', 'a', 'c', 'o', policy=POLICIES[2], frag_index=1) def tearDown(self): rmtree(os.path.dirname(self.testdir), ignore_errors=1) @@ -95,7 +114,9 @@ class 
TestAuditor(unittest.TestCase): auditor_worker = auditor.AuditorWorker(conf, self.logger, self.rcache, self.devices) check_common_defaults() - self.assertEqual(auditor_worker.diskfile_mgr.disk_chunk_size, 65536) + for policy in POLICIES: + mgr = auditor_worker.diskfile_router[policy] + self.assertEqual(mgr.disk_chunk_size, 65536) self.assertEqual(auditor_worker.max_files_per_second, 20) self.assertEqual(auditor_worker.zero_byte_only_at_fps, 0) @@ -105,7 +126,9 @@ class TestAuditor(unittest.TestCase): self.rcache, self.devices, zero_byte_only_at_fps=50) check_common_defaults() - self.assertEqual(auditor_worker.diskfile_mgr.disk_chunk_size, 4096) + for policy in POLICIES: + mgr = auditor_worker.diskfile_router[policy] + self.assertEqual(mgr.disk_chunk_size, 4096) self.assertEqual(auditor_worker.max_files_per_second, 50) self.assertEqual(auditor_worker.zero_byte_only_at_fps, 50) @@ -126,22 +149,24 @@ class TestAuditor(unittest.TestCase): 'Content-Length': str(os.fstat(writer._fd).st_size), } writer.put(metadata) + writer.commit(Timestamp(timestamp)) pre_quarantines = auditor_worker.quarantines auditor_worker.object_audit( AuditLocation(disk_file._datadir, 'sda', '0', - policy=POLICIES.legacy)) + policy=disk_file.policy)) self.assertEqual(auditor_worker.quarantines, pre_quarantines) os.write(writer._fd, 'extra_data') auditor_worker.object_audit( AuditLocation(disk_file._datadir, 'sda', '0', - policy=POLICIES.legacy)) + policy=disk_file.policy)) self.assertEqual(auditor_worker.quarantines, pre_quarantines + 1) run_tests(self.disk_file) run_tests(self.disk_file_p1) + run_tests(self.disk_file_ec) def test_object_audit_diff_data(self): auditor_worker = auditor.AuditorWorker(self.conf, self.logger, @@ -159,6 +184,7 @@ class TestAuditor(unittest.TestCase): 'Content-Length': str(os.fstat(writer._fd).st_size), } writer.put(metadata) + writer.commit(Timestamp(timestamp)) pre_quarantines = auditor_worker.quarantines # remake so it will have metadata @@ -177,6 +203,7 @@ class TestAuditor(unittest.TestCase): with self.disk_file.create() as writer: writer.write(data) writer.put(metadata) + writer.commit(Timestamp(timestamp)) auditor_worker.object_audit( AuditLocation(self.disk_file._datadir, 'sda', '0', @@ -253,6 +280,7 @@ class TestAuditor(unittest.TestCase): 'Content-Length': str(os.fstat(writer._fd).st_size), } writer.put(metadata) + writer.commit(Timestamp(timestamp)) with mock.patch('swift.obj.diskfile.DiskFileManager.diskfile_cls', lambda *_: 1 / 0): auditor_worker.audit_all_objects() @@ -267,59 +295,58 @@ class TestAuditor(unittest.TestCase): data = '0' * 1024 def write_file(df): - etag = md5() with df.create() as writer: writer.write(data) - etag.update(data) - etag = etag.hexdigest() metadata = { - 'ETag': etag, + 'ETag': md5(data).hexdigest(), 'X-Timestamp': timestamp, 'Content-Length': str(os.fstat(writer._fd).st_size), } writer.put(metadata) + writer.commit(Timestamp(timestamp)) # policy 0 write_file(self.disk_file) # policy 1 write_file(self.disk_file_p1) + # policy 2 + write_file(self.disk_file_ec) auditor_worker.audit_all_objects() self.assertEqual(auditor_worker.quarantines, pre_quarantines) # 1 object per policy falls into 1024 bucket - self.assertEqual(auditor_worker.stats_buckets[1024], 2) + self.assertEqual(auditor_worker.stats_buckets[1024], 3) self.assertEqual(auditor_worker.stats_buckets[10240], 0) # pick up some additional code coverage, large file data = '0' * 1024 * 1024 - etag = md5() - with self.disk_file.create() as writer: - writer.write(data) - etag.update(data) - etag = 
etag.hexdigest() - metadata = { - 'ETag': etag, - 'X-Timestamp': timestamp, - 'Content-Length': str(os.fstat(writer._fd).st_size), - } - writer.put(metadata) + for df in (self.disk_file, self.disk_file_ec): + with df.create() as writer: + writer.write(data) + metadata = { + 'ETag': md5(data).hexdigest(), + 'X-Timestamp': timestamp, + 'Content-Length': str(os.fstat(writer._fd).st_size), + } + writer.put(metadata) + writer.commit(Timestamp(timestamp)) auditor_worker.audit_all_objects(device_dirs=['sda', 'sdb']) self.assertEqual(auditor_worker.quarantines, pre_quarantines) # still have the 1024 byte object left in policy-1 (plus the - # stats from the original 2) - self.assertEqual(auditor_worker.stats_buckets[1024], 3) + # stats from the original 3) + self.assertEqual(auditor_worker.stats_buckets[1024], 4) self.assertEqual(auditor_worker.stats_buckets[10240], 0) # and then policy-0 disk_file was re-written as a larger object - self.assertEqual(auditor_worker.stats_buckets['OVER'], 1) + self.assertEqual(auditor_worker.stats_buckets['OVER'], 2) # pick up even more additional code coverage, misc paths auditor_worker.log_time = -1 auditor_worker.stats_sizes = [] auditor_worker.audit_all_objects(device_dirs=['sda', 'sdb']) self.assertEqual(auditor_worker.quarantines, pre_quarantines) - self.assertEqual(auditor_worker.stats_buckets[1024], 3) + self.assertEqual(auditor_worker.stats_buckets[1024], 4) self.assertEqual(auditor_worker.stats_buckets[10240], 0) - self.assertEqual(auditor_worker.stats_buckets['OVER'], 1) + self.assertEqual(auditor_worker.stats_buckets['OVER'], 2) def test_object_run_logging(self): logger = FakeLogger() @@ -359,6 +386,7 @@ class TestAuditor(unittest.TestCase): } writer.put(metadata) os.write(writer._fd, 'extra_data') + writer.commit(Timestamp(timestamp)) auditor_worker.audit_all_objects() self.assertEqual(auditor_worker.quarantines, pre_quarantines + 1) @@ -381,6 +409,7 @@ class TestAuditor(unittest.TestCase): 'Content-Length': str(os.fstat(writer._fd).st_size), } writer.put(metadata) + writer.commit(Timestamp(timestamp)) auditor_worker.audit_all_objects() self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'ob', policy=POLICIES.legacy) @@ -396,6 +425,7 @@ class TestAuditor(unittest.TestCase): 'Content-Length': str(os.fstat(writer._fd).st_size), } writer.put(metadata) + writer.commit(Timestamp(timestamp)) os.write(writer._fd, 'extra_data') auditor_worker.audit_all_objects() self.assertEqual(auditor_worker.quarantines, pre_quarantines + 1) @@ -409,12 +439,14 @@ class TestAuditor(unittest.TestCase): writer.write(data) etag.update(data) etag = etag.hexdigest() + timestamp = str(normalize_timestamp(time.time())) metadata = { 'ETag': etag, - 'X-Timestamp': str(normalize_timestamp(time.time())), + 'X-Timestamp': timestamp, 'Content-Length': str(os.fstat(writer._fd).st_size), } writer.put(metadata) + writer.commit(Timestamp(timestamp)) etag = md5() etag.update('1' + '0' * 1023) etag = etag.hexdigest() @@ -445,6 +477,7 @@ class TestAuditor(unittest.TestCase): 'Content-Length': 10, } writer.put(metadata) + writer.commit(Timestamp(timestamp)) etag = md5() etag = etag.hexdigest() metadata['ETag'] = etag From edc823e8030640184071fee4920d34f9a1cc6b3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?= Date: Sun, 29 Nov 2015 18:46:47 +0100 Subject: [PATCH 38/52] Show UTC time in swift-recon. The output is currently inconsistent: for example, the replication section prints local time while the beginning of the line is UTC. Use _ptime in swift-recon for all time printing; this function now returns UTC.
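The mechanical change is small -- condensed from the recon.py diff below, the helper now formats with time.gmtime() instead of time.localtime():

    import time

    def _ptime(timev=None):
        # pretty-print the given unix timestamp -- or the current time --
        # in UTC rather than the machine's local timezone
        if timev:
            return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(timev))
        return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())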
Change-Id: I732d9851db157130a08e825e8093b7e244b63e9c --- swift/cli/recon.py | 20 +++++++------------- test/unit/cli/test_recon.py | 14 +++++--------- 2 files changed, 12 insertions(+), 22 deletions(-) diff --git a/swift/cli/recon.py b/swift/cli/recon.py index 3af876b709..6147f4cbad 100644 --- a/swift/cli/recon.py +++ b/swift/cli/recon.py @@ -181,12 +181,12 @@ class SwiftRecon(object): def _ptime(self, timev=None): """ :param timev: a unix timestamp or None - :returns: a pretty string of the current time or provided time + :returns: a pretty string of the current time or provided time in UTC """ if timev: - return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timev)) + return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(timev)) else: - return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) def _md5_file(self, path): """ @@ -495,16 +495,14 @@ class SwiftRecon(object): elapsed = time.time() - least_recent_time elapsed, elapsed_unit = seconds2timeunit(elapsed) print('Oldest completion was %s (%d %s ago) by %s.' % ( - time.strftime('%Y-%m-%d %H:%M:%S', - time.gmtime(least_recent_time)), + self._ptime(least_recent_time), elapsed, elapsed_unit, host)) if most_recent_url is not None: host = urlparse(most_recent_url).netloc elapsed = time.time() - most_recent_time elapsed, elapsed_unit = seconds2timeunit(elapsed) print('Most recent completion was %s (%d %s ago) by %s.' % ( - time.strftime('%Y-%m-%d %H:%M:%S', - time.gmtime(most_recent_time)), + self._ptime(most_recent_time), elapsed, elapsed_unit, host)) print("=" * 79) @@ -899,12 +897,8 @@ class SwiftRecon(object): continue if (ts_remote < ts_start or ts_remote > ts_end): diff = abs(ts_end - ts_remote) - ts_end_f = time.strftime( - "%Y-%m-%d %H:%M:%S", - time.localtime(ts_end)) - ts_remote_f = time.strftime( - "%Y-%m-%d %H:%M:%S", - time.localtime(ts_remote)) + ts_end_f = self._ptime(ts_end) + ts_remote_f = self._ptime(ts_remote) print("!!
%s current time is %s, but remote is %s, " "differs by %.2f sec" % ( diff --git a/test/unit/cli/test_recon.py b/test/unit/cli/test_recon.py index a5b8f05d76..fb625313a3 100644 --- a/test/unit/cli/test_recon.py +++ b/test/unit/cli/test_recon.py @@ -164,17 +164,17 @@ class TestRecon(unittest.TestCase): self.assertEqual(stats.get('perc_none'), 25.0) def test_ptime(self): - with mock.patch('time.localtime') as mock_localtime: - mock_localtime.return_value = time.struct_time( + with mock.patch('time.gmtime') as mock_gmtime: + mock_gmtime.return_value = time.struct_time( (2013, 12, 17, 10, 0, 0, 1, 351, 0)) timestamp = self.recon_instance._ptime(1387274400) self.assertEqual(timestamp, "2013-12-17 10:00:00") - mock_localtime.assert_called_with(1387274400) + mock_gmtime.assert_called_with(1387274400) timestamp2 = self.recon_instance._ptime() self.assertEqual(timestamp2, "2013-12-17 10:00:00") - mock_localtime.assert_called_with() + mock_gmtime.assert_called_with() def test_get_devices(self): ringbuilder = builder.RingBuilder(2, 3, 1) @@ -750,11 +750,7 @@ class TestReconCommands(unittest.TestCase): mock.call('1/2 hosts matched, 0 error[s] while checking hosts.'), ] - def mock_localtime(*args, **kwargs): - return time.gmtime(*args, **kwargs) - - with mock.patch("time.localtime", mock_localtime): - cli.time_check([('127.0.0.1', 6010), ('127.0.0.1', 6020)]) + cli.time_check([('127.0.0.1', 6010), ('127.0.0.1', 6020)]) # We need any_order=True because the order of calls depends on the dict # that is returned from the recon middleware, thus can't rely on it From a4c2fe95ab2fbe59379a69914ed0fac49c28efbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?= Date: Tue, 12 Jan 2016 21:26:33 +0100 Subject: [PATCH 39/52] Allow changing the auditor sleep interval in config Change-Id: Ic451c5e0b686509f8982ed1bf65a223a2d77b9a0 --- doc/source/deployment_guide.rst | 2 ++ etc/object-server.conf-sample | 3 +++ swift/obj/auditor.py | 5 ++--- test/unit/obj/test_auditor.py | 14 ++++++++++++-- 4 files changed, 19 insertions(+), 5 deletions(-) diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index f06afc483b..1ae7887636 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -719,6 +719,8 @@ log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level log_address /dev/log Logging directory log_time 3600 Frequency of status logs in seconds. +interval 30 Time in seconds to wait between + auditor passes disk_chunk_size 65536 Size of chunks read during auditing files_per_second 20 Maximum files audited per second per auditor process. Should be tuned according
Should be tuned according diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample index 815b63cc5d..a7921a68ad 100644 --- a/etc/object-server.conf-sample +++ b/etc/object-server.conf-sample @@ -282,6 +282,9 @@ use = egg:swift#recon # log_level = INFO # log_address = /dev/log # +# Time in seconds to wait between auditor passes +# interval = 30 +# # You can set the disk chunk size that the auditor uses making it larger if # you like for more efficient local auditing of larger objects # disk_chunk_size = 65536 diff --git a/swift/obj/auditor.py b/swift/obj/auditor.py index 38fef209e1..72016143aa 100644 --- a/swift/obj/auditor.py +++ b/swift/obj/auditor.py @@ -29,8 +29,6 @@ from swift.common.utils import get_logger, ratelimit_sleep, dump_recon_cache, \ from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist from swift.common.daemon import Daemon -SLEEP_BETWEEN_AUDITS = 30 - class AuditorWorker(object): """Walk through file system to audit objects""" @@ -230,9 +228,10 @@ class ObjectAuditor(Daemon): self.recon_cache_path = conf.get('recon_cache_path', '/var/cache/swift') self.rcache = os.path.join(self.recon_cache_path, "object.recon") + self.interval = int(conf.get('interval', 30)) def _sleep(self): - time.sleep(SLEEP_BETWEEN_AUDITS) + time.sleep(self.interval) def clear_recon_cache(self, auditor_type): """Clear recon cache entries""" diff --git a/test/unit/obj/test_auditor.py b/test/unit/obj/test_auditor.py index 3de4cf239d..26acd967d5 100644 --- a/test/unit/obj/test_auditor.py +++ b/test/unit/obj/test_auditor.py @@ -533,10 +533,20 @@ class TestAuditor(unittest.TestCase): def test_sleeper(self): with mock.patch( 'time.sleep', mock.MagicMock()) as mock_sleep: - auditor.SLEEP_BETWEEN_AUDITS = 0.10 my_auditor = auditor.ObjectAuditor(self.conf) my_auditor._sleep() - mock_sleep.assert_called_with(auditor.SLEEP_BETWEEN_AUDITS) + mock_sleep.assert_called_with(30) + + my_conf = dict(interval=2) + my_conf.update(self.conf) + my_auditor = auditor.ObjectAuditor(my_conf) + my_auditor._sleep() + mock_sleep.assert_called_with(2) + + my_auditor = auditor.ObjectAuditor(self.conf) + my_auditor.interval = 2 + my_auditor._sleep() + mock_sleep.assert_called_with(2) def test_run_parallel_audit(self): From 70047709fc9885df7019f791e17a3240682cc6cb Mon Sep 17 00:00:00 2001 From: keliang Date: Fri, 15 Jan 2016 00:31:51 +0800 Subject: [PATCH 40/52] Drop python 2.6 support Change-Id: Id6329c863dacb189fccfc304453ed7b6f9607c14 --- swift/common/utils.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/swift/common/utils.py b/swift/common/utils.py index 831f651ff1..a7615220a2 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -862,14 +862,10 @@ def last_modified_date_to_timestamp(last_modified_date_str): start = datetime.datetime.strptime(last_modified_date_str, '%Y-%m-%dT%H:%M:%S.%f') delta = start - EPOCH - # TODO(sam): after we no longer support py2.6, this expression can - # simplify to Timestamp(delta.total_seconds()). - # + # This calculation is based on Python 2.7's Modules/datetimemodule.c, # function delta_to_microseconds(), but written in Python. 
- return Timestamp(delta.days * 86400 + - delta.seconds + - delta.microseconds / 1000000.0) + return Timestamp(delta.total_seconds()) def normalize_delete_at_timestamp(timestamp): From 3c0cf549f1e822cce8f905b069b317e676cf306b Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Wed, 13 Jan 2016 18:08:45 -0800 Subject: [PATCH 41/52] Speed up get_more_nodes() when there is an empty zone The ring has some optimizations in get_more_nodes() so that it can find handoffs that span all the regions/zones/et cetera and then stop looking. The stopping is the important part. Previously, it would quickly find a handoff in each unused region, then spend way too long looking for more unused regions; the same was true for zones, IPs, and so on. Thus, in commit 9cd7c6c, we started counting regions and zones, then stopping when we found them all. This count included all regions and zones in the ring, regardless of whether or not there were actually any parts assigned or not. In rings with an empty region, i.e. a region for which there are only zero-weight devices, get_more_nodes() would be very slow. This commit ignores devices with no assigned partitions when counting regions, zones, and so forth, thus greatly speeding things up. The output of get_more_nodes() is unchanged. This is purely an optimization. Closes-Bug: 1534303 Change-Id: I4a5c57205e87e1205d40fd5d9458d4114e524332 --- swift/common/ring/ring.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/swift/common/ring/ring.py b/swift/common/ring/ring.py index e00b0db3a9..6ae2aea09f 100644 --- a/swift/common/ring/ring.py +++ b/swift/common/ring/ring.py @@ -203,12 +203,23 @@ class Ring(object): # Do this now, when we know the data has changed, rather than # doing it on every call to get_more_nodes(). + # + # Since this is to speed up the finding of handoffs, we only + # consider devices with at least one partition assigned. This + # way, a region, zone, or server with no partitions assigned + # does not count toward our totals, thereby keeping the early + # bailouts in get_more_nodes() working. + dev_ids_with_parts = set() + for part2dev_id in self._replica2part2dev_id: + for dev_id in part2dev_id: + dev_ids_with_parts.add(dev_id) + regions = set() zones = set() ips = set() self._num_devs = 0 for dev in self._devs: - if dev: + if dev and dev['id'] in dev_ids_with_parts: regions.add(dev['region']) zones.add((dev['region'], dev['zone'])) ips.add((dev['region'], dev['zone'], dev['ip'])) From 5d449471b12c67b31ebb5a383d9bb35bace36213 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Thu, 14 Jan 2016 17:26:01 -0800 Subject: [PATCH 42/52] Remove some Python 2.6 leftovers Change-Id: I798d08722c90327c66759aa0bb4526851ba38d41 --- swift/common/ring/ring.py | 10 +--------- test/unit/common/ring/test_ring.py | 5 ----- test/unit/common/test_utils.py | 4 ---- 3 files changed, 1 insertion(+), 18 deletions(-) diff --git a/swift/common/ring/ring.py b/swift/common/ring/ring.py index e00b0db3a9..2a42df9aeb 100644 --- a/swift/common/ring/ring.py +++ b/swift/common/ring/ring.py @@ -15,7 +15,6 @@ import array import six.moves.cPickle as pickle -import inspect import json from collections import defaultdict from gzip import GzipFile @@ -135,15 +134,8 @@ class RingData(object): # Override the timestamp so that the same ring data creates # the same bytes on disk. This makes a checksum comparison a # good way to see if two rings are identical. - # - # This only works on Python 2.7; on 2.6, we always get the - # current time in the gzip output. 
tempf = NamedTemporaryFile(dir=".", prefix=filename, delete=False) - if 'mtime' in inspect.getargspec(GzipFile.__init__).args: - gz_file = GzipFile(filename, mode='wb', fileobj=tempf, - mtime=mtime) - else: - gz_file = GzipFile(filename, mode='wb', fileobj=tempf) + gz_file = GzipFile(filename, mode='wb', fileobj=tempf, mtime=mtime) self.serialize_v1(gz_file) gz_file.close() tempf.flush() diff --git a/test/unit/common/ring/test_ring.py b/test/unit/common/ring/test_ring.py index a492b44bd4..3d29648603 100644 --- a/test/unit/common/ring/test_ring.py +++ b/test/unit/common/ring/test_ring.py @@ -16,7 +16,6 @@ import array import six.moves.cPickle as pickle import os -import sys import unittest import stat from contextlib import closing @@ -109,11 +108,7 @@ class TestRingData(unittest.TestCase): def test_deterministic_serialization(self): """ Two identical rings should produce identical .gz files on disk. - - Only true on Python 2.7 or greater. """ - if sys.version_info[0] == 2 and sys.version_info[1] < 7: - return os.mkdir(os.path.join(self.testdir, '1')) os.mkdir(os.path.join(self.testdir, '2')) # These have to have the same filename (not full path, diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index efefb5581e..8f5f82eff0 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -140,10 +140,6 @@ class MockSys(object): self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(), self.stderr.fileno()] - @property - def version_info(self): - return sys.version_info - def reset_loggers(): if hasattr(utils.get_logger, 'handler4logger'): From 47e226418bad35ccad2a1525f392ba69f6165027 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 18 Jan 2016 06:20:14 +0000 Subject: [PATCH 43/52] Imported Translations from Zanata For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: Ic416c9afc8a1c76f552803a7c70fc905eda7b3cb --- swift/locale/de/LC_MESSAGES/swift.po | 17 +- swift/locale/es/LC_MESSAGES/swift.po | 17 +- swift/locale/fr/LC_MESSAGES/swift.po | 17 +- swift/locale/it/LC_MESSAGES/swift.po | 17 +- swift/locale/ja/LC_MESSAGES/swift.po | 17 +- swift/locale/ko_KR/LC_MESSAGES/swift.po | 19 +- swift/locale/pt_BR/LC_MESSAGES/swift.po | 19 +- swift/locale/ru/LC_MESSAGES/swift.po | 128 ++++++- swift/locale/swift.pot | 461 ++++++++++++------------ swift/locale/tr_TR/LC_MESSAGES/swift.po | 19 +- swift/locale/zh_CN/LC_MESSAGES/swift.po | 19 +- swift/locale/zh_TW/LC_MESSAGES/swift.po | 19 +- 12 files changed, 448 insertions(+), 321 deletions(-) diff --git a/swift/locale/de/LC_MESSAGES/swift.po b/swift/locale/de/LC_MESSAGES/swift.po index 630fb41da0..bf7fc1cc73 100644 --- a/swift/locale/de/LC_MESSAGES/swift.po +++ b/swift/locale/de/LC_MESSAGES/swift.po @@ -1,4 +1,4 @@ -# German translations for swift. +# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # @@ -10,18 +10,19 @@ # Tom Cocozzello , 2015. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.5.1.dev70\n" +"Project-Id-Version: swift 2.5.1.dev267\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-10-23 06:34+0000\n" +"POT-Creation-Date: 2016-01-16 12:32+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-11 11:22+0000\n" "Last-Translator: openstackjenkins \n" "Language: de\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"Generated-By: Babel 2.0\n" +"X-Generator: Zanata 3.7.3\n" "Language-Team: German\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.1.1\n" msgid "" "\n" diff --git a/swift/locale/es/LC_MESSAGES/swift.po b/swift/locale/es/LC_MESSAGES/swift.po index c8583fe2ce..8db73386c9 100644 --- a/swift/locale/es/LC_MESSAGES/swift.po +++ b/swift/locale/es/LC_MESSAGES/swift.po @@ -1,4 +1,4 @@ -# Spanish translations for swift. +# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # @@ -8,18 +8,19 @@ # Tom Cocozzello , 2015. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.5.1.dev70\n" +"Project-Id-Version: swift 2.5.1.dev267\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-10-23 06:34+0000\n" +"POT-Creation-Date: 2016-01-16 12:32+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-09-09 05:36+0000\n" "Last-Translator: Carlos A. Muñoz \n" "Language: es\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"Generated-By: Babel 2.0\n" +"X-Generator: Zanata 3.7.3\n" "Language-Team: Spanish\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.1.1\n" msgid "" "\n" diff --git a/swift/locale/fr/LC_MESSAGES/swift.po b/swift/locale/fr/LC_MESSAGES/swift.po index ad1af0385c..ccff50402a 100644 --- a/swift/locale/fr/LC_MESSAGES/swift.po +++ b/swift/locale/fr/LC_MESSAGES/swift.po @@ -1,4 +1,4 @@ -# French translations for swift. +# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # @@ -8,18 +8,19 @@ # Tom Cocozzello , 2015. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.5.1.dev70\n" +"Project-Id-Version: swift 2.5.1.dev267\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-10-23 06:34+0000\n" +"POT-Creation-Date: 2016-01-16 12:32+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-11 11:22+0000\n" "Last-Translator: openstackjenkins \n" "Language: fr\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" +"Generated-By: Babel 2.0\n" +"X-Generator: Zanata 3.7.3\n" "Language-Team: French\n" -"Plural-Forms: nplurals=2; plural=(n > 1)\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.1.1\n" msgid "" "\n" diff --git a/swift/locale/it/LC_MESSAGES/swift.po b/swift/locale/it/LC_MESSAGES/swift.po index 8169c5a355..5294bb8032 100644 --- a/swift/locale/it/LC_MESSAGES/swift.po +++ b/swift/locale/it/LC_MESSAGES/swift.po @@ -1,4 +1,4 @@ -# Italian translations for swift. +# Translations template for swift. 
# Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # @@ -7,18 +7,19 @@ # Tom Cocozzello , 2015. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.5.1.dev70\n" +"Project-Id-Version: swift 2.5.1.dev267\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-10-23 06:34+0000\n" +"POT-Creation-Date: 2016-01-16 12:32+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-11 11:22+0000\n" "Last-Translator: openstackjenkins \n" "Language: it\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"Generated-By: Babel 2.0\n" +"X-Generator: Zanata 3.7.3\n" "Language-Team: Italian\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.1.1\n" msgid "" "\n" diff --git a/swift/locale/ja/LC_MESSAGES/swift.po b/swift/locale/ja/LC_MESSAGES/swift.po index 85a1f071eb..ec78f37ae0 100644 --- a/swift/locale/ja/LC_MESSAGES/swift.po +++ b/swift/locale/ja/LC_MESSAGES/swift.po @@ -1,4 +1,4 @@ -# Japanese translations for swift. +# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # @@ -9,18 +9,19 @@ # Tom Cocozzello , 2015. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.5.1.dev70\n" +"Project-Id-Version: swift 2.5.1.dev267\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-10-23 06:34+0000\n" +"POT-Creation-Date: 2016-01-16 12:32+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-09-26 09:26+0000\n" "Last-Translator: Akihiro Motoki \n" "Language: ja\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"Generated-By: Babel 2.0\n" +"X-Generator: Zanata 3.7.3\n" "Language-Team: Japanese\n" -"Plural-Forms: nplurals=1; plural=0\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.1.1\n" msgid "" "\n" diff --git a/swift/locale/ko_KR/LC_MESSAGES/swift.po b/swift/locale/ko_KR/LC_MESSAGES/swift.po index aeb33353f5..1db510bc0b 100644 --- a/swift/locale/ko_KR/LC_MESSAGES/swift.po +++ b/swift/locale/ko_KR/LC_MESSAGES/swift.po @@ -1,4 +1,4 @@ -# Korean (South Korea) translations for swift. +# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # @@ -9,18 +9,19 @@ # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.5.1.dev70\n" +"Project-Id-Version: swift 2.5.1.dev267\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-10-23 06:34+0000\n" +"POT-Creation-Date: 2016-01-16 12:32+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-09-09 05:10+0000\n" "Last-Translator: Ying Chun Guo \n" -"Language: ko_KR\n" +"Language: ko-KR\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"Generated-By: Babel 2.0\n" +"X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" -"Plural-Forms: nplurals=1; plural=0\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.1.1\n" msgid "" "\n" diff --git a/swift/locale/pt_BR/LC_MESSAGES/swift.po b/swift/locale/pt_BR/LC_MESSAGES/swift.po index 3e674ce282..d9bb9909bd 100644 --- a/swift/locale/pt_BR/LC_MESSAGES/swift.po +++ b/swift/locale/pt_BR/LC_MESSAGES/swift.po @@ -1,4 +1,4 @@ -# Portuguese (Brazil) translations for swift. +# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # @@ -11,18 +11,19 @@ # OpenStack Infra , 2015. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.5.1.dev70\n" +"Project-Id-Version: swift 2.5.1.dev267\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-10-23 06:34+0000\n" +"POT-Creation-Date: 2016-01-16 12:32+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-11 11:22+0000\n" "Last-Translator: openstackjenkins \n" -"Language: pt_BR\n" +"Language: pt-BR\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" +"Generated-By: Babel 2.0\n" +"X-Generator: Zanata 3.7.3\n" "Language-Team: Portuguese (Brazil)\n" -"Plural-Forms: nplurals=2; plural=(n > 1)\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.1.1\n" msgid "" "\n" diff --git a/swift/locale/ru/LC_MESSAGES/swift.po b/swift/locale/ru/LC_MESSAGES/swift.po index 85140ca21d..11436a5f42 100644 --- a/swift/locale/ru/LC_MESSAGES/swift.po +++ b/swift/locale/ru/LC_MESSAGES/swift.po @@ -1,26 +1,28 @@ -# Russian translations for swift. +# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata +# Filatov Sergey , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.5.1.dev70\n" +"Project-Id-Version: swift 2.5.1.dev267\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-10-23 06:34+0000\n" -"PO-Revision-Date: 2015-08-11 11:22+0000\n" -"Last-Translator: openstackjenkins \n" +"POT-Creation-Date: 2016-01-16 12:32+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"PO-Revision-Date: 2016-01-17 10:49+0000\n" +"Last-Translator: Filatov Sergey \n" "Language: ru\n" -"Language-Team: Russian\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" -"%100>=11 && n%100<=14)? 2 : 3)\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.1.1\n" +"%100>=11 && n%100<=14)? 
2 : 3);\n" +"Generated-By: Babel 2.0\n" +"X-Generator: Zanata 3.7.3\n" +"Language-Team: Russian\n" msgid "" "\n" @@ -52,6 +54,16 @@ msgstr "Ответили как размонтированные: %(ip)s/%(devic msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" +#, python-format +msgid "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" +"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " +"(%(rate).2f/sec, %(remaining)s remaining)" +msgstr "" +"Реконструированно разделов: %(reconstructed)d/%(total)d (%(percentage).2f%%) " +"partitions of %(device)d/%(dtotal)d (%(dpercentage).2f%%) за время " +"%(time).2fs (%(rate).2f/sec, осталось: %(remaining)s)" + #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " @@ -88,6 +100,10 @@ msgstr "%s не существует" msgid "%s is not mounted" msgstr "%s не смонтирован" +#, python-format +msgid "%s responded as unmounted" +msgstr "%s ответил как размонтированный" + #, python-format msgid "%s running (%s - %s)" msgstr "%s выполняется (%s - %s)" @@ -225,6 +241,14 @@ msgid "" msgstr "" "Путь клиента %(client)s не соответствует пути в метаданных объекта %(meta)s" +msgid "" +"Configuration option internal_client_conf_path not defined. Using default " +"configuration, See internal-client.conf-sample for options" +msgstr "" +"Опция internal_client_conf_path конфигурации не определена. Используется " +"конфигурация по умолчанию. Используйте intenal-client.conf-sample для " +"информации об опциях" + msgid "Connection refused" msgstr "Соединение отклонено" @@ -284,6 +308,10 @@ msgstr "Ошибка загрузки данных: %s" msgid "Devices pass completed: %.02fs" msgstr "Проход устройств выполнен: %.02fs" +#, python-format +msgid "Directory %r does not map to a valid policy (%s)" +msgstr "Каталог %r не связан со стратегией policy (%s)" + #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "Ошибка %(db_file)s: %(validate_sync_to_err)s" @@ -560,6 +588,9 @@ msgstr "" msgid "Exception in top-level replication loop" msgstr "Исключительная ситуация в цикле репликации верхнего уровня" +msgid "Exception in top-levelreconstruction loop" +msgstr "Исключение в цикле реконструкции верхнего уровня" + #, python-format msgid "Exception while deleting container %s %s" msgstr "Исключительная ситуация во время удаления контейнера %s %s" @@ -617,6 +648,10 @@ msgstr "Недопустимый хост %r в X-Container-Sync-To" msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Недопустимая ожидающая запись %(file)s: %(entry)s" +#, python-format +msgid "Invalid response %(resp)s from %(full_path)s" +msgstr "Недопустимый ответ %(resp)s от %(full_path)s" + #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Недопустимый ответ %(resp)s от %(ip)s" @@ -652,10 +687,18 @@ msgstr "Отсутствует конечная точка кластера дл msgid "No permission to signal PID %d" msgstr "Нет прав доступа для отправки сигнала в PID %d" +#, python-format +msgid "No policy with index %s" +msgstr "Не найдено стратегии с индексом %s" + #, python-format msgid "No realm key for %r" msgstr "Отсутствует ключ области для %r" +#, python-format +msgid "No space left on device for %s (%s)" +msgstr "Не устройстве %s (%s) закончилось место" + #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Ограниченная ошибка узла %(ip)s:%(port)s (%(device)s)" @@ -668,6 +711,10 @@ msgstr "" "Не найдено: %(sync_from)r => %(sync_to)r - объект " "%(obj_name)r" +#, python-format +msgid "Nothing 
reconstructed for %s seconds." +msgstr "Ничего не реконструировано за %s с." + #, python-format msgid "Nothing replicated for %s seconds." msgstr "Ничего не реплицировано за %s с." @@ -716,6 +763,10 @@ msgstr "" msgid "Object audit stats: %s" msgstr "Состояние контроля объекта: %s" +#, python-format +msgid "Object reconstruction complete (once). (%.02f minutes)" +msgstr "Реконструкция объекта выполнена (однократно). (%.02f мин.)" + #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Репликация объекта выполнена (однократно). (%.02f мин.)" @@ -775,6 +826,14 @@ msgstr "Требуется путь в X-Container-Sync-To" msgid "Problem cleaning up %s" msgstr "Неполадка при очистке %s" +#, python-format +msgid "Problem cleaning up %s (%s)" +msgstr "Возникла проблема при очистке %s (%s)" + +#, fuzzy, python-format +msgid "Problem writing durable state file %s (%s)" +msgstr "Возникла проблема при записи файла состояния %s (%s)" + #, python-format msgid "Profiling Error: %s" msgstr "Ошибка профилирования: %s" @@ -818,6 +877,14 @@ msgstr "Удаление объектов %s" msgid "Removing partition: %s" msgstr "Удаление раздела: %s" +#, python-format +msgid "Removing pid file %s with invalid pid" +msgstr "Удаление pid файла %s с неверным pid-ом" + +#, python-format +msgid "Removing pid file %s with wrong pid %d" +msgstr "Удаление pid файла %s с неверным pid-ом %d" + #, python-format msgid "Removing stale pid file %s" msgstr "Удаление устаревшего файла pid %s" @@ -837,6 +904,11 @@ msgstr "" "Возвращено 498 для %(meth)s в %(acc)s/%(cont)s/%(obj)s . Ratelimit " "(максимальная задержка): %(e)s" +msgid "Ring change detected. Aborting current reconstruction pass." +msgstr "" +"Обнаружено изменение кольца. Принудительное завершение текущего прохода " +"реконструкции." + msgid "Ring change detected. Aborting current replication pass." msgstr "" "Обнаружено кольцевое изменение. Принудительное завершение текущего прохода " @@ -846,6 +918,9 @@ msgstr "" msgid "Running %s once" msgstr "Однократное выполнение %s" +msgid "Running object reconstructor in script mode." +msgstr "Запуск утилиты реконструкции объектов в режиме скрипта." + msgid "Running object replicator in script mode." msgstr "Запуск утилиты репликации объектов в режиме сценариев." @@ -889,6 +964,12 @@ msgstr "%s будет пропущен, так как он не смонтиро msgid "Starting %s" msgstr "Запуск %s" +msgid "Starting object reconstruction pass." +msgstr "Запуск прохода реконструкции объектов." + +msgid "Starting object reconstructor in daemon mode." +msgstr "Запуск утилиты реконструкции объектов в режиме демона." + msgid "Starting object replication pass." msgstr "Запуск прохода репликации объектов." 
@@ -914,10 +995,18 @@ msgstr "" msgid "Timeout %(action)s to memcached: %(server)s" msgstr "Тайм-аут действия %(action)s для сохранения в кэш памяти: %(server)s" +#, python-format +msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" +msgstr "Исключение по таймауту %(ip)s:%(port)s/%(device)s" + #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Попытка выполнения метода %(method)s %(path)s" +#, python-format +msgid "Trying to GET %(full_path)s" +msgstr "Попытка GET-запроса %(full_path)s" + #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Попытка получения конечного состояния PUT в %s" @@ -942,10 +1031,18 @@ msgstr "Необрабатываемая исключительная ситуа msgid "Unable to find %s config section in %s" msgstr "Не удалось найти раздел конфигурации %s в %s" +#, python-format +msgid "Unable to load internal client from config: %r (%s)" +msgstr "Не удалось загрузить клиент из конфигурации: %r (%s)" + #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "Не удалось найти %s в libc. Оставлено как no-op." +#, python-format +msgid "Unable to locate config for %s" +msgstr "Не удалось найти конфигурационный файл для %s" + msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" @@ -970,6 +1067,11 @@ msgstr "Непредвиденный ответ: %s" msgid "Unhandled exception" msgstr "Необработанная исключительная ситуация" +#, python-format +msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" +msgstr "" +"Неизвестное исключение в GET-запросе: %(account)r %(container)r %(object)r" + #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Отчет об обновлении для %(container)s %(dbfile)s не выполнен" @@ -1004,6 +1106,10 @@ msgstr "" msgid "Waited %s seconds for %s to die; giving up" msgstr "Система ожидала %s секунд для %s завершения; освобождение" +#, python-format +msgid "Waited %s seconds for %s to die; killing" +msgstr "Система ожидала %s секунд для %s завершения; Принудительное завершение" + msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" "Предупреждение: не удается ограничить скорость без клиента с кэшированием " diff --git a/swift/locale/swift.pot b/swift/locale/swift.pot index 1019898f0f..f98fe73f5d 100644 --- a/swift/locale/swift.pot +++ b/swift/locale/swift.pot @@ -1,21 +1,21 @@ # Translations template for swift. -# Copyright (C) 2015 ORGANIZATION +# Copyright (C) 2016 ORGANIZATION # This file is distributed under the same license as the swift project. -# FIRST AUTHOR , 2015. +# FIRST AUTHOR , 2016. 
# #, fuzzy msgid "" msgstr "" -"Project-Id-Version: swift 2.4.1.dev50\n" +"Project-Id-Version: swift 2.5.1.dev267\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-09-28 06:27+0000\n" +"POT-Creation-Date: 2016-01-18 06:20+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.1.1\n" +"Generated-By: Babel 2.2.0\n" #: swift/account/auditor.py:59 #, python-format @@ -63,109 +63,109 @@ msgstr "" msgid "ERROR Could not get account info %s" msgstr "" -#: swift/account/reaper.py:138 swift/common/utils.py:2147 +#: swift/account/reaper.py:139 swift/common/utils.py:2128 #: swift/obj/diskfile.py:296 swift/obj/updater.py:88 swift/obj/updater.py:131 #, python-format msgid "Skipping %s as it is not mounted" msgstr "" -#: swift/account/reaper.py:142 +#: swift/account/reaper.py:143 msgid "Exception in top-level account reaper loop" msgstr "" -#: swift/account/reaper.py:145 +#: swift/account/reaper.py:146 #, python-format msgid "Devices pass completed: %.02fs" msgstr "" -#: swift/account/reaper.py:253 +#: swift/account/reaper.py:254 #, python-format msgid "Beginning pass on account %s" msgstr "" -#: swift/account/reaper.py:278 +#: swift/account/reaper.py:279 #, python-format msgid "Exception with containers for account %s" msgstr "" -#: swift/account/reaper.py:285 +#: swift/account/reaper.py:286 #, python-format msgid "Exception with account %s" msgstr "" -#: swift/account/reaper.py:286 +#: swift/account/reaper.py:287 #, python-format msgid "Incomplete pass on account %s" msgstr "" -#: swift/account/reaper.py:288 +#: swift/account/reaper.py:289 #, python-format msgid ", %s containers deleted" msgstr "" -#: swift/account/reaper.py:290 +#: swift/account/reaper.py:291 #, python-format msgid ", %s objects deleted" msgstr "" -#: swift/account/reaper.py:292 +#: swift/account/reaper.py:293 #, python-format msgid ", %s containers remaining" msgstr "" -#: swift/account/reaper.py:295 +#: swift/account/reaper.py:296 #, python-format msgid ", %s objects remaining" msgstr "" -#: swift/account/reaper.py:297 +#: swift/account/reaper.py:298 #, python-format msgid ", %s containers possibly remaining" msgstr "" -#: swift/account/reaper.py:300 +#: swift/account/reaper.py:301 #, python-format msgid ", %s objects possibly remaining" msgstr "" -#: swift/account/reaper.py:303 +#: swift/account/reaper.py:304 msgid ", return codes: " msgstr "" -#: swift/account/reaper.py:307 +#: swift/account/reaper.py:308 #, python-format msgid ", elapsed: %.02fs" msgstr "" -#: swift/account/reaper.py:313 +#: swift/account/reaper.py:314 #, python-format msgid "Account %s has not been reaped since %s" msgstr "" -#: swift/account/reaper.py:372 swift/account/reaper.py:426 -#: swift/account/reaper.py:502 swift/container/updater.py:307 +#: swift/account/reaper.py:373 swift/account/reaper.py:427 +#: swift/account/reaper.py:503 swift/container/updater.py:307 #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "" -#: swift/account/reaper.py:379 swift/account/reaper.py:435 -#: swift/account/reaper.py:513 +#: swift/account/reaper.py:380 swift/account/reaper.py:436 +#: swift/account/reaper.py:514 #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "" -#: swift/account/reaper.py:396 +#: swift/account/reaper.py:397 #, python-format msgid "Exception with objects for container %(container)s for account 
%(account)s" msgstr "" -#: swift/account/server.py:275 swift/container/server.py:586 -#: swift/obj/server.py:944 +#: swift/account/server.py:276 swift/container/server.py:589 +#: swift/obj/server.py:964 #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "" -#: swift/common/bufferedhttp.py:205 swift/common/bufferedhttp.py:210 +#: swift/common/bufferedhttp.py:206 swift/common/bufferedhttp.py:211 #, python-format msgid "Error encoding to UTF-8: %s" msgstr "" @@ -181,16 +181,16 @@ msgstr "" msgid "Error in %r with mtime_check_interval: %s" msgstr "" -#: swift/common/db.py:349 +#: swift/common/db.py:352 #, python-format msgid "Quarantined %s to %s due to %s database" msgstr "" -#: swift/common/db.py:404 +#: swift/common/db.py:407 msgid "Broker error trying to rollback locked connection" msgstr "" -#: swift/common/db.py:607 +#: swift/common/db.py:610 #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "" @@ -200,304 +200,309 @@ msgstr "" msgid "ERROR reading HTTP response from %s" msgstr "" -#: swift/common/db_replicator.py:207 +#: swift/common/db_replicator.py:208 #, python-format msgid "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" -#: swift/common/db_replicator.py:213 +#: swift/common/db_replicator.py:214 #, python-format msgid "Removed %(remove)d dbs" msgstr "" -#: swift/common/db_replicator.py:214 +#: swift/common/db_replicator.py:215 #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "" -#: swift/common/db_replicator.py:261 +#: swift/common/db_replicator.py:262 #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "" -#: swift/common/db_replicator.py:325 +#: swift/common/db_replicator.py:326 #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "" -#: swift/common/db_replicator.py:491 swift/common/db_replicator.py:755 +#: swift/common/db_replicator.py:496 swift/common/db_replicator.py:766 #, python-format msgid "Quarantining DB %s" msgstr "" -#: swift/common/db_replicator.py:494 +#: swift/common/db_replicator.py:499 #, python-format msgid "ERROR reading db %s" msgstr "" -#: swift/common/db_replicator.py:547 +#: swift/common/db_replicator.py:552 #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "" -#: swift/common/db_replicator.py:549 +#: swift/common/db_replicator.py:554 #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "" -#: swift/common/db_replicator.py:588 +#: swift/common/db_replicator.py:593 #, python-format msgid "ERROR while trying to clean up %s" msgstr "" -#: swift/common/db_replicator.py:616 +#: swift/common/db_replicator.py:621 msgid "ERROR Failed to get my own IPs?" msgstr "" -#: swift/common/db_replicator.py:630 +#: swift/common/db_replicator.py:637 #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "" -#: swift/common/db_replicator.py:639 +#: swift/common/db_replicator.py:650 msgid "Beginning replication run" msgstr "" -#: swift/common/db_replicator.py:644 +#: swift/common/db_replicator.py:655 msgid "Replication run OVER" msgstr "" -#: swift/common/db_replicator.py:657 +#: swift/common/db_replicator.py:668 msgid "ERROR trying to replicate" msgstr "" -#: swift/common/internal_client.py:194 +#: swift/common/internal_client.py:196 #, python-format msgid "Unexpected response: %s" msgstr "" -#: swift/common/manager.py:66 +#: swift/common/manager.py:68 msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" 
msgstr "" -#: swift/common/manager.py:73 +#: swift/common/manager.py:75 msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" -#: swift/common/manager.py:80 +#: swift/common/manager.py:82 msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" -#: swift/common/manager.py:221 +#: swift/common/manager.py:241 msgid "" "\n" "user quit" msgstr "" -#: swift/common/manager.py:258 swift/common/manager.py:586 +#: swift/common/manager.py:278 swift/common/manager.py:622 #, python-format msgid "No %s running" msgstr "" -#: swift/common/manager.py:271 +#: swift/common/manager.py:291 #, python-format msgid "%s (%s) appears to have stopped" msgstr "" -#: swift/common/manager.py:281 +#: swift/common/manager.py:303 #, python-format -msgid "Waited %s seconds for %s to die; giving up" +msgid "Waited %s seconds for %s to die; killing" msgstr "" -#: swift/common/manager.py:465 -#, python-format -msgid "Unable to locate config number %s for %s" -msgstr "" - -#: swift/common/manager.py:468 -#, python-format -msgid "Unable to locate config for %s" -msgstr "" - -#: swift/common/manager.py:471 -msgid "Found configs:" -msgstr "" - -#: swift/common/manager.py:518 -#, python-format -msgid "Removing pid file %s with invalid pid" -msgstr "" - -#: swift/common/manager.py:523 +#: swift/common/manager.py:307 swift/common/manager.py:559 #, python-format msgid "Signal %s pid: %s signal: %s" msgstr "" -#: swift/common/manager.py:528 +#: swift/common/manager.py:317 +#, python-format +msgid "Waited %s seconds for %s to die; giving up" +msgstr "" + +#: swift/common/manager.py:501 +#, python-format +msgid "Unable to locate config number %s for %s" +msgstr "" + +#: swift/common/manager.py:504 +#, python-format +msgid "Unable to locate config for %s" +msgstr "" + +#: swift/common/manager.py:507 +msgid "Found configs:" +msgstr "" + +#: swift/common/manager.py:554 +#, python-format +msgid "Removing pid file %s with invalid pid" +msgstr "" + +#: swift/common/manager.py:564 #, python-format msgid "Removing pid file %s with wrong pid %d" msgstr "" -#: swift/common/manager.py:535 +#: swift/common/manager.py:571 #, python-format msgid "Removing stale pid file %s" msgstr "" -#: swift/common/manager.py:538 +#: swift/common/manager.py:574 #, python-format msgid "No permission to signal PID %d" msgstr "" -#: swift/common/manager.py:583 +#: swift/common/manager.py:619 #, python-format msgid "%s #%d not running (%s)" msgstr "" -#: swift/common/manager.py:590 swift/common/manager.py:683 -#: swift/common/manager.py:687 +#: swift/common/manager.py:626 swift/common/manager.py:719 +#: swift/common/manager.py:723 #, python-format msgid "%s running (%s - %s)" msgstr "" -#: swift/common/manager.py:690 +#: swift/common/manager.py:726 #, python-format msgid "%s already started..." 
msgstr "" -#: swift/common/manager.py:699 +#: swift/common/manager.py:735 #, python-format msgid "Running %s once" msgstr "" -#: swift/common/manager.py:701 +#: swift/common/manager.py:737 #, python-format msgid "Starting %s" msgstr "" -#: swift/common/manager.py:708 +#: swift/common/manager.py:744 #, python-format msgid "%s does not exist" msgstr "" -#: swift/common/memcached.py:161 +#: swift/common/memcached.py:197 #, python-format msgid "Timeout %(action)s to memcached: %(server)s" msgstr "" -#: swift/common/memcached.py:164 +#: swift/common/memcached.py:200 #, python-format msgid "Error %(action)s to memcached: %(server)s" msgstr "" -#: swift/common/memcached.py:189 +#: swift/common/memcached.py:225 #, python-format msgid "Error limiting server %s" msgstr "" -#: swift/common/request_helpers.py:104 +#: swift/common/request_helpers.py:107 #, python-format msgid "No policy with index %s" msgstr "" -#: swift/common/request_helpers.py:435 +#: swift/common/request_helpers.py:454 msgid "ERROR: An error occurred while retrieving segments" msgstr "" -#: swift/common/utils.py:391 +#: swift/common/utils.py:392 #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "" -#: swift/common/utils.py:581 +#: swift/common/utils.py:583 msgid "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" -#: swift/common/utils.py:665 +#: swift/common/utils.py:667 #, python-format msgid "Unable to perform fsync() on directory %s: %s" msgstr "" -#: swift/common/utils.py:1077 +#: swift/common/utils.py:1103 #, python-format msgid "%s: Connection reset by peer" msgstr "" -#: swift/common/utils.py:1079 swift/common/utils.py:1082 +#: swift/common/utils.py:1105 swift/common/utils.py:1108 #, python-format msgid "%s: %s" msgstr "" -#: swift/common/utils.py:1317 +#: swift/common/utils.py:1320 msgid "Connection refused" msgstr "" -#: swift/common/utils.py:1319 +#: swift/common/utils.py:1322 msgid "Host unreachable" msgstr "" -#: swift/common/utils.py:1321 +#: swift/common/utils.py:1324 msgid "Connection timeout" msgstr "" -#: swift/common/utils.py:1624 +#: swift/common/utils.py:1602 msgid "UNCAUGHT EXCEPTION" msgstr "" -#: swift/common/utils.py:1679 +#: swift/common/utils.py:1657 msgid "Error: missing config path argument" msgstr "" -#: swift/common/utils.py:1684 +#: swift/common/utils.py:1662 #, python-format msgid "Error: unable to locate %s" msgstr "" -#: swift/common/utils.py:2008 +#: swift/common/utils.py:1986 #, python-format msgid "Unable to read config from %s" msgstr "" -#: swift/common/utils.py:2014 +#: swift/common/utils.py:1992 #, python-format msgid "Unable to find %s config section in %s" msgstr "" -#: swift/common/utils.py:2376 +#: swift/common/utils.py:2357 #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "" -#: swift/common/utils.py:2381 +#: swift/common/utils.py:2362 #, python-format msgid "No realm key for %r" msgstr "" -#: swift/common/utils.py:2385 +#: swift/common/utils.py:2366 #, python-format msgid "No cluster endpoint for %r %r" msgstr "" -#: swift/common/utils.py:2394 +#: swift/common/utils.py:2375 #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." 
msgstr "" -#: swift/common/utils.py:2398 +#: swift/common/utils.py:2379 msgid "Path required in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2401 +#: swift/common/utils.py:2382 msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2406 +#: swift/common/utils.py:2387 #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2598 +#: swift/common/utils.py:2579 msgid "Exception dumping recon cache" msgstr "" @@ -547,27 +552,27 @@ msgstr "" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" -#: swift/common/middleware/recon.py:84 +#: swift/common/middleware/recon.py:85 msgid "Error reading recon cache file" msgstr "" -#: swift/common/middleware/recon.py:86 +#: swift/common/middleware/recon.py:87 msgid "Error parsing recon cache file" msgstr "" -#: swift/common/middleware/recon.py:88 +#: swift/common/middleware/recon.py:89 msgid "Error retrieving recon data" msgstr "" -#: swift/common/middleware/recon.py:162 +#: swift/common/middleware/recon.py:163 msgid "Error listing devices" msgstr "" -#: swift/common/middleware/recon.py:258 +#: swift/common/middleware/recon.py:259 msgid "Error reading ringfile" msgstr "" -#: swift/common/middleware/recon.py:272 +#: swift/common/middleware/recon.py:273 msgid "Error reading swift.conf" msgstr "" @@ -653,21 +658,21 @@ msgstr "" msgid "ERROR Could not get container info %s" msgstr "" -#: swift/container/server.py:180 +#: swift/container/server.py:181 #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" -#: swift/container/server.py:225 +#: swift/container/server.py:226 #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" -#: swift/container/server.py:234 +#: swift/container/server.py:235 #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " @@ -738,8 +743,8 @@ msgstr "" msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "" -#: swift/container/updater.py:92 swift/obj/reconstructor.py:817 -#: swift/obj/replicator.py:584 swift/obj/replicator.py:696 +#: swift/container/updater.py:92 swift/obj/reconstructor.py:822 +#: swift/obj/replicator.py:590 swift/obj/replicator.py:706 #, python-format msgid "%s is not mounted" msgstr "" @@ -793,22 +798,22 @@ msgid "" "later): " msgstr "" -#: swift/obj/auditor.py:78 +#: swift/obj/auditor.py:80 #, python-format msgid " - parallel, %s" msgstr "" -#: swift/obj/auditor.py:80 +#: swift/obj/auditor.py:82 #, python-format msgid " - %s" msgstr "" -#: swift/obj/auditor.py:81 +#: swift/obj/auditor.py:83 #, python-format msgid "Begin object audit \"%s\" mode (%s%s)" msgstr "" -#: swift/obj/auditor.py:100 +#: swift/obj/auditor.py:110 #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d " @@ -817,7 +822,7 @@ msgid "" "%(audit).2f, Rate: %(audit_rate).2f" msgstr "" -#: swift/obj/auditor.py:134 +#: swift/obj/auditor.py:144 #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. 
" @@ -826,27 +831,27 @@ msgid "" "Rate: %(audit_rate).2f" msgstr "" -#: swift/obj/auditor.py:149 +#: swift/obj/auditor.py:159 #, python-format msgid "Object audit stats: %s" msgstr "" -#: swift/obj/auditor.py:177 +#: swift/obj/auditor.py:187 #, python-format msgid "ERROR Trying to audit %s" msgstr "" -#: swift/obj/auditor.py:213 +#: swift/obj/auditor.py:224 #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" -#: swift/obj/auditor.py:263 +#: swift/obj/auditor.py:275 #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "" -#: swift/obj/auditor.py:334 swift/obj/auditor.py:355 +#: swift/obj/auditor.py:346 swift/obj/auditor.py:367 #, python-format msgid "ERROR auditing: %s" msgstr "" @@ -856,97 +861,97 @@ msgstr "" msgid "Directory %r does not map to a valid policy (%s)" msgstr "" -#: swift/obj/diskfile.py:620 +#: swift/obj/diskfile.py:700 #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" -#: swift/obj/diskfile.py:703 +#: swift/obj/diskfile.py:783 msgid "Error hashing suffix" msgstr "" -#: swift/obj/diskfile.py:824 +#: swift/obj/diskfile.py:904 #, python-format msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" -#: swift/obj/diskfile.py:1038 +#: swift/obj/diskfile.py:1134 #, python-format msgid "Problem cleaning up %s" msgstr "" -#: swift/obj/diskfile.py:1345 +#: swift/obj/diskfile.py:1441 #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" -#: swift/obj/diskfile.py:1626 +#: swift/obj/diskfile.py:1751 #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata " "%(meta)s" msgstr "" -#: swift/obj/diskfile.py:2023 +#: swift/obj/diskfile.py:2140 #, python-format msgid "No space left on device for %s (%s)" msgstr "" -#: swift/obj/diskfile.py:2032 +#: swift/obj/diskfile.py:2149 #, python-format msgid "Problem cleaning up %s (%s)" msgstr "" -#: swift/obj/diskfile.py:2035 +#: swift/obj/diskfile.py:2152 #, python-format msgid "Problem writing durable state file %s (%s)" msgstr "" -#: swift/obj/expirer.py:79 +#: swift/obj/expirer.py:80 #, python-format msgid "Pass completed in %ds; %d objects expired" msgstr "" -#: swift/obj/expirer.py:86 +#: swift/obj/expirer.py:87 #, python-format msgid "Pass so far %ds; %d objects expired" msgstr "" -#: swift/obj/expirer.py:170 +#: swift/obj/expirer.py:171 #, python-format msgid "Pass beginning; %s possible containers; %s possible objects" msgstr "" -#: swift/obj/expirer.py:196 +#: swift/obj/expirer.py:197 #, python-format msgid "Exception while deleting container %s %s" msgstr "" -#: swift/obj/expirer.py:201 swift/obj/expirer.py:218 +#: swift/obj/expirer.py:202 swift/obj/expirer.py:219 msgid "Unhandled exception" msgstr "" -#: swift/obj/expirer.py:268 +#: swift/obj/expirer.py:269 #, python-format msgid "Exception while deleting object %s %s %s" msgstr "" -#: swift/obj/reconstructor.py:209 swift/obj/reconstructor.py:494 +#: swift/obj/reconstructor.py:213 swift/obj/reconstructor.py:499 #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "" -#: swift/obj/reconstructor.py:217 +#: swift/obj/reconstructor.py:221 #, python-format msgid "Trying to GET %(full_path)s" msgstr "" -#: swift/obj/reconstructor.py:323 +#: swift/obj/reconstructor.py:328 #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" -#: swift/obj/reconstructor.py:350 +#: 
swift/obj/reconstructor.py:355 #, python-format msgid "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of " @@ -954,153 +959,153 @@ msgid "" "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" -#: swift/obj/reconstructor.py:371 swift/obj/replicator.py:508 +#: swift/obj/reconstructor.py:376 swift/obj/replicator.py:511 #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% " "synced" msgstr "" -#: swift/obj/reconstructor.py:378 swift/obj/replicator.py:515 +#: swift/obj/reconstructor.py:383 swift/obj/replicator.py:518 #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" -#: swift/obj/reconstructor.py:386 +#: swift/obj/reconstructor.py:391 #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "" -#: swift/obj/reconstructor.py:415 swift/obj/replicator.py:552 +#: swift/obj/reconstructor.py:420 swift/obj/replicator.py:555 msgid "Lockup detected.. killing live coros." msgstr "" -#: swift/obj/reconstructor.py:462 +#: swift/obj/reconstructor.py:467 #, python-format msgid "Trying to sync suffixes with %s" msgstr "" -#: swift/obj/reconstructor.py:487 +#: swift/obj/reconstructor.py:492 #, python-format msgid "%s responded as unmounted" msgstr "" -#: swift/obj/reconstructor.py:888 swift/obj/replicator.py:361 +#: swift/obj/reconstructor.py:893 swift/obj/replicator.py:364 #, python-format msgid "Removing partition: %s" msgstr "" -#: swift/obj/reconstructor.py:904 +#: swift/obj/reconstructor.py:909 msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" -#: swift/obj/reconstructor.py:923 +#: swift/obj/reconstructor.py:928 msgid "Exception in top-levelreconstruction loop" msgstr "" -#: swift/obj/reconstructor.py:933 +#: swift/obj/reconstructor.py:938 msgid "Running object reconstructor in script mode." msgstr "" -#: swift/obj/reconstructor.py:942 +#: swift/obj/reconstructor.py:947 #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "" -#: swift/obj/reconstructor.py:949 +#: swift/obj/reconstructor.py:954 msgid "Starting object reconstructor in daemon mode." msgstr "" -#: swift/obj/reconstructor.py:953 +#: swift/obj/reconstructor.py:958 msgid "Starting object reconstruction pass." msgstr "" -#: swift/obj/reconstructor.py:958 +#: swift/obj/reconstructor.py:963 #, python-format msgid "Object reconstruction complete. 
(%.02f minutes)" msgstr "" -#: swift/obj/replicator.py:181 +#: swift/obj/replicator.py:183 #, python-format msgid "Killing long-running rsync: %s" msgstr "" -#: swift/obj/replicator.py:195 +#: swift/obj/replicator.py:197 #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "" -#: swift/obj/replicator.py:202 swift/obj/replicator.py:206 +#: swift/obj/replicator.py:204 swift/obj/replicator.py:208 #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "" -#: swift/obj/replicator.py:331 +#: swift/obj/replicator.py:334 #, python-format msgid "Removing %s objects" msgstr "" -#: swift/obj/replicator.py:350 +#: swift/obj/replicator.py:353 msgid "Error syncing handoff partition" msgstr "" -#: swift/obj/replicator.py:426 +#: swift/obj/replicator.py:429 #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "" -#: swift/obj/replicator.py:433 +#: swift/obj/replicator.py:436 #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "" -#: swift/obj/replicator.py:477 +#: swift/obj/replicator.py:480 #, python-format msgid "Error syncing with node: %s" msgstr "" -#: swift/obj/replicator.py:482 +#: swift/obj/replicator.py:485 msgid "Error syncing partition" msgstr "" -#: swift/obj/replicator.py:497 +#: swift/obj/replicator.py:500 #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" -#: swift/obj/replicator.py:523 +#: swift/obj/replicator.py:526 #, python-format msgid "Nothing replicated for %s seconds." msgstr "" -#: swift/obj/replicator.py:699 +#: swift/obj/replicator.py:709 msgid "Ring change detected. Aborting current replication pass." msgstr "" -#: swift/obj/replicator.py:727 +#: swift/obj/replicator.py:737 msgid "Exception in top-level replication loop" msgstr "" -#: swift/obj/replicator.py:737 +#: swift/obj/replicator.py:747 msgid "Running object replicator in script mode." msgstr "" -#: swift/obj/replicator.py:755 +#: swift/obj/replicator.py:765 #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "" -#: swift/obj/replicator.py:766 +#: swift/obj/replicator.py:776 msgid "Starting object replicator in daemon mode." msgstr "" -#: swift/obj/replicator.py:770 +#: swift/obj/replicator.py:780 msgid "Starting object replication pass." msgstr "" -#: swift/obj/replicator.py:775 +#: swift/obj/replicator.py:785 #, python-format msgid "Object replication complete. 
(%.02f minutes)" msgstr "" @@ -1158,96 +1163,96 @@ msgid "" "%(success)s successes, %(fail)s failures" msgstr "" -#: swift/obj/updater.py:179 +#: swift/obj/updater.py:180 #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "" -#: swift/obj/updater.py:209 +#: swift/obj/updater.py:210 #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "" -#: swift/obj/updater.py:274 +#: swift/obj/updater.py:275 #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "" -#: swift/proxy/server.py:414 +#: swift/proxy/server.py:416 msgid "ERROR Unhandled exception in request" msgstr "" -#: swift/proxy/server.py:469 +#: swift/proxy/server.py:471 #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "" -#: swift/proxy/server.py:486 swift/proxy/server.py:504 +#: swift/proxy/server.py:488 swift/proxy/server.py:506 #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "" -#: swift/proxy/server.py:527 +#: swift/proxy/server.py:529 #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "" -#: swift/proxy/controllers/account.py:64 +#: swift/proxy/controllers/account.py:65 msgid "Account" msgstr "" -#: swift/proxy/controllers/base.py:803 swift/proxy/controllers/base.py:842 -#: swift/proxy/controllers/base.py:935 swift/proxy/controllers/obj.py:334 -#: swift/proxy/controllers/obj.py:866 swift/proxy/controllers/obj.py:913 -#: swift/proxy/controllers/obj.py:927 swift/proxy/controllers/obj.py:1742 -#: swift/proxy/controllers/obj.py:1979 swift/proxy/controllers/obj.py:2104 -#: swift/proxy/controllers/obj.py:2298 +#: swift/proxy/controllers/base.py:809 swift/proxy/controllers/base.py:848 +#: swift/proxy/controllers/base.py:940 swift/proxy/controllers/obj.py:338 +#: swift/proxy/controllers/obj.py:875 swift/proxy/controllers/obj.py:924 +#: swift/proxy/controllers/obj.py:938 swift/proxy/controllers/obj.py:1759 +#: swift/proxy/controllers/obj.py:1996 swift/proxy/controllers/obj.py:2121 +#: swift/proxy/controllers/obj.py:2353 msgid "Object" msgstr "" -#: swift/proxy/controllers/base.py:804 swift/proxy/controllers/base.py:843 +#: swift/proxy/controllers/base.py:810 swift/proxy/controllers/base.py:849 msgid "Trying to read during GET (retrying)" msgstr "" -#: swift/proxy/controllers/base.py:936 +#: swift/proxy/controllers/base.py:941 msgid "Trying to read during GET" msgstr "" -#: swift/proxy/controllers/base.py:940 +#: swift/proxy/controllers/base.py:945 #, python-format msgid "Client did not read from proxy within %ss" msgstr "" -#: swift/proxy/controllers/base.py:945 +#: swift/proxy/controllers/base.py:950 msgid "Client disconnected on read" msgstr "" -#: swift/proxy/controllers/base.py:947 +#: swift/proxy/controllers/base.py:952 msgid "Trying to send to client" msgstr "" -#: swift/proxy/controllers/base.py:998 swift/proxy/controllers/base.py:1410 +#: swift/proxy/controllers/base.py:1003 swift/proxy/controllers/base.py:1415 #, python-format msgid "Trying to %(method)s %(path)s" msgstr "" -#: swift/proxy/controllers/base.py:1037 swift/proxy/controllers/base.py:1398 -#: swift/proxy/controllers/obj.py:357 swift/proxy/controllers/obj.py:904 -#: swift/proxy/controllers/obj.py:2096 swift/proxy/controllers/obj.py:2343 +#: swift/proxy/controllers/base.py:1042 swift/proxy/controllers/base.py:1403 +#: swift/proxy/controllers/obj.py:361 swift/proxy/controllers/obj.py:915 +#: swift/proxy/controllers/obj.py:2113 swift/proxy/controllers/obj.py:2398 msgid "ERROR Insufficient Storage" msgstr 
"" -#: swift/proxy/controllers/base.py:1040 +#: swift/proxy/controllers/base.py:1045 #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "" -#: swift/proxy/controllers/base.py:1401 +#: swift/proxy/controllers/base.py:1406 #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "" -#: swift/proxy/controllers/base.py:1531 +#: swift/proxy/controllers/base.py:1536 #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "" @@ -1256,73 +1261,79 @@ msgstr "" msgid "Container" msgstr "" -#: swift/proxy/controllers/obj.py:335 +#: swift/proxy/controllers/obj.py:339 #, python-format msgid "Trying to get final status of PUT to %s" msgstr "" -#: swift/proxy/controllers/obj.py:361 swift/proxy/controllers/obj.py:2348 +#: swift/proxy/controllers/obj.py:365 swift/proxy/controllers/obj.py:2403 #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "" -#: swift/proxy/controllers/obj.py:562 +#: swift/proxy/controllers/obj.py:571 #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "" -#: swift/proxy/controllers/obj.py:575 +#: swift/proxy/controllers/obj.py:584 #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" -#: swift/proxy/controllers/obj.py:908 swift/proxy/controllers/obj.py:2099 +#: swift/proxy/controllers/obj.py:919 swift/proxy/controllers/obj.py:2116 #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "" -#: swift/proxy/controllers/obj.py:914 swift/proxy/controllers/obj.py:2105 +#: swift/proxy/controllers/obj.py:925 swift/proxy/controllers/obj.py:2122 #, python-format msgid "Expect: 100-continue on %s" msgstr "" -#: swift/proxy/controllers/obj.py:928 swift/proxy/controllers/obj.py:1743 +#: swift/proxy/controllers/obj.py:939 swift/proxy/controllers/obj.py:1760 #, python-format msgid "Trying to write to %s" msgstr "" -#: swift/proxy/controllers/obj.py:979 swift/proxy/controllers/obj.py:2253 +#: swift/proxy/controllers/obj.py:990 swift/proxy/controllers/obj.py:2287 #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "" -#: swift/proxy/controllers/obj.py:987 swift/proxy/controllers/obj.py:2259 +#: swift/proxy/controllers/obj.py:998 swift/proxy/controllers/obj.py:2293 msgid "Client disconnected without sending last chunk" msgstr "" -#: swift/proxy/controllers/obj.py:992 swift/proxy/controllers/obj.py:2266 +#: swift/proxy/controllers/obj.py:1003 swift/proxy/controllers/obj.py:2300 msgid "ERROR Exception causing client disconnect" msgstr "" -#: swift/proxy/controllers/obj.py:997 swift/proxy/controllers/obj.py:2201 +#: swift/proxy/controllers/obj.py:1007 swift/proxy/controllers/obj.py:2304 +#, python-format +msgid "ERROR Exception transferring data to object servers %s" +msgstr "" + +#: swift/proxy/controllers/obj.py:1013 swift/proxy/controllers/obj.py:2218 msgid "Client disconnected without sending enough data" msgstr "" -#: swift/proxy/controllers/obj.py:1043 +#: swift/proxy/controllers/obj.py:1059 #, python-format msgid "Object servers returned %s mismatched etags" msgstr "" -#: swift/proxy/controllers/obj.py:1047 swift/proxy/controllers/obj.py:2427 +#: swift/proxy/controllers/obj.py:1063 swift/proxy/controllers/obj.py:2264 +#: swift/proxy/controllers/obj.py:2487 msgid "Object PUT" msgstr "" -#: swift/proxy/controllers/obj.py:2240 +#: swift/proxy/controllers/obj.py:2257 #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "" -#: 
swift/proxy/controllers/obj.py:2299 +#: swift/proxy/controllers/obj.py:2354 #, python-format msgid "Trying to get %s status of PUT to %s" msgstr "" diff --git a/swift/locale/tr_TR/LC_MESSAGES/swift.po b/swift/locale/tr_TR/LC_MESSAGES/swift.po index ddf387fb24..24839836f9 100644 --- a/swift/locale/tr_TR/LC_MESSAGES/swift.po +++ b/swift/locale/tr_TR/LC_MESSAGES/swift.po @@ -1,4 +1,4 @@ -# Turkish (Turkey) translations for swift. +# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # @@ -7,18 +7,19 @@ # OpenStack Infra , 2015. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.5.1.dev70\n" +"Project-Id-Version: swift 2.5.1.dev267\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-10-23 06:34+0000\n" +"POT-Creation-Date: 2016-01-16 12:32+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-09-04 07:42+0000\n" "Last-Translator: İşbaran Akçayır \n" -"Language: tr_TR\n" +"Language: tr-TR\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"Generated-By: Babel 2.0\n" +"X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" -"Plural-Forms: nplurals=1; plural=0\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.1.1\n" msgid "" "\n" diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po index 33ce087a44..37444475e9 100644 --- a/swift/locale/zh_CN/LC_MESSAGES/swift.po +++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po @@ -1,4 +1,4 @@ -# Chinese (Simplified, China) translations for swift. +# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # @@ -8,18 +8,19 @@ # OpenStack Infra , 2015. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.5.1.dev70\n" +"Project-Id-Version: swift 2.5.1.dev267\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-10-23 06:34+0000\n" +"POT-Creation-Date: 2016-01-16 12:32+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-11 11:22+0000\n" "Last-Translator: openstackjenkins \n" -"Language: zh_Hans_CN\n" +"Language: zh-CN\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"Generated-By: Babel 2.0\n" +"X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (China)\n" -"Plural-Forms: nplurals=1; plural=0\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.1.1\n" msgid "" "\n" diff --git a/swift/locale/zh_TW/LC_MESSAGES/swift.po b/swift/locale/zh_TW/LC_MESSAGES/swift.po index f000a918f1..06f319c572 100644 --- a/swift/locale/zh_TW/LC_MESSAGES/swift.po +++ b/swift/locale/zh_TW/LC_MESSAGES/swift.po @@ -1,4 +1,4 @@ -# Chinese (Traditional, Taiwan) translations for swift. +# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # @@ -7,18 +7,19 @@ # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.5.1.dev70\n" +"Project-Id-Version: swift 2.5.1.dev267\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-10-23 06:34+0000\n" +"POT-Creation-Date: 2016-01-16 12:32+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-11 11:22+0000\n" "Last-Translator: openstackjenkins \n" -"Language: zh_Hant_TW\n" +"Language: zh-TW\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"Generated-By: Babel 2.0\n" +"X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (Taiwan)\n" -"Plural-Forms: nplurals=1; plural=0\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.1.1\n" msgid "" "\n" From bf10974cdefffdaaebc58d21e8a9912638a0405a Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 16 Dec 2015 15:46:13 -0800 Subject: [PATCH 44/52] Expose token expiration time in tempauth auth response Previously, we gave no indication of when a token would expire. Users would have to just use it until it stopped working, then re-auth. Now, a successful auth response will include a new header, X-Auth-Token-Expires, with the number of seconds remaining until the token is invalid. This allows the client to attempt to re-auth before sending a request that will definitely fail. For comparison, swauth already uses the X-Auth-Token-Expires header with identical semantics. Additionally, Keystone (v2 and v3) already exposes expiration times in its JSON responses. The security impact should be minimal. Change-Id: I5a4a74276bc0df6dda94e4bc150065c0d77de0eb --- swift/common/middleware/tempauth.py | 8 ++++++-- test/unit/common/middleware/test_tempauth.py | 17 +++++++++++++++-- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/swift/common/middleware/tempauth.py b/swift/common/middleware/tempauth.py index 9eec784a6e..a28e450a6f 100644 --- a/swift/common/middleware/tempauth.py +++ b/swift/common/middleware/tempauth.py @@ -38,6 +38,9 @@ from swift.common.utils import config_read_reseller_options from swift.proxy.controllers.base import get_account_info +DEFAULT_TOKEN_LIFE = 86400 + + class TempAuth(object): """ Test authentication and authorization system. 
@@ -181,7 +184,7 @@ class TempAuth(object): self.auth_prefix = '/' + self.auth_prefix if not self.auth_prefix.endswith('/'): self.auth_prefix += '/' - self.token_life = int(conf.get('token_life', 86400)) + self.token_life = int(conf.get('token_life', DEFAULT_TOKEN_LIFE)) self.allow_overrides = config_true_value( conf.get('allow_overrides', 't')) self.storage_url_scheme = conf.get('storage_url_scheme', 'default') @@ -765,7 +768,8 @@ class TempAuth(object): memcache_client.set(memcache_user_key, token, time=float(expires - time())) resp = Response(request=req, headers={ - 'x-auth-token': token, 'x-storage-token': token}) + 'x-auth-token': token, 'x-storage-token': token, + 'x-auth-token-expires': str(int(expires - time()))}) url = self.users[account_user]['url'].replace('$HOST', resp.host_url) if self.storage_url_scheme != 'default': url = self.storage_url_scheme + ':' + url.split(':', 1)[1] diff --git a/test/unit/common/middleware/test_tempauth.py b/test/unit/common/middleware/test_tempauth.py index cea15b6595..58bcc11c87 100644 --- a/test/unit/common/middleware/test_tempauth.py +++ b/test/unit/common/middleware/test_tempauth.py @@ -515,7 +515,11 @@ class TestAuth(unittest.TestCase): self.assertEqual(resp.status_int, 200) self.assertTrue(resp.headers['x-storage-url'].endswith('/v1/AUTH_ac')) self.assertTrue(resp.headers['x-auth-token'].startswith('AUTH_')) - self.assertTrue(len(resp.headers['x-auth-token']) > 10) + self.assertEqual(resp.headers['x-auth-token'], + resp.headers['x-storage-token']) + self.assertAlmostEqual(int(resp.headers['x-auth-token-expires']), + auth.DEFAULT_TOKEN_LIFE - 0.5, delta=0.5) + self.assertGreater(len(resp.headers['x-auth-token']), 10) def test_use_token_success(self): # Example of how to simulate an authorized request @@ -641,11 +645,16 @@ class TestAuth(unittest.TestCase): req.environ['SERVER_NAME'] = 'bob' req.environ['SERVER_PORT'] = '1234' req.environ['swift.cache'].set('AUTH_/user/test:tester', 'uuid_token') + expires = time() + 180 req.environ['swift.cache'].set('AUTH_/token/uuid_token', - (time() + 180, 'test,test:tester')) + (expires, 'test,test:tester')) resp = req.get_response(self.test_auth) self.assertEqual(resp.status_int, 200) self.assertEqual(resp.headers['x-auth-token'], 'uuid_token') + self.assertEqual(resp.headers['x-auth-token'], + resp.headers['x-storage-token']) + self.assertAlmostEqual(int(resp.headers['x-auth-token-expires']), + 179.5, delta=0.5) def test_old_token_overdate(self): self.test_auth = \ @@ -664,6 +673,8 @@ class TestAuth(unittest.TestCase): self.assertEqual(resp.status_int, 200) self.assertNotEqual(resp.headers['x-auth-token'], 'uuid_token') self.assertEqual(resp.headers['x-auth-token'][:7], 'AUTH_tk') + self.assertAlmostEqual(int(resp.headers['x-auth-token-expires']), + auth.DEFAULT_TOKEN_LIFE - 0.5, delta=0.5) def test_old_token_with_old_data(self): self.test_auth = \ @@ -682,6 +693,8 @@ class TestAuth(unittest.TestCase): self.assertEqual(resp.status_int, 200) self.assertNotEqual(resp.headers['x-auth-token'], 'uuid_token') self.assertEqual(resp.headers['x-auth-token'][:7], 'AUTH_tk') + self.assertAlmostEqual(int(resp.headers['x-auth-token-expires']), + auth.DEFAULT_TOKEN_LIFE - 0.5, delta=0.5) def test_reseller_admin_is_owner(self): orig_authorize = self.test_auth.authorize From 133a3ea601a3fea84af36a42845f27b8182fd901 Mon Sep 17 00:00:00 2001 From: Christopher Bartz Date: Mon, 21 Dec 2015 14:17:00 +0100 Subject: [PATCH 45/52] Use the correct split_path in handle_request Change-Id: I86d423309f0b2091ee2e82b2245caf925b6a75ef 
Closes-Bug: #1528189 --- swift/common/middleware/tempauth.py | 3 ++- test/unit/common/middleware/test_tempauth.py | 12 ++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/swift/common/middleware/tempauth.py b/swift/common/middleware/tempauth.py index 9eec784a6e..21a451e103 100644 --- a/swift/common/middleware/tempauth.py +++ b/swift/common/middleware/tempauth.py @@ -631,7 +631,8 @@ class TempAuth(object): req.start_time = time() handler = None try: - version, account, user, _junk = req.split_path(1, 4, True) + version, account, user, _junk = split_path(req.path_info, + 1, 4, True) except ValueError: self.logger.increment('errors') return HTTPNotFound(request=req) diff --git a/test/unit/common/middleware/test_tempauth.py b/test/unit/common/middleware/test_tempauth.py index cea15b6595..292ccecbf5 100644 --- a/test/unit/common/middleware/test_tempauth.py +++ b/test/unit/common/middleware/test_tempauth.py @@ -517,6 +517,18 @@ class TestAuth(unittest.TestCase): self.assertTrue(resp.headers['x-auth-token'].startswith('AUTH_')) self.assertTrue(len(resp.headers['x-auth-token']) > 10) + def test_get_token_success_other_auth_prefix(self): + test_auth = auth.filter_factory({'user_ac_user': 'testing', + 'auth_prefix': '/other/'})(FakeApp()) + req = self._make_request( + '/other/v1.0', + headers={'X-Auth-User': 'ac:user', 'X-Auth-Key': 'testing'}) + resp = req.get_response(test_auth) + self.assertEqual(resp.status_int, 200) + self.assertTrue(resp.headers['x-storage-url'].endswith('/v1/AUTH_ac')) + self.assertTrue(resp.headers['x-auth-token'].startswith('AUTH_')) + self.assertTrue(len(resp.headers['x-auth-token']) > 10) + def test_use_token_success(self): # Example of how to simulate an authorized request test_auth = auth.filter_factory({'user_acct_user': 'testing'})( From 999479f9b17b42ccc5da54ce01651960cf7cf970 Mon Sep 17 00:00:00 2001 From: John Dickinson Date: Tue, 19 Jan 2016 10:30:30 -0800 Subject: [PATCH 46/52] Bump eventlet min version to 0.17.4 IPv6 support in Swift is dependent on IPv6 support in eventlet. eventlet itself only claims support for IPv6 post v0.17 (https://github.com/eventlet/eventlet/issues/8). This update matches the OpenStack global requirements version. Change-Id: I9d8433cdd3bf7d7a93b8f50b991cc21721a80d22 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 13b94d9cb9..17e18b4b12 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ dnspython>=1.12.0;python_version<'3.0' dnspython3>=1.12.0;python_version>='3.0' -eventlet>=0.16.1,!=0.17.0 +eventlet>=0.17.4 # MIT greenlet>=0.3.1 netifaces>=0.5,!=0.10.0,!=0.10.1 pastedeploy>=1.3.3 From 3a0486e532f22af0d3c8a5c5d78613c22e786ff6 Mon Sep 17 00:00:00 2001 From: Sivasathurappan Radhakrishnan Date: Fri, 4 Dec 2015 17:43:00 +0000 Subject: [PATCH 47/52] Deleted comment about part power in FakeRing Deleted the comment about the part power parameter in class FakeRing, since the behavior it described was dropped in I8bfc388a04eff6491038991cdfd7686c9d961545. 
Change-Id: Iec7d2565a77e48493b0056021066d8d8eab65d0b Closes-Bug: #1488704 --- test/unit/__init__.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/test/unit/__init__.py b/test/unit/__init__.py index ec6a2a0985..6834eebfc5 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -203,13 +203,6 @@ class FakeRing(Ring): def __init__(self, replicas=3, max_more_nodes=0, part_power=0, base_port=1000): - """ - :param part_power: make part calculation based on the path - - If you set a part_power when you setup your FakeRing the parts you get - out of ring methods will actually be based on the path - otherwise we - exercise the real ring code, but ignore the result and return 1. - """ self._base_port = base_port self.max_more_nodes = max_more_nodes self._part_shift = 32 - part_power From bc4b298b6e208d3188641712c9d66ae82d172c14 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Tue, 19 Jan 2016 15:33:13 -0800 Subject: [PATCH 48/52] Fix a comment's indentation Change-Id: I34514525b606cf82767ddce7769bc42fa5457717 --- test/unit/common/ring/test_builder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py index 57f0ee8649..f62902108f 100644 --- a/test/unit/common/ring/test_builder.py +++ b/test/unit/common/ring/test_builder.py @@ -2374,7 +2374,7 @@ class TestRingBuilder(unittest.TestCase): rb = self._add_dev_delete_first_n(add_dev_count, add_dev_count - 3) # add specifying id exp_new_dev_id = 2 -# [dev, dev, None, dev, dev, None] + # [dev, dev, None, dev, dev, None] try: new_dev_id = rb.add_dev({'id': exp_new_dev_id, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', From 58359269b0e971e52f0eb7f97221566ca2148014 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Tue, 8 Dec 2015 16:36:05 -0800 Subject: [PATCH 49/52] Fix memory/socket leak in proxy on truncated SLO/DLO GET When a client disconnected while consuming an SLO or DLO GET response, the proxy would leak a socket. This could be observed via strace as a socket that had shutdown() called on it, but was never closed. It could also be observed by counting entries in /proc/<pid>/fd, where <pid> is the pid of a proxy server worker process. This is due to a memory leak in SegmentedIterable. A SegmentedIterable has an 'app_iter' attribute, which is a generator. That generator references 'self' (the SegmentedIterable object). This creates a cyclic reference: the generator refers to the SegmentedIterable, and the SegmentedIterable refers to the generator. Python can normally handle cyclic garbage; reference counting won't reclaim it, but the garbage collector will. However, objects with finalizers will stop the garbage collector from collecting them* and the cycle of which they are part. For most objects, "has finalizer" is synonymous with "has a __del__ method". However, a generator has a finalizer once it's started running and before it finishes: basically, while it has stack frames associated with it**. When a client disconnects mid-stream, we get a memory leak. We have our SegmentedIterable object (call it "si"), and its associated generator. si.app_iter is the generator, and the generator closes over si, so we have a cycle; and the generator has started but not yet finished, so the generator needs finalization; hence, the garbage collector won't ever clean it up. The socket leak comes in because the generator *also* refers to the request's WSGI environment, which contains wsgi.input, which ultimately refers to a _socket object from the standard library. 
Python's _socket objects only close their underlying file descriptor when their reference counts fall to 0***. This commit makes SegmentedIterable.close() call self.app_iter.close(), thereby unwinding its generator's stack and making it eligible for garbage collection. * in Python < 3.4, at least. See PEP 442. ** see PyGen_NeedsFinalizing() in Objects/genobject.c and also has_finalizer() in Modules/gcmodule.c in Python. *** see sock_dealloc() in Modules/socketmodule.c in Python. See sock_close() in the same file for the other half of the sad story. This closes CVE-2016-0738. Closes-Bug: 1493303 Co-Authored-By: Kota Tsuyuzaki Change-Id: Ib86c4c45641485ce1034212bf6f53bb84f02f612 --- swift/common/request_helpers.py | 6 ++- test/unit/common/middleware/test_slo.py | 64 ++++++++++++++++++++++++- 2 files changed, 67 insertions(+), 3 deletions(-) diff --git a/swift/common/request_helpers.py b/swift/common/request_helpers.py index f12f876534..d42356b58b 100644 --- a/swift/common/request_helpers.py +++ b/swift/common/request_helpers.py @@ -454,6 +454,9 @@ class SegmentedIterable(object): self.logger.exception(_('ERROR: An error occurred ' 'while retrieving segments')) raise + finally: + if self.current_resp: + close_if_possible(self.current_resp.app_iter) def app_iter_range(self, *a, **kw): """ @@ -496,5 +499,4 @@ class SegmentedIterable(object): Called when the client disconnect. Ensure that the connection to the backend server is closed. """ - if self.current_resp: - close_if_possible(self.current_resp.app_iter) + close_if_possible(self.app_iter) diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py index 32d49547d4..f1bafe11cf 100644 --- a/test/unit/common/middleware/test_slo.py +++ b/test/unit/common/middleware/test_slo.py @@ -26,7 +26,7 @@ from swift.common import swob, utils from swift.common.exceptions import ListingIterError, SegmentError from swift.common.middleware import slo from swift.common.swob import Request, Response, HTTPException -from swift.common.utils import quote, closing_if_possible +from swift.common.utils import quote, closing_if_possible, close_if_possible from test.unit.common.middleware.helpers import FakeSwift @@ -1944,6 +1944,68 @@ class TestSloGetManifest(SloTestCase): self.assertEqual(headers['X-Object-Meta-Fish'], 'Bass') self.assertEqual(body, '') + def test_generator_closure(self): + # Test that the SLO WSGI iterable closes its internal .app_iter when + # it receives a close() message. + # + # This is sufficient to fix a memory leak. The memory leak arises + # due to cyclic references involving a running generator; a running + # generator sometimes prevents the GC from collecting it in the + # same way that an object with a defined __del__ does. + # + # There are other ways to break the cycle and fix the memory leak as + # well; calling .close() on the generator is sufficient, but not + # necessary. However, having this test is better than nothing for + # preventing regressions. 
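+ #
+ # A sketch of the cycle under test, following the commit message
+ # above ("si" is shorthand used there, not a name in the middleware):
+ #
+ #   si.app_iter is a generator, and the generator's frame refers
+ #   back to si, so si and si.app_iter form a reference cycle.
+ #
+ # Once the generator has started (and before it finishes), it needs
+ # finalization, so the cycle collector leaves the pair -- and the
+ # WSGI environ the generator drags along -- alive until close() is
+ # called.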
+ leaks = [0] + + class LeakTracker(object): + def __init__(self, inner_iter): + leaks[0] += 1 + self.inner_iter = iter(inner_iter) + + def __iter__(self): + return self + + def next(self): + return next(self.inner_iter) + + def close(self): + leaks[0] -= 1 + close_if_possible(self.inner_iter) + + class LeakTrackingSegmentedIterable(slo.SegmentedIterable): + def _internal_iter(self, *a, **kw): + it = super( + LeakTrackingSegmentedIterable, self)._internal_iter( + *a, **kw) + return LeakTracker(it) + + status = [None] + headers = [None] + + def start_response(s, h, ei=None): + status[0] = s + headers[0] = h + + req = Request.blank( + '/v1/AUTH_test/gettest/manifest-abcd', + environ={'REQUEST_METHOD': 'GET', + 'HTTP_ACCEPT': 'application/json'}) + + # can't self.call_slo() here since we don't want to consume the + # whole body + with patch.object(slo, 'SegmentedIterable', + LeakTrackingSegmentedIterable): + app_resp = self.slo(req.environ, start_response) + self.assertEqual(status[0], '200 OK') # sanity check + body_iter = iter(app_resp) + chunk = next(body_iter) + self.assertEqual(chunk, 'aaaaa') # sanity check + + app_resp.close() + self.assertEqual(0, leaks[0]) + def test_head_manifest_is_efficient(self): req = Request.blank( '/v1/AUTH_test/gettest/manifest-abcd', From 221f94fdd39fd2dcd9a2e5565adceab615d55913 Mon Sep 17 00:00:00 2001 From: John Dickinson Date: Tue, 19 Jan 2016 14:50:24 -0800 Subject: [PATCH 50/52] authors and changelog updates for 2.6.0 Change-Id: Idd0ff9e70abc0773be183c37cd6125fe852da7c0 --- .mailmap | 6 ++++ AUTHORS | 32 +++++++++++++++----- CHANGELOG | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 119 insertions(+), 8 deletions(-) diff --git a/.mailmap b/.mailmap index 00ee2a3459..6827a2d4a3 100644 --- a/.mailmap +++ b/.mailmap @@ -87,3 +87,9 @@ Donagh McCabe Eamonn O'Toole Gerry Drudy Mark Seger +Timur Alperovich +Mehdi Abaakouk +Richard Hawkins +Ondrej Novy +Peter Lisak +Ke Liang diff --git a/AUTHORS b/AUTHORS index 7efcfaca64..76cd276d19 100644 --- a/AUTHORS +++ b/AUTHORS @@ -25,7 +25,7 @@ Chuck Thier (cthier@gmail.com) Contributors ------------ -Mehdi Abaakouk (mehdi.abaakouk@enovance.com) +Mehdi Abaakouk (sileht@redhat.com) Timur Alperovich (timur.alperovich@gmail.com) Jesse Andrews (anotherjesse@gmail.com) Joe Arnold (joe@swiftstack.com) @@ -41,7 +41,7 @@ James E. Blair (jeblair@openstack.org) Fabien Boucher (fabien.boucher@enovance.com) Clark Boylan (clark.boylan@gmail.com) Pádraig Brady (pbrady@redhat.com) -Lorcan Browne (lorcan.browne@hp.com) +Lorcan Browne (lorcan.browne@hpe.com) Russell Bryant (rbryant@redhat.com) Jay S. Bryant (jsbryant@us.ibm.com) Tim Burke (tim.burke@gmail.com) @@ -56,15 +56,17 @@ François Charlier (francois.charlier@enovance.com) Ray Chen (oldsharp@163.com) Harshit Chitalia (harshit@acelio.com) Brian Cline (bcline@softlayer.com) -Alistair Coles (alistair.coles@hp.com) +Alistair Coles (alistair.coles@hpe.com) Clément Contini (ccontini@cloudops.com) Brian Curtin (brian.curtin@rackspace.com) Thiago da Silva (thiago@redhat.com) Julien Danjou (julien@danjou.info) +Paul Dardeau (paul.dardeau@intel.com) +Zack M. 
Davis (zdavis@swiftstack.com) Ksenia Demina (kdemina@mirantis.com) Dan Dillinger (dan.dillinger@sonian.net) Cedric Dos Santos (cedric.dos.sant@gmail.com) -Gerry Drudy (gerry.drudy@hp.com) +Gerry Drudy (gerry.drudy@hpe.com) Morgan Fainberg (morgan.fainberg@gmail.com) ZhiQiang Fan (aji.zqfan@gmail.com) Oshrit Feder (oshritf@il.ibm.com) @@ -85,6 +87,7 @@ David Goetz (david.goetz@rackspace.com) Tushar Gohad (tushar.gohad@intel.com) Jonathan Gonzalez V (jonathan.abdiel@gmail.com) Joe Gordon (jogo@cloudscaling.com) +ChangBo Guo(gcb) (eric.guo@easystack.cn) David Hadas (davidh@il.ibm.com) Andrew Hale (andy@wwwdata.eu) Soren Hansen (soren@linux2go.dk) @@ -92,9 +95,12 @@ Richard Hawkins (richard.hawkins@rackspace.com) Gregory Haynes (greg@greghaynes.net) Doug Hellmann (doug.hellmann@dreamhost.com) Dan Hersam (dan.hersam@hp.com) +hgangwx (hgangwx@cn.ibm.com) Derek Higgins (derekh@redhat.com) +Jonathan Hinson (jlhinson@us.ibm.com) Alex Holden (alex@alexjonasholden.com) Edward Hope-Morley (opentastic@gmail.com) +Ferenc Horváth (hferenc@inf.u-szeged.hu) Charles Hsu (charles0126@gmail.com) Joanna H. Huang (joanna.huitzu.huang@gmail.com) Kun Huang (gareth@unitedstack.com) @@ -111,6 +117,7 @@ Jason Johnson (jajohnson@softlayer.com) Brian K. Jones (bkjones@gmail.com) Arnaud JOST (arnaud.jost@ovh.net) Kiyoung Jung (kiyoung.jung@kt.com) +Harshada Mangesh Kakad (harshadak@metsi.co.uk) Takashi Kajinami (kajinamit@nttdata.co.jp) Matt Kassawara (mkassawara@gmail.com) Morita Kazutaka (morita.kazutaka@gmail.com) @@ -136,6 +143,8 @@ Eohyung Lee (liquidnuker@gmail.com) Zhao Lei (zhaolei@cn.fujitsu.com) Jamie Lennox (jlennox@redhat.com) Tong Li (litong01@us.ibm.com) +Ke Liang (ke.liang@easystack.cn) +Peter Lisak (peter.lisak@firma.seznam.cz) Changbin Liu (changbin.liu@gmail.com) Jing Liuqing (jing.liuqing@99cloud.net) Victor Lowther (victor.lowther@gmail.com) @@ -143,6 +152,7 @@ Sergey Lukjanov (slukjanov@mirantis.com) Zhongyue Luo (zhongyue.nah@intel.com) Paul Luse (paul.e.luse@intel.com) Christopher MacGown (chris@pistoncloud.com) +Ganesh Maharaj Mahalingam (ganesh.mahalingam@intel.com) Dragos Manolescu (dragosm@hp.com) Ben Martin (blmartin@us.ibm.com) Steve Martinelli (stevemar@ca.ibm.com) @@ -152,7 +162,7 @@ Nakagawa Masaaki (nakagawamsa@nttdata.co.jp) Dolph Mathews (dolph.mathews@gmail.com) Kenichiro Matsuda (matsuda_kenichi@jp.fujitsu.com) Michael Matur (michael.matur@gmail.com) -Donagh McCabe (donagh.mccabe@hp.com) +Donagh McCabe (donagh.mccabe@hpe.com) Andy McCrae (andy.mccrae@gmail.com) Paul McMillan (paul.mcmillan@nebula.com) Ewan Mellor (ewan.mellor@citrix.com) @@ -168,19 +178,22 @@ Maru Newby (mnewby@internap.com) Newptone (xingchao@unitedstack.com) Colin Nicholson (colin.nicholson@iomart.com) Zhenguo Niu (zhenguo@unitedstack.com) +Catherine Northcott (catherine@northcott.nz) Ondrej Novy (ondrej.novy@firma.seznam.cz) Timothy Okwii (tokwii@cisco.com) Matthew Oliver (matt@oliver.net.au) Hisashi Osanai (osanai.hisashi@jp.fujitsu.com) -Eamonn O'Toole (eamonn.otoole@hp.com) +Eamonn O'Toole (eamonn.otoole@hpe.com) James Page (james.page@ubuntu.com) Prashanth Pai (ppai@redhat.com) +Venkateswarlu Pallamala (p.venkatesh551@gmail.com) Pawel Palucki (pawel.palucki@gmail.com) Alex Pecoraro (alex.pecoraro@emc.com) Sascha Peilicke (saschpe@gmx.de) Constantine Peresypkin (constantine.peresypk@rackspace.com) Dieter Plaetinck (dieter@vimeo.com) Dan Prince (dprince@redhat.com) +Sivasathurappan Radhakrishnan (siva.radhakrishnan@intel.com) Sarvesh Ranjan (saranjan@cisco.com) Falk Reimann (falk.reimann@sap.com) Brian Reitz 
(brian.reitz@oracle.com) @@ -198,7 +211,7 @@ Shilla Saebi (shilla.saebi@gmail.com) Atsushi Sakai (sakaia@jp.fujitsu.com) Cristian A Sanchez (cristian.a.sanchez@intel.com) Christian Schwede (cschwede@redhat.com) -Mark Seger (Mark.Seger@hp.com) +Mark Seger (mark.seger@hpe.com) Azhagu Selvan SP (tamizhgeek@gmail.com) Alexandra Settle (alexandra.settle@rackspace.com) Andrew Clay Shafer (acs@parvuscaptus.com) @@ -212,6 +225,7 @@ Pradeep Kumar Singh (pradeep.singh@nectechnologies.in) Liu Siqi (meizu647@gmail.com) Adrian Smith (adrian_f_smith@dell.com) Jon Snitow (otherjon@swiftstack.com) +Emile Snyder (emile.snyder@gmail.com) Emett Speer (speer.emett@gmail.com) TheSriram (sriram@klusterkloud.com) Jeremy Stanley (fungi@yuggoth.org) @@ -234,7 +248,9 @@ Dmitry Ukov (dukov@mirantis.com) Vincent Untz (vuntz@suse.com) Daniele Valeriani (daniele@dvaleriani.net) Koert van der Veer (koert@cloudvps.com) +Béla Vancsics (vancsics@inf.u-szeged.hu) Vladimir Vechkanov (vvechkanov@mirantis.com) +venkatamahesh (venkatamaheshkotha@gmail.com) Gil Vernik (gilv@il.ibm.com) Hou Ming Wang (houming.wang@easystack.cn) Shane Wang (shane.wang@intel.com) @@ -248,7 +264,7 @@ Ye Jia Xu (xyj.asmy@gmail.com) Alex Yang (alex890714@gmail.com) Lin Yang (lin.a.yang@intel.com) Yee (mail.zhang.yee@gmail.com) -Guang Yee (guang.yee@hp.com) +Guang Yee (guang.yee@hpe.com) Pete Zaitcev (zaitcev@kotori.zaitcev.us) Hua Zhang (zhuadl@cn.ibm.com) Jian Zhang (jian.zhang@intel.com) diff --git a/CHANGELOG b/CHANGELOG index 0eb08b11a1..c1b335d548 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,3 +1,92 @@ +swift (2.6.0) + + * Dependency changes + - Updated minimum version of eventlet to 0.17.4 to support IPv6. + + - Updated the minimum version of PyECLib to 1.0.7. + + * The ring rebalancing algorithm was updated to better handle edge cases + and to give better (more balanced) rings in the general case. New rings + will have better initial placement, capacity adjustments will move less + data for better balance, and existing rings that were imbalanced should + start to become better balanced as they go through rebalance cycles. + + * Added container and account reverse listings. + + A GET request to an account or container resource with a "reverse=true" + query parameter will return the listing in reverse order. When + iterating over pages of reverse listings, the relative order of marker + and end_marker is swapped. + + * Storage policies now support having more than one name. + + This allows operators to fix a typo without breaking existing clients, + or, alternatively, have "short names" for policies. This is implemented + with the "aliases" config key in the storage policy config in + swift.conf. The aliases value is a list of names that the storage + policy may also be identified by. The storage policy "name" is used to + report the policy to users (e.g. in container headers). The aliases have + the same naming restrictions as the policy's primary name. + + * The object auditor learned the "interval" config value to control the + time between each audit pass. + + * `swift-recon --all` now includes the config checksum check. + + * `swift-init` learned the --kill-after-timeout option to force a service + to quit (SIGKILL) after a designated time. + + * `swift-recon` now correctly shows timestamps in UTC instead of local + time. + + * Fixed bug where `swift-ring-builder` couldn't select device id 0. + + * Documented the previously undocumented + `swift-ring-builder pretend_min_part_hours_passed` command.
+ + * The "node_timeout" config value now accepts decimal values. + + * `swift-ring-builder` now properly removes devices with zero weight. + + * `swift-init` return codes are updated via "--strict" and "--non-strict" + options. Please see the usage string for more information. + + * `swift-ring-builder` now reports the min_part_hours lockout time + remaining + + * Container sync has been improved to more quickly find and iterate over + the containers to be synced. This reduced server load and lowers the + time required to see data propagate between two clusters. Please see + http://swift.openstack.org/overview_container_sync.html for more details + about the new on-disk structure for tracking synchronized containers. + + * A container POST will now update that container's put-timestamp value. + + * TempURL header restrictions are now exposed in /info. + + * Error messages on static large object manifest responses have been + greatly improved. + + * Closed a bug where an unfinished read of a large object would leak a + socket file descriptor and a small amount of memory. (CVE-2016-0738) + + * Fixed an issue where a zero-byte object PUT with an incorrect Etag + would return a 503. + + * Fixed an error when a static large object manifest references the same + object more than once. + + * Improved performance of finding handoff nodes if a zone is empty. + + * Fixed duplication of headers in Access-Control-Expose-Headers on CORS + requests. + + * Fixed handling of IPv6 connections to memcache pools. + + * Continued work towards python 3 compatibility. + + * Various other minor bug fixes and improvements. + swift (2.5.0, OpenStack Liberty) * Added the ability to specify ranges for Static Large Object (SLO) From 0a404def7d54d1ef1c85c11a378052260c4fda4c Mon Sep 17 00:00:00 2001 From: John Dickinson Date: Wed, 20 Jan 2016 15:19:35 -0800 Subject: [PATCH 51/52] remove unneeded duplicate dict keys Change-Id: I926d7aaa9df093418aaae54fe26e8f7bc8210645 --- test/unit/common/ring/test_builder.py | 162 +++++++++++++------------- 1 file changed, 81 insertions(+), 81 deletions(-) diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py index 99348d445e..d70f92c5d1 100644 --- a/test/unit/common/ring/test_builder.py +++ b/test/unit/common/ring/test_builder.py @@ -1319,11 +1319,11 @@ class TestRingBuilder(unittest.TestCase): def test_fractional_replicas_rebalance(self): rb = ring.RingBuilder(8, 2.5, 0) - rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) - rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1, + rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) - rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 2, 'weight': 1, + rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.rebalance() # passes by not crashing rb.validate() # also passes by not crashing @@ -1332,13 +1332,13 @@ class TestRingBuilder(unittest.TestCase): def test_create_add_dev_add_replica_rebalance(self): rb = ring.RingBuilder(8, 3, 1) - rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3, + rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 3, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3, + rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 
'weight': 3, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3, + rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 3, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3, + rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 3, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) rb.set_replicas(4) rb.rebalance() # this would crash since parts_wanted was not set @@ -1348,15 +1348,15 @@ class TestRingBuilder(unittest.TestCase): rb = ring.RingBuilder(8, 3, 1) # 5 devices: 5 is the smallest number that does not divide 3 * 2^8, # which forces some rounding to happen. - rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) - rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) - rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'}) - rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'}) rb.rebalance() rb.validate() @@ -1381,31 +1381,31 @@ class TestRingBuilder(unittest.TestCase): def test_add_replicas_then_rebalance_respects_weight(self): rb = ring.RingBuilder(8, 3, 1) - rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3, + rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 3, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3, + rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 3, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) - rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) - rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'}) - rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 1, 'weight': 3, + rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 3, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'}) - rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 1, 'weight': 3, + rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 3, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'}) - rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1, + rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdg'}) - rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1, + rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdh'}) - rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 2, 'weight': 3, + rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 3, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdi'}) - rb.add_dev({'id': 9, 'region': 0, 'region': 0, 
'zone': 2, 'weight': 3, + rb.add_dev({'id': 9, 'region': 0, 'zone': 2, 'weight': 3, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdj'}) - rb.add_dev({'id': 10, 'region': 0, 'region': 0, 'zone': 2, 'weight': 1, + rb.add_dev({'id': 10, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdk'}) - rb.add_dev({'id': 11, 'region': 0, 'region': 0, 'zone': 2, 'weight': 1, + rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdl'}) rb.rebalance(seed=1) @@ -1439,31 +1439,31 @@ class TestRingBuilder(unittest.TestCase): def test_overload(self): rb = ring.RingBuilder(8, 3, 1) - rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'}) - rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'}) - rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'}) - rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1, + rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb'}) - rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1, + rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdg'}) - rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1, + rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdh'}) - rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1, + rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdi'}) - rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 2, 'weight': 2, + rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 2, 'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc'}) - rb.add_dev({'id': 9, 'region': 0, 'region': 0, 'zone': 2, 'weight': 2, + rb.add_dev({'id': 9, 'region': 0, 'zone': 2, 'weight': 2, 'ip': '127.0.0.2', 'port': 10002, 'device': 'sdj'}) - rb.add_dev({'id': 10, 'region': 0, 'region': 0, 'zone': 2, 'weight': 2, + rb.add_dev({'id': 10, 'region': 0, 'zone': 2, 'weight': 2, 'ip': '127.0.0.2', 'port': 10002, 'device': 'sdk'}) - rb.add_dev({'id': 11, 'region': 0, 'region': 0, 'zone': 2, 'weight': 2, + rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 2, 'ip': '127.0.0.2', 'port': 10002, 'device': 'sdl'}) rb.rebalance(seed=12345) @@ -1517,31 +1517,31 @@ class TestRingBuilder(unittest.TestCase): # Overload doesn't prevent optimal balancing. 
rb = ring.RingBuilder(8, 3, 1) rb.set_overload(0.125) - rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) - rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) - rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) - rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) - rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 2, + rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'}) - rb.add_dev({'id': 9, 'region': 0, 'region': 0, 'zone': 0, 'weight': 2, + rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'}) - rb.add_dev({'id': 10, 'region': 0, 'region': 0, 'zone': 0, 'weight': 2, + rb.add_dev({'id': 10, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'}) - rb.add_dev({'id': 11, 'region': 0, 'region': 0, 'zone': 0, 'weight': 2, + rb.add_dev({'id': 11, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'}) rb.rebalance(seed=12345) @@ -1577,16 +1577,16 @@ class TestRingBuilder(unittest.TestCase): self.assertEqual(part_counts['127.0.0.3'], 256) # Add a new server: balance stays optimal - rb.add_dev({'id': 12, 'region': 0, 'region': 0, 'zone': 0, + rb.add_dev({'id': 12, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.4', 'port': 10000, 'device': 'sdd'}) - rb.add_dev({'id': 13, 'region': 0, 'region': 0, 'zone': 0, + rb.add_dev({'id': 13, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.4', 'port': 10000, 'device': 'sde'}) - rb.add_dev({'id': 14, 'region': 0, 'region': 0, 'zone': 0, + rb.add_dev({'id': 14, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.4', 'port': 10000, 'device': 'sdf'}) - rb.add_dev({'id': 15, 'region': 0, 'region': 0, 'zone': 0, + rb.add_dev({'id': 15, 'region': 0, 'zone': 0, 'weight': 2, 'ip': '127.0.0.4', 'port': 10000, 'device': 'sdf'}) @@ -1609,29 +1609,29 @@ class TestRingBuilder(unittest.TestCase): def test_overload_keeps_balanceable_things_balanced_initially(self): rb = ring.RingBuilder(8, 3, 1) - rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 8, + rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 8, 'ip': '10.0.0.1', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 1, 'region': 0, 'region': 0, 
'zone': 0, 'weight': 8, + rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 8, 'ip': '10.0.0.1', 'port': 10000, 'device': 'sdb'}) - rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4, + rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.2', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4, + rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.2', 'port': 10000, 'device': 'sdb'}) - rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4, + rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.3', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4, + rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.3', 'port': 10000, 'device': 'sdb'}) - rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4, + rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.4', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4, + rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.4', 'port': 10000, 'device': 'sdb'}) - rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4, + rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.5', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 9, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4, + rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.5', 'port': 10000, 'device': 'sdb'}) rb.set_overload(99999) @@ -1653,29 +1653,29 @@ class TestRingBuilder(unittest.TestCase): def test_overload_keeps_balanceable_things_balanced_on_rebalance(self): rb = ring.RingBuilder(8, 3, 1) - rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 8, + rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 8, 'ip': '10.0.0.1', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 8, + rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 8, 'ip': '10.0.0.1', 'port': 10000, 'device': 'sdb'}) - rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4, + rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.2', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4, + rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.2', 'port': 10000, 'device': 'sdb'}) - rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4, + rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.3', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4, + rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.3', 'port': 10000, 'device': 'sdb'}) - rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4, + rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.4', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4, + rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.4', 'port': 10000, 'device': 'sdb'}) - rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4, + rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.5', 'port': 10000, 'device': 'sda'}) - rb.add_dev({'id': 9, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4, + rb.add_dev({'id': 9, 
'region': 0, 'zone': 0, 'weight': 4, 'ip': '10.0.0.5', 'port': 10000, 'device': 'sdb'}) rb.set_overload(99999) @@ -1719,28 +1719,28 @@ class TestRingBuilder(unittest.TestCase): def test_server_per_port(self): # 3 servers, 3 disks each, with each disk on its own port rb = ring.RingBuilder(8, 3, 1) - rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '10.0.0.1', 'port': 10000, 'device': 'sdx'}) - rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '10.0.0.1', 'port': 10001, 'device': 'sdy'}) - rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '10.0.0.2', 'port': 10000, 'device': 'sdx'}) - rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '10.0.0.2', 'port': 10001, 'device': 'sdy'}) - rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '10.0.0.3', 'port': 10000, 'device': 'sdx'}) - rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '10.0.0.3', 'port': 10001, 'device': 'sdy'}) rb.rebalance(seed=1) - rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '10.0.0.1', 'port': 10002, 'device': 'sdz'}) - rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '10.0.0.2', 'port': 10002, 'device': 'sdz'}) - rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 1, 'ip': '10.0.0.3', 'port': 10002, 'device': 'sdz'}) rb.pretend_min_part_hours_passed() From e13a03c379273ee10e678818078b9c40a96a7dc9 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 20 Jan 2016 16:06:26 -0800 Subject: [PATCH 52/52] Stop overriding builtin range Change-Id: I315f8b554bb9e96659b455f4158f074961bd6498 --- swift/common/middleware/slo.py | 6 +- test/unit/common/test_swob.py | 114 +++++++++++++++++---------------- 2 files changed, 61 insertions(+), 59 deletions(-) diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py index 048d8b5add..49aa44ecd2 100644 --- a/swift/common/middleware/slo.py +++ b/swift/common/middleware/slo.py @@ -461,13 +461,13 @@ class SloGetContext(WSGIContext): # no bytes are needed from this or any future segment break - range = seg_dict.get('range') - if range is None: + seg_range = seg_dict.get('range') + if seg_range is None: range_start, range_end = 0, seg_length - 1 else: # We already validated and supplied concrete values # for the range on upload - range_start, range_end = map(int, range.split('-')) + range_start, range_end = map(int, seg_range.split('-')) if config_true_value(seg_dict.get('sub_slo')): # do this check here so that we can avoid fetching this last diff --git a/test/unit/common/test_swob.py b/test/unit/common/test_swob.py index 8a0b21fd25..c03b400d9a 100644 --- a/test/unit/common/test_swob.py +++ b/test/unit/common/test_swob.py @@ -158,120 +158,122 @@ class TestHeaderKeyDict(unittest.TestCase): class TestRange(unittest.TestCase): def test_range(self): - range = swift.common.swob.Range('bytes=1-7') - 
self.assertEqual(range.ranges[0], (1, 7)) + swob_range = swift.common.swob.Range('bytes=1-7') + self.assertEqual(swob_range.ranges[0], (1, 7)) def test_upsidedown_range(self): - range = swift.common.swob.Range('bytes=5-10') - self.assertEqual(range.ranges_for_length(2), []) + swob_range = swift.common.swob.Range('bytes=5-10') + self.assertEqual(swob_range.ranges_for_length(2), []) def test_str(self): for range_str in ('bytes=1-7', 'bytes=1-', 'bytes=-1', 'bytes=1-7,9-12', 'bytes=-7,9-'): - range = swift.common.swob.Range(range_str) - self.assertEqual(str(range), range_str) + swob_range = swift.common.swob.Range(range_str) + self.assertEqual(str(swob_range), range_str) def test_ranges_for_length(self): - range = swift.common.swob.Range('bytes=1-7') - self.assertEqual(range.ranges_for_length(10), [(1, 8)]) - self.assertEqual(range.ranges_for_length(5), [(1, 5)]) - self.assertEqual(range.ranges_for_length(None), None) + swob_range = swift.common.swob.Range('bytes=1-7') + self.assertEqual(swob_range.ranges_for_length(10), [(1, 8)]) + self.assertEqual(swob_range.ranges_for_length(5), [(1, 5)]) + self.assertEqual(swob_range.ranges_for_length(None), None) def test_ranges_for_large_length(self): - range = swift.common.swob.Range('bytes=-1000000000000000000000000000') - self.assertEqual(range.ranges_for_length(100), [(0, 100)]) + swob_range = swift.common.swob.Range('bytes=-100000000000000000000000') + self.assertEqual(swob_range.ranges_for_length(100), [(0, 100)]) def test_ranges_for_length_no_end(self): - range = swift.common.swob.Range('bytes=1-') - self.assertEqual(range.ranges_for_length(10), [(1, 10)]) - self.assertEqual(range.ranges_for_length(5), [(1, 5)]) - self.assertEqual(range.ranges_for_length(None), None) + swob_range = swift.common.swob.Range('bytes=1-') + self.assertEqual(swob_range.ranges_for_length(10), [(1, 10)]) + self.assertEqual(swob_range.ranges_for_length(5), [(1, 5)]) + self.assertEqual(swob_range.ranges_for_length(None), None) # This used to freak out: - range = swift.common.swob.Range('bytes=100-') - self.assertEqual(range.ranges_for_length(5), []) - self.assertEqual(range.ranges_for_length(None), None) + swob_range = swift.common.swob.Range('bytes=100-') + self.assertEqual(swob_range.ranges_for_length(5), []) + self.assertEqual(swob_range.ranges_for_length(None), None) - range = swift.common.swob.Range('bytes=4-6,100-') - self.assertEqual(range.ranges_for_length(5), [(4, 5)]) + swob_range = swift.common.swob.Range('bytes=4-6,100-') + self.assertEqual(swob_range.ranges_for_length(5), [(4, 5)]) def test_ranges_for_length_no_start(self): - range = swift.common.swob.Range('bytes=-7') - self.assertEqual(range.ranges_for_length(10), [(3, 10)]) - self.assertEqual(range.ranges_for_length(5), [(0, 5)]) - self.assertEqual(range.ranges_for_length(None), None) + swob_range = swift.common.swob.Range('bytes=-7') + self.assertEqual(swob_range.ranges_for_length(10), [(3, 10)]) + self.assertEqual(swob_range.ranges_for_length(5), [(0, 5)]) + self.assertEqual(swob_range.ranges_for_length(None), None) - range = swift.common.swob.Range('bytes=4-6,-100') - self.assertEqual(range.ranges_for_length(5), [(4, 5), (0, 5)]) + swob_range = swift.common.swob.Range('bytes=4-6,-100') + self.assertEqual(swob_range.ranges_for_length(5), [(4, 5), (0, 5)]) def test_ranges_for_length_multi(self): - range = swift.common.swob.Range('bytes=-20,4-') - self.assertEqual(len(range.ranges_for_length(200)), 2) + swob_range = swift.common.swob.Range('bytes=-20,4-') + 
self.assertEqual(len(swob_range.ranges_for_length(200)), 2) # the actual length greater than each range element - self.assertEqual(range.ranges_for_length(200), [(180, 200), (4, 200)]) + self.assertEqual(swob_range.ranges_for_length(200), + [(180, 200), (4, 200)]) - range = swift.common.swob.Range('bytes=30-150,-10') - self.assertEqual(len(range.ranges_for_length(200)), 2) + swob_range = swift.common.swob.Range('bytes=30-150,-10') + self.assertEqual(len(swob_range.ranges_for_length(200)), 2) # the actual length lands in the middle of a range - self.assertEqual(range.ranges_for_length(90), [(30, 90), (80, 90)]) + self.assertEqual(swob_range.ranges_for_length(90), + [(30, 90), (80, 90)]) # the actual length greater than any of the range - self.assertEqual(range.ranges_for_length(200), + self.assertEqual(swob_range.ranges_for_length(200), [(30, 151), (190, 200)]) - self.assertEqual(range.ranges_for_length(None), None) + self.assertEqual(swob_range.ranges_for_length(None), None) def test_ranges_for_length_edges(self): - range = swift.common.swob.Range('bytes=0-1, -7') - self.assertEqual(range.ranges_for_length(10), + swob_range = swift.common.swob.Range('bytes=0-1, -7') + self.assertEqual(swob_range.ranges_for_length(10), [(0, 2), (3, 10)]) - range = swift.common.swob.Range('bytes=-7, 0-1') - self.assertEqual(range.ranges_for_length(10), + swob_range = swift.common.swob.Range('bytes=-7, 0-1') + self.assertEqual(swob_range.ranges_for_length(10), [(3, 10), (0, 2)]) - range = swift.common.swob.Range('bytes=-7, 0-1') - self.assertEqual(range.ranges_for_length(5), + swob_range = swift.common.swob.Range('bytes=-7, 0-1') + self.assertEqual(swob_range.ranges_for_length(5), [(0, 5), (0, 2)]) def test_ranges_for_length_overlapping(self): # Fewer than 3 overlaps is okay - range = swift.common.swob.Range('bytes=10-19,15-24') - self.assertEqual(range.ranges_for_length(100), + swob_range = swift.common.swob.Range('bytes=10-19,15-24') + self.assertEqual(swob_range.ranges_for_length(100), [(10, 20), (15, 25)]) - range = swift.common.swob.Range('bytes=10-19,15-24,20-29') - self.assertEqual(range.ranges_for_length(100), + swob_range = swift.common.swob.Range('bytes=10-19,15-24,20-29') + self.assertEqual(swob_range.ranges_for_length(100), [(10, 20), (15, 25), (20, 30)]) # Adjacent ranges, though suboptimal, don't overlap - range = swift.common.swob.Range('bytes=10-19,20-29,30-39') - self.assertEqual(range.ranges_for_length(100), + swob_range = swift.common.swob.Range('bytes=10-19,20-29,30-39') + self.assertEqual(swob_range.ranges_for_length(100), [(10, 20), (20, 30), (30, 40)]) # Ranges that share a byte do overlap - range = swift.common.swob.Range('bytes=10-20,20-30,30-40,40-50') - self.assertEqual(range.ranges_for_length(100), []) + swob_range = swift.common.swob.Range('bytes=10-20,20-30,30-40,40-50') + self.assertEqual(swob_range.ranges_for_length(100), []) # With suffix byte range specs (e.g. 
bytes=-2), make sure that we # correctly determine overlapping-ness based on the entity length - range = swift.common.swob.Range('bytes=10-15,15-20,30-39,-9') - self.assertEqual(range.ranges_for_length(100), + swob_range = swift.common.swob.Range('bytes=10-15,15-20,30-39,-9') + self.assertEqual(swob_range.ranges_for_length(100), [(10, 16), (15, 21), (30, 40), (91, 100)]) - self.assertEqual(range.ranges_for_length(20), []) + self.assertEqual(swob_range.ranges_for_length(20), []) def test_ranges_for_length_nonascending(self): few_ranges = ("bytes=100-109,200-209,300-309,500-509," "400-409,600-609,700-709") many_ranges = few_ranges + ",800-809" - range = swift.common.swob.Range(few_ranges) - self.assertEqual(range.ranges_for_length(100000), + swob_range = swift.common.swob.Range(few_ranges) + self.assertEqual(swob_range.ranges_for_length(100000), [(100, 110), (200, 210), (300, 310), (500, 510), (400, 410), (600, 610), (700, 710)]) - range = swift.common.swob.Range(many_ranges) - self.assertEqual(range.ranges_for_length(100000), []) + swob_range = swift.common.swob.Range(many_ranges) + self.assertEqual(swob_range.ranges_for_length(100000), []) def test_ranges_for_length_too_many(self): at_the_limit_ranges = (
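As a minimal illustrative sketch of why the rename in this last patch matters (plain Python, independent of the Swift code above): binding a local named `range` shadows the builtin for the rest of that scope, so any later call to range() in the same function fails.

    # Illustrative only; the string value is a made-up stand-in.
    range = 'bytes=1-7'   # any assignment to `range` hides the builtin
    try:
        list(range(3))    # this now calls the string, not the builtin
    except TypeError as exc:
        print(exc)        # "'str' object is not callable"

Renaming such locals, here to `swob_range` and `seg_range`, keeps the builtin usable throughout the function.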