Python 3 compatibility: fix xrange/range issues

xrange() is not defined in Python 3.
Rename xrange() to range().
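
For reference, a minimal sketch (not part of this change) of the difference between the two builtins and one common way to keep a lazy range on both interpreters; the range_ alias is purely illustrative:

    # Python 2: range() builds a full list, xrange() is lazy.
    # Python 3: xrange() is gone and range() is itself lazy.
    try:
        range_ = xrange      # Python 2 keeps the lazy iterator
    except NameError:        # Python 3: xrange is not defined
        range_ = range

    print(sum(range_(10)))   # prints 45 on both interpreters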

Change-Id: Ifb1c9cfd863ce6dfe3cced3eca7ea8e539d8a5e9
Authored by zhulingjie on 2018-10-07 23:13:54 -04:00; committed by Pete Zaitcev
parent 4ae9ce76e7
commit 83a7ce8ce0
7 changed files with 43 additions and 43 deletions


@@ -164,7 +164,7 @@ you have lost.
>>> builder._replica2part2dev = ring._replica2part2dev_id
>>> builder._last_part_moves_epoch = 0
>>> from array import array
->>> builder._last_part_moves = array('B', (0 for _ in xrange(partitions)))
+>>> builder._last_part_moves = array('B', (0 for _ in range(partitions)))
>>> builder._set_parts_wanted()
>>> for d in builder._iter_devs():
d['parts'] = 0


@@ -46,7 +46,7 @@ Here is a simple example of this in action:
DATA_ID_COUNT = 10000000
node_counts = [0] * NODE_COUNT
-for data_id in xrange(DATA_ID_COUNT):
+for data_id in range(DATA_ID_COUNT):
data_id = str(data_id)
# This just pulls part of the hash out as an integer
hsh = unpack_from('>I', md5(data_id).digest())[0]
@@ -107,7 +107,7 @@ Let's examine this at a larger scale:
DATA_ID_COUNT = 10000000
moved_ids = 0
-for data_id in xrange(DATA_ID_COUNT):
+for data_id in range(DATA_ID_COUNT):
data_id = str(data_id)
hsh = unpack_from('>I', md5(str(data_id)).digest())[0]
node_id = hsh % NODE_COUNT
@@ -146,15 +146,15 @@ Let's examine this at a larger scale:
DATA_ID_COUNT = 10000000
node_range_starts = []
-for node_id in xrange(NODE_COUNT):
+for node_id in range(NODE_COUNT):
node_range_starts.append(DATA_ID_COUNT /
NODE_COUNT * node_id)
new_node_range_starts = []
-for new_node_id in xrange(NEW_NODE_COUNT):
+for new_node_id in range(NEW_NODE_COUNT):
new_node_range_starts.append(DATA_ID_COUNT /
NEW_NODE_COUNT * new_node_id)
moved_ids = 0
-for data_id in xrange(DATA_ID_COUNT):
+for data_id in range(DATA_ID_COUNT):
data_id = str(data_id)
hsh = unpack_from('>I', md5(str(data_id)).digest())[0]
node_id = bisect_left(node_range_starts,
@@ -192,7 +192,7 @@ be done by creating "virtual nodes" for each node. So 100 nodes might have
vnode_range_starts = []
vnode2node = []
-for vnode_id in xrange(VNODE_COUNT):
+for vnode_id in range(VNODE_COUNT):
vnode_range_starts.append(DATA_ID_COUNT /
VNODE_COUNT * vnode_id)
vnode2node.append(vnode_id % NODE_COUNT)
@@ -201,7 +201,7 @@ be done by creating "virtual nodes" for each node. So 100 nodes might have
NEW_NODE_COUNT = NODE_COUNT + 1
vnodes_to_reassign = VNODE_COUNT / NEW_NODE_COUNT
while vnodes_to_reassign > 0:
-for node_to_take_from in xrange(NODE_COUNT):
+for node_to_take_from in range(NODE_COUNT):
for vnode_id, node_id in enumerate(new_vnode2node):
if node_id == node_to_take_from:
new_vnode2node[vnode_id] = new_node_id
@@ -210,7 +210,7 @@ be done by creating "virtual nodes" for each node. So 100 nodes might have
if vnodes_to_reassign <= 0:
break
moved_ids = 0
-for data_id in xrange(DATA_ID_COUNT):
+for data_id in range(DATA_ID_COUNT):
data_id = str(data_id)
hsh = unpack_from('>I', md5(str(data_id)).digest())[0]
vnode_id = bisect_left(vnode_range_starts,
@@ -242,13 +242,13 @@ optimize that out.
VNODE_COUNT = 1000
vnode2node = []
-for vnode_id in xrange(VNODE_COUNT):
+for vnode_id in range(VNODE_COUNT):
vnode2node.append(vnode_id % NODE_COUNT)
new_vnode2node = list(vnode2node)
new_node_id = NODE_COUNT
vnodes_to_reassign = VNODE_COUNT / (NODE_COUNT + 1)
while vnodes_to_reassign > 0:
-for node_to_take_from in xrange(NODE_COUNT):
+for node_to_take_from in range(NODE_COUNT):
for vnode_id, node_id in enumerate(vnode2node):
if node_id == node_to_take_from:
vnode2node[vnode_id] = new_node_id
@@ -257,7 +257,7 @@ optimize that out.
if vnodes_to_reassign <= 0:
break
moved_ids = 0
-for data_id in xrange(DATA_ID_COUNT):
+for data_id in range(DATA_ID_COUNT):
data_id = str(data_id)
hsh = unpack_from('>I', md5(str(data_id)).digest())[0]
vnode_id = hsh % VNODE_COUNT
@@ -356,10 +356,10 @@ distribution to make sure we haven't broken anything.
DATA_ID_COUNT = 100000000
part2node = array('H')
-for part in xrange(2 ** PARTITION_POWER):
+for part in range(2 ** PARTITION_POWER):
part2node.append(part % NODE_COUNT)
node_counts = [0] * NODE_COUNT
-for data_id in xrange(DATA_ID_COUNT):
+for data_id in range(DATA_ID_COUNT):
data_id = str(data_id)
part = unpack_from('>I',
md5(str(data_id)).digest())[0] >> PARTITION_SHIFT
@@ -437,16 +437,16 @@ testing):
DATA_ID_COUNT = 10000000
part2node = array('H')
-for part in xrange(2 ** PARTITION_POWER):
+for part in range(2 ** PARTITION_POWER):
part2node.append(part % NODE_COUNT)
node_counts = [0] * NODE_COUNT
-for data_id in xrange(DATA_ID_COUNT):
+for data_id in range(DATA_ID_COUNT):
data_id = str(data_id)
part = unpack_from('>I',
md5(str(data_id)).digest())[0] >> PARTITION_SHIFT
node_ids = [part2node[part]]
node_counts[node_ids[0]] += 1
-for replica in xrange(1, REPLICAS):
+for replica in range(1, REPLICAS):
while part2node[part] in node_ids:
part += 1
if part > PARTITION_MAX:
@@ -511,12 +511,12 @@ in distinct zones.
node2zone.append(zone)
zone += 1
part2node = array('H')
-for part in xrange(2 ** PARTITION_POWER):
+for part in range(2 ** PARTITION_POWER):
part2node.append(part % NODE_COUNT)
shuffle(part2node)
node_counts = [0] * NODE_COUNT
zone_counts = [0] * ZONE_COUNT
-for data_id in xrange(DATA_ID_COUNT):
+for data_id in range(DATA_ID_COUNT):
data_id = str(data_id)
part = unpack_from('>I',
md5(str(data_id)).digest())[0] >> PARTITION_SHIFT
@@ -524,7 +524,7 @@ in distinct zones.
zones = [node2zone[node_ids[0]]]
node_counts[node_ids[0]] += 1
zone_counts[zones[0]] += 1
-for replica in xrange(1, REPLICAS):
+for replica in range(1, REPLICAS):
while part2node[part] in node_ids and \
node2zone[part2node[part]] in zones:
part += 1
@@ -596,8 +596,8 @@ each real node is assigned multiple anchors.
zone += 1
hash2index = []
index2node = []
-for node in xrange(NODE_COUNT):
-for vnode in xrange(VNODE_COUNT):
+for node in range(NODE_COUNT):
+for vnode in range(VNODE_COUNT):
hsh = unpack_from('>I', md5(str(node)).digest())[0]
index = bisect_left(hash2index, hsh)
if index > len(hash2index):
@@ -606,7 +606,7 @@ each real node is assigned multiple anchors.
index2node.insert(index, node)
node_counts = [0] * NODE_COUNT
zone_counts = [0] * ZONE_COUNT
-for data_id in xrange(DATA_ID_COUNT):
+for data_id in range(DATA_ID_COUNT):
data_id = str(data_id)
hsh = unpack_from('>I', md5(str(data_id)).digest())[0]
index = bisect_left(hash2index, hsh)
@@ -616,7 +616,7 @@ each real node is assigned multiple anchors.
zones = [node2zone[node_ids[0]]]
node_counts[node_ids[0]] += 1
zone_counts[zones[0]] += 1
-for replica in xrange(1, REPLICAS):
+for replica in range(1, REPLICAS):
while index2node[index] in node_ids and \
node2zone[index2node[index]] in zones:
index += 1
@@ -703,7 +703,7 @@ the ring and its testing.
md5(data_id).digest())[0] >> self.partition_shift
node_ids = [self.part2node[part]]
zones = [self.nodes[node_ids[0]]]
-for replica in xrange(1, self.replicas):
+for replica in range(1, self.replicas):
while self.part2node[part] in node_ids and \
self.nodes[self.part2node[part]] in zones:
part += 1
@@ -716,7 +716,7 @@ the ring and its testing.
def build_ring(nodes, partition_power, replicas):
begin = time()
part2node = array('H')
-for part in xrange(2 ** partition_power):
+for part in range(2 ** partition_power):
part2node.append(part % len(nodes))
shuffle(part2node)
ring = Ring(nodes, part2node, replicas)
@@ -728,7 +728,7 @@ the ring and its testing.
DATA_ID_COUNT = 10000000
node_counts = {}
zone_counts = {}
-for data_id in xrange(DATA_ID_COUNT):
+for data_id in range(DATA_ID_COUNT):
for node in ring.get_nodes(data_id):
node_counts[node['id']] = \
node_counts.get(node['id'], 0) + 1
@@ -832,7 +832,7 @@ we've changed so much I'll just post the entire module again:
md5(data_id).digest())[0] >> self.partition_shift
node_ids = [self.part2node[part]]
zones = [self.nodes[node_ids[0]]]
-for replica in xrange(1, self.replicas):
+for replica in range(1, self.replicas):
while self.part2node[part] in node_ids and \
self.nodes[self.part2node[part]] in zones:
part += 1
@@ -851,7 +851,7 @@ we've changed so much I'll just post the entire module again:
node['desired_parts'] = \
parts / total_weight * node['weight']
part2node = array('H')
-for part in xrange(2 ** partition_power):
+for part in range(2 ** partition_power):
for node in nodes.values():
if node['desired_parts'] >= 1:
node['desired_parts'] -= 1
@@ -873,7 +873,7 @@ we've changed so much I'll just post the entire module again:
DATA_ID_COUNT = 10000000
node_counts = {}
zone_counts = {}
-for data_id in xrange(DATA_ID_COUNT):
+for data_id in range(DATA_ID_COUNT):
for node in ring.get_nodes(data_id):
node_counts[node['id']] = \
node_counts.get(node['id'], 0) + 1


@@ -61,7 +61,7 @@ class TestS3ApiMultiDelete(S3ApiBase):
def test_delete_multi_objects(self):
bucket = 'bucket'
-put_objects = ['obj%s' % var for var in xrange(4)]
+put_objects = ['obj%s' % var for var in range(4)]
self._prepare_test_delete_multi_objects(bucket, put_objects)
query = 'delete'
@@ -174,7 +174,7 @@ class TestS3ApiMultiDelete(S3ApiBase):
'max_multi_delete_objects', 1000)
# specified number of objects are over max_multi_delete_objects
# (Default 1000), but xml size is relatively small
-req_objects = ['obj%s' for var in xrange(max_deletes + 1)]
+req_objects = ['obj%s' for var in range(max_deletes + 1)]
xml = self._gen_multi_delete_xml(req_objects)
content_md5 = calculate_md5(xml)
status, headers, body = \
@@ -186,7 +186,7 @@ class TestS3ApiMultiDelete(S3ApiBase):
# specified xml size is large, but number of objects are
# smaller than max_multi_delete_objects.
obj = 'a' * 102400
-req_objects = [obj + str(var) for var in xrange(max_deletes - 1)]
+req_objects = [obj + str(var) for var in range(max_deletes - 1)]
xml = self._gen_multi_delete_xml(req_objects)
content_md5 = calculate_md5(xml)
status, headers, body = \


@@ -67,7 +67,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
self.conn.make_request('PUT', bucket)
query = 'uploads'
for key, key_headers in izip_longest(keys, headers):
-for i in xrange(trials):
+for i in range(trials):
status, resp_headers, body = \
self.conn.make_request('POST', bucket, key,
headers=key_headers, query=query)
@@ -547,7 +547,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
upload_id = elem.find('UploadId').text
etags = []
-for i in xrange(1, 3):
+for i in range(1, 3):
query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
status, headers, body = \
self.conn.make_request('PUT', bucket, keys[0], query=query)
@@ -621,7 +621,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
# multi parts with no body
etags = []
-for i in xrange(1, 3):
+for i in range(1, 3):
query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
status, headers, body = \
self.conn.make_request('PUT', bucket, key, query=query)
@@ -636,7 +636,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
# multi parts with all parts less than min segment size
etags = []
-for i in xrange(1, 3):
+for i in range(1, 3):
query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
status, headers, body = \
self.conn.make_request('PUT', bucket, key, query=query,
@@ -675,7 +675,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
etags = []
body_size = [self.min_segment_size, self.min_segment_size - 1, 2]
-for i in xrange(1, 3):
+for i in range(1, 3):
query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
status, headers, body = \
self.conn.make_request('PUT', bucket, key, query=query,
@@ -699,7 +699,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
etags = []
body_size = [self.min_segment_size, self.min_segment_size, 2]
-for i in xrange(1, 3):
+for i in range(1, 3):
query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
status, headers, body = \
self.conn.make_request('PUT', bucket, key, query=query,
@@ -724,7 +724,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
upload_id = elem.find('UploadId').text
etags = []
-for i in xrange(1, 4):
+for i in range(1, 4):
query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
status, headers, body = \
self.conn.make_request('PUT', bucket, key,


@@ -503,7 +503,7 @@ class TestS3ApiBucket(S3ApiTestCase):
code = self._test_method_error('PUT', '/b', swob.HTTPCreated)
self.assertEqual(code, 'InvalidBucketName')
code = self._test_method_error(
-'PUT', '/%s' % ''.join(['b' for x in xrange(64)]),
+'PUT', '/%s' % ''.join(['b' for x in range(64)]),
swob.HTTPCreated)
self.assertEqual(code, 'InvalidBucketName')


@@ -322,7 +322,7 @@ class TestRequest(S3ApiTestCase):
'swift.common.middleware.s3api.s3request.get_container_info',
return_value={'status': 204}) as mock_info:
# Then all calls goes to get_container_info
-for x in xrange(10):
+for x in range(10):
info = s3_req.get_container_info(self.swift)
self.assertTrue('status' in info) # sanity
self.assertEqual(204, info['status']) # sanity


@@ -5042,7 +5042,7 @@ class TestStatsdLogging(unittest.TestCase):
class UnsafeXrange(object):
"""
-Like xrange(limit), but with extra context switching to screw things up.
+Like range(limit), but with extra context switching to screw things up.
"""
def __init__(self, upper_bound):
self.current = 0