Replace dict.iteritems() with dict.items()
The iteritems() method of Python 2 dictionaries has been renamed to
items() in Python 3. According to a discussion on the openstack-dev
mailing list, the overhead of creating a temporary list with
dict.items() on Python 2 is very low, because most of these
dictionaries are small:

http://lists.openstack.org/pipermail/openstack-dev/2015-June/066391.html

Patch generated by the following command:

    sed -i 's,iteritems,items,g' \
        $(find swift -name "*.py") \
        $(find test -name "*.py")

Change-Id: I6070bb6c684be76e8e77222a7d280ec6edd43496
parent 5370526b57
commit e70b66586e
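For reviewers new to the port, a minimal standalone sketch (not part of the patch, standard library only) of the behavioral difference the commit message leans on; the dict contents below are illustrative only:

    # On Python 2, d.items() builds a temporary list while d.iteritems()
    # returns a lazy iterator; on Python 3, iteritems() is gone and
    # items() returns a lightweight view. For the small dicts Swift
    # iterates over (headers, metadata), the temporary list is cheap.
    import sys

    d = {'x-account-meta-color': 'blue',
         'x-account-meta-size': 'small'}  # illustrative data only

    if sys.version_info[0] == 2:
        assert isinstance(d.items(), list)       # temporary list copy
        assert hasattr(d, 'iteritems')           # lazy iterator, Py2 only
    else:
        assert not hasattr(d, 'iteritems')       # would raise AttributeError
        assert not isinstance(d.items(), list)   # dict view, not a list

    # Either way, iterating via items() works on both versions, which is
    # why a blanket sed rewrite is viable:
    for key, value in d.items():
        print('%s: %s' % (key, value))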
@@ -153,7 +153,7 @@ class AccountController(BaseStorageServer):
             return HTTPConflict(request=req)
         metadata = {}
         metadata.update((key, (value, timestamp.internal))
-                        for key, value in req.headers.iteritems()
+                        for key, value in req.headers.items()
                         if is_sys_or_user_meta('account', key))
         if metadata:
             broker.update_metadata(metadata, validate_metadata=True)
@@ -246,7 +246,7 @@ class AccountController(BaseStorageServer):
             return self._deleted_response(broker, req, HTTPNotFound)
         metadata = {}
         metadata.update((key, (value, req_timestamp.internal))
-                        for key, value in req.headers.iteritems()
+                        for key, value in req.headers.items()
                         if is_sys_or_user_meta('account', key))
         if metadata:
             broker.update_metadata(metadata, validate_metadata=True)
@@ -64,7 +64,7 @@ def get_response_headers(broker):
         resp_headers[header_name] = value
     resp_headers.update((key, value)
                         for key, (value, timestamp) in
-                        broker.metadata.iteritems() if value != '')
+                        broker.metadata.items() if value != '')
     return resp_headers
 
 
@@ -212,13 +212,13 @@ def print_db_info_metadata(db_type, info, metadata):
         raise ValueError('Info is incomplete: %s' % e)
 
     meta_prefix = 'x_' + db_type + '_'
-    for key, value in info.iteritems():
+    for key, value in info.items():
         if key.lower().startswith(meta_prefix):
             title = key.replace('_', '-').title()
             print ' %s: %s' % (title, value)
     user_metadata = {}
     sys_metadata = {}
-    for key, (value, timestamp) in metadata.iteritems():
+    for key, (value, timestamp) in metadata.items():
         if is_user_meta(db_type, key):
             user_metadata[strip_user_meta_prefix(db_type, key)] = value
         elif is_sys_meta(db_type, key):
@@ -284,7 +284,7 @@ def print_obj_metadata(metadata):
     else:
         print 'Timestamp: Not found in metadata'
 
-    for key, value in metadata.iteritems():
+    for key, value in metadata.items():
         if is_user_meta('Object', key):
             user_metadata[key] = value
         elif is_sys_meta('Object', key):
@@ -97,7 +97,7 @@ def _find_parts(devs):
 
     # Sort by number of found replicas to keep the output format
    sorted_partition_count = sorted(
-        partition_count.iteritems(), key=itemgetter(1), reverse=True)
+        partition_count.items(), key=itemgetter(1), reverse=True)
 
     return sorted_partition_count
 
@@ -1189,7 +1189,7 @@ def main(arguments=None):
         globals()
         print Commands.default.__doc__.strip()
         print
-        cmds = [c for c, f in Commands.__dict__.iteritems()
+        cmds = [c for c, f in Commands.__dict__.items()
                 if f.__doc__ and c[0] != '_' and c != 'default']
         cmds.sort()
         for cmd in cmds:
@@ -235,7 +235,7 @@ def http_connect_raw(ipaddr, port, method, path, headers=None,
     conn.path = path
     conn.putrequest(method, path, skip_host=(headers and 'Host' in headers))
     if headers:
-        for header, value in headers.iteritems():
+        for header, value in headers.items():
             conn.putheader(header, str(value))
     conn.endheaders()
     return conn
@@ -120,7 +120,7 @@ def check_metadata(req, target_type):
     prefix = 'x-%s-meta-' % target_type.lower()
     meta_count = 0
     meta_size = 0
-    for key, value in req.headers.iteritems():
+    for key, value in req.headers.items():
         if isinstance(value, basestring) and len(value) > MAX_HEADER_SIZE:
             return HTTPBadRequest(body='Header value too long: %s' %
                                   key[:MAX_META_NAME_LENGTH],
@@ -734,7 +734,7 @@ class DatabaseBroker(object):
         """
         meta_count = 0
         meta_size = 0
-        for key, (value, timestamp) in metadata.iteritems():
+        for key, (value, timestamp) in metadata.items():
             key = key.lower()
             if value != '' and (key.startswith('x-account-meta') or
                                 key.startswith('x-container-meta')):
@@ -762,7 +762,7 @@ class DatabaseBroker(object):
         """
         old_metadata = self.metadata
         if set(metadata_updates).issubset(set(old_metadata)):
-            for key, (value, timestamp) in metadata_updates.iteritems():
+            for key, (value, timestamp) in metadata_updates.items():
                 if timestamp > old_metadata[key][1]:
                     break
             else:
@@ -780,7 +780,7 @@ class DatabaseBroker(object):
                     ALTER TABLE %s_stat
                    ADD COLUMN metadata TEXT DEFAULT '' """ % self.db_type)
                md = {}
-            for key, value_timestamp in metadata_updates.iteritems():
+            for key, value_timestamp in metadata_updates.items():
                 value, timestamp = value_timestamp
                 if key not in md or timestamp > md[key][1]:
                     md[key] = value_timestamp
@@ -844,7 +844,7 @@ class DatabaseBroker(object):
             if md:
                 md = json.loads(md)
                 keys_to_delete = []
-                for key, (value, value_timestamp) in md.iteritems():
+                for key, (value, value_timestamp) in md.items():
                     if value == '' and value_timestamp < timestamp:
                         keys_to_delete.append(key)
                 if keys_to_delete:
@@ -390,7 +390,7 @@ def direct_put_object(node, part, account, container, name, contents,
     if content_length is not None:
         headers['Content-Length'] = str(content_length)
     else:
-        for n, v in headers.iteritems():
+        for n, v in headers.items():
             if n.lower() == 'content-length':
                 content_length = int(v)
     if content_type is not None:
@@ -223,7 +223,7 @@ class InternalClient(object):
         resp = self.make_request('HEAD', path, headers, acceptable_statuses)
         metadata_prefix = metadata_prefix.lower()
         metadata = {}
-        for k, v in resp.headers.iteritems():
+        for k, v in resp.headers.items():
             if k.lower().startswith(metadata_prefix):
                 metadata[k[len(metadata_prefix):].lower()] = v
         return metadata
@@ -307,7 +307,7 @@ class InternalClient(object):
         """
 
         headers = {}
-        for k, v in metadata.iteritems():
+        for k, v in metadata.items():
             if k.lower().startswith(metadata_prefix):
                 headers[k] = v
             else:
@@ -426,7 +426,7 @@ class MemcacheRing(object):
         server_key = md5hash(server_key)
         timeout = sanitize_timeout(time or timeout)
         msg = ''
-        for key, value in mapping.iteritems():
+        for key, value in mapping.items():
             key = md5hash(key)
             flags = 0
             if serialize and self._allow_pickle:
@@ -161,7 +161,7 @@ def get_tempurl_keys_from_metadata(meta):
         meta = get_account_info(...)['meta']
         keys = get_tempurl_keys_from_metadata(meta)
     """
-    return [get_valid_utf8_str(value) for key, value in meta.iteritems()
+    return [get_valid_utf8_str(value) for key, value in meta.items()
             if key.lower() in ('temp-url-key', 'temp-url-key-2')]
 
 
@@ -941,7 +941,7 @@ class RingBuilder(object):
         reassign_parts.update(spread_out_parts)
         reassign_parts.update(removed_dev_parts)
 
-        reassign_parts_list = list(reassign_parts.iteritems())
+        reassign_parts_list = list(reassign_parts.items())
         # We shuffle the partitions to reassign so we get a more even
         # distribution later. There has been discussion of trying to distribute
         # partitions more "regularly" because that would actually reduce risk
@@ -875,7 +875,7 @@ class Request(object):
         elif 'wsgi.input' not in env:
             env['wsgi.input'] = WsgiStringIO('')
         req = Request(env)
-        for key, val in headers.iteritems():
+        for key, val in headers.items():
             req.headers[key] = val
         for key, val in kwargs.items():
             prop = getattr(Request, key, None)
@@ -1141,7 +1141,7 @@ class Response(object):
             self.headers.update(headers)
         if self.status_int == 401 and 'www-authenticate' not in self.headers:
             self.headers.update({'www-authenticate': self.www_authenticate()})
-        for key, value in kw.iteritems():
+        for key, value in kw.items():
             setattr(self, key, value)
         # When specifying both 'content_type' and 'charset' in the kwargs,
         # charset needs to be applied *after* content_type, otherwise charset
@@ -225,7 +225,7 @@ def register_swift_info(name='swift', admin=False, **kwargs):
     if "." in name:
         raise ValueError('Cannot use "." in a swift_info key: %s' % name)
     dict_to_use[name] = {}
-    for key, val in kwargs.iteritems():
+    for key, val in kwargs.items():
         if "." in key:
             raise ValueError('Cannot use "." in a swift_info key: %s' % key)
         dict_to_use[name][key] = val
@@ -580,12 +580,12 @@ class PortPidState(object):
         :returns: The port the socket is bound to.
         """
 
-        for port, sock_data in self.sock_data_by_port.iteritems():
+        for port, sock_data in self.sock_data_by_port.items():
             if sock_data['sock'] == sock:
                 return port
 
     def _pid_to_port_and_index(self, pid):
-        for port, sock_data in self.sock_data_by_port.iteritems():
+        for port, sock_data in self.sock_data_by_port.items():
             for server_idx, a_pid in enumerate(sock_data['pids']):
                 if pid == a_pid:
                     return port, server_idx
@@ -597,7 +597,7 @@ class PortPidState(object):
         """
 
         current_port_index_pairs = set()
-        for port, pid_state in self.sock_data_by_port.iteritems():
+        for port, pid_state in self.sock_data_by_port.items():
             current_port_index_pairs |= set(
                 (port, i)
                 for i, pid in enumerate(pid_state['pids'])
@@ -367,7 +367,7 @@ class ContainerController(BaseStorageServer):
         metadata = {}
         metadata.update(
             (key, (value, req_timestamp.internal))
-            for key, value in req.headers.iteritems()
+            for key, value in req.headers.items()
             if key.lower() in self.save_headers or
             is_sys_or_user_meta('container', key))
         if 'X-Container-Sync-To' in metadata:
@@ -406,7 +406,7 @@ class ContainerController(BaseStorageServer):
             return HTTPNotFound(request=req, headers=headers)
         headers.update(
             (key, value)
-            for key, (value, timestamp) in broker.metadata.iteritems()
+            for key, (value, timestamp) in broker.metadata.items()
             if value != '' and (key.lower() in self.save_headers or
                                 is_sys_or_user_meta('container', key)))
         headers['Content-Type'] = out_content_type
@@ -473,7 +473,7 @@ class ContainerController(BaseStorageServer):
 
     def create_listing(self, req, out_content_type, info, resp_headers,
                        metadata, container_list, container):
-        for key, (value, timestamp) in metadata.iteritems():
+        for key, (value, timestamp) in metadata.items():
             if value and (key.lower() in self.save_headers or
                           is_sys_or_user_meta('container', key)):
                 resp_headers[key] = value
@@ -547,7 +547,7 @@ class ContainerController(BaseStorageServer):
         metadata = {}
         metadata.update(
             (key, (value, req_timestamp.internal))
-            for key, value in req.headers.iteritems()
+            for key, value in req.headers.items()
             if key.lower() in self.save_headers or
             is_sys_or_user_meta('container', key))
         if metadata:
@@ -322,7 +322,7 @@ class ContainerSync(Daemon):
             user_key = None
             sync_point1 = info['x_container_sync_point1']
             sync_point2 = info['x_container_sync_point2']
-            for key, (value, timestamp) in broker.metadata.iteritems():
+            for key, (value, timestamp) in broker.metadata.items():
                 if key.lower() == 'x-container-sync-to':
                     sync_to = value
                 elif key.lower() == 'x-container-sync-key':
@@ -121,7 +121,7 @@ class ContainerUpdater(Daemon):
         begin = time.time()
         now = time.time()
         expired_suppressions = \
-            [a for a, u in self.account_suppressions.iteritems()
+            [a for a, u in self.account_suppressions.items()
              if u < now]
         for account in expired_suppressions:
             del self.account_suppressions[account]
@@ -1620,7 +1620,7 @@ class DiskFile(object):
         if meta_file:
             self._metadata = self._failsafe_read_metadata(meta_file, meta_file)
             sys_metadata = dict(
-                [(key, val) for key, val in datafile_metadata.iteritems()
+                [(key, val) for key, val in datafile_metadata.items()
                  if key.lower() in DATAFILE_SYSTEM_META
                  or is_sys_meta('object', key)])
             self._metadata.update(sys_metadata)
@@ -419,7 +419,7 @@ class ObjectReconstructor(Daemon):
         :returns: a list of strings, the suffix dirs to sync
         """
         suffixes = []
-        for suffix, sub_dict_local in local_suff.iteritems():
+        for suffix, sub_dict_local in local_suff.items():
             sub_dict_remote = remote_suff.get(suffix, {})
             if (sub_dict_local.get(None) != sub_dict_remote.get(None) or
                     sub_dict_local.get(local_index) !=
@@ -273,7 +273,7 @@ class ObjectReplicator(Daemon):
                     synced_remote_regions[node['region']] = \
                         candidates.keys()
                 responses.append(success)
-            for region, cand_objs in synced_remote_regions.iteritems():
+            for region, cand_objs in synced_remote_regions.items():
                 if delete_objs is None:
                     delete_objs = cand_objs
                 else:
@@ -411,7 +411,7 @@ class ObjectController(BaseStorageServer):
             raise HTTPBadRequest("invalid JSON for footer doc")
 
     def _check_container_override(self, update_headers, metadata):
-        for key, val in metadata.iteritems():
+        for key, val in metadata.items():
             override_prefix = 'x-backend-container-update-override-'
             if key.lower().startswith(override_prefix):
                 override = key.lower().replace(override_prefix, 'x-')
@@ -446,7 +446,7 @@ class ObjectController(BaseStorageServer):
                 request=request,
                 headers={'X-Backend-Timestamp': orig_timestamp.internal})
         metadata = {'X-Timestamp': req_timestamp.internal}
-        metadata.update(val for val in request.headers.iteritems()
+        metadata.update(val for val in request.headers.items()
                         if is_user_meta('object', val[0]))
         for header_key in self.allowed_headers:
             if header_key in request.headers:
@@ -614,9 +614,9 @@ class ObjectController(BaseStorageServer):
             'ETag': etag,
             'Content-Length': str(upload_size),
         }
-        metadata.update(val for val in request.headers.iteritems()
+        metadata.update(val for val in request.headers.items()
                         if is_sys_or_user_meta('object', val[0]))
-        metadata.update(val for val in footer_meta.iteritems()
+        metadata.update(val for val in footer_meta.items()
                         if is_sys_or_user_meta('object', val[0]))
         headers_to_copy = (
             request.headers.get(
@@ -712,7 +712,7 @@ class ObjectController(BaseStorageServer):
             conditional_etag=conditional_etag)
         response.headers['Content-Type'] = metadata.get(
             'Content-Type', 'application/octet-stream')
-        for key, value in metadata.iteritems():
+        for key, value in metadata.items():
             if is_sys_or_user_meta('object', key) or \
                     key.lower() in self.allowed_headers:
                 response.headers[key] = value
@@ -767,7 +767,7 @@ class ObjectController(BaseStorageServer):
             conditional_etag=conditional_etag)
         response.headers['Content-Type'] = metadata.get(
             'Content-Type', 'application/octet-stream')
-        for key, value in metadata.iteritems():
+        for key, value in metadata.items():
             if is_sys_or_user_meta('object', key) or \
                     key.lower() in self.allowed_headers:
                 response.headers[key] = value
@@ -332,7 +332,7 @@ class Sender(object):
         """
         msg = ['PUT ' + url_path, 'Content-Length: ' + str(df.content_length)]
         # Sorted to make it easier to test.
-        for key, value in sorted(df.get_metadata().iteritems()):
+        for key, value in sorted(df.get_metadata().items()):
             if key not in ('name', 'Content-Length'):
                 msg.append('%s: %s' % (key, value))
         msg = '\r\n'.join(msg) + '\r\n\r\n'
@@ -122,7 +122,7 @@ def _prep_headers_to_info(headers, server_type):
     meta = {}
     sysmeta = {}
     other = {}
-    for key, val in dict(headers).iteritems():
+    for key, val in dict(headers).items():
         lkey = key.lower()
         if is_user_meta(server_type, lkey):
             meta[strip_user_meta_prefix(server_type, lkey)] = val
@@ -1166,7 +1166,7 @@ class Controller(object):
                            k.lower() in self._x_remove_headers())
 
         dst_headers.update((k.lower(), v)
-                           for k, v in src_headers.iteritems()
+                           for k, v in src_headers.items()
                           if k.lower() in self.pass_through_headers or
                            is_sys_or_user_meta(st, k))
 
@@ -1488,7 +1488,7 @@ class Controller(object):
         # transfer any x-account-sysmeta headers from original request
         # to the autocreate PUT
         headers.update((k, v)
-                       for k, v in req.headers.iteritems()
+                       for k, v in req.headers.items()
                       if is_sys_meta('account', k))
         resp = self.make_requests(Request.blank('/v1' + path),
                                   self.app.account_ring, partition, 'PUT',
@@ -334,7 +334,7 @@ class Connection(object):
                                                port=self.storage_port)
         #self.connection.set_debuglevel(3)
         self.connection.putrequest('PUT', path)
-        for key, value in headers.iteritems():
+        for key, value in headers.items():
             self.connection.putheader(key, value)
         self.connection.endheaders()
 
@@ -130,13 +130,13 @@ def kill_server(ipport, ipport2server, pids):
 
 def kill_nonprimary_server(primary_nodes, ipport2server, pids):
     primary_ipports = [(n['ip'], n['port']) for n in primary_nodes]
-    for ipport, server in ipport2server.iteritems():
+    for ipport, server in ipport2server.items():
         if ipport in primary_ipports:
             server_type = server[:-1]
             break
     else:
         raise Exception('Cannot figure out server type for %r' % primary_nodes)
-    for ipport, server in list(ipport2server.iteritems()):
+    for ipport, server in list(ipport2server.items()):
         if server[:-1] == server_type and ipport not in primary_ipports:
             kill_server(ipport, ipport2server, pids)
             return ipport
@@ -182,7 +182,7 @@ def get_ring(ring_name, required_replicas, required_devices,
 
     repl_name = '%s-replicator' % server
     repl_configs = {i: readconf(c, section_name=repl_name)
-                    for i, c in config_paths[repl_name].iteritems()}
+                    for i, c in config_paths[repl_name].items()}
     servers_per_port = any(int(c.get('servers_per_port', '0'))
                            for c in repl_configs.values())
 
@@ -98,7 +98,7 @@ def build_dir_tree(start_path, tree_obj):
         for obj in tree_obj:
             build_dir_tree(start_path, obj)
     if isinstance(tree_obj, dict):
-        for dir_name, obj in tree_obj.iteritems():
+        for dir_name, obj in tree_obj.items():
             dir_path = os.path.join(start_path, dir_name)
             os.mkdir(dir_path)
             build_dir_tree(dir_path, obj)
@@ -115,7 +115,7 @@ def build_tar_tree(tar, start_path, tree_obj, base_path=''):
         for obj in tree_obj:
             build_tar_tree(tar, start_path, obj, base_path=base_path)
     if isinstance(tree_obj, dict):
-        for dir_name, obj in tree_obj.iteritems():
+        for dir_name, obj in tree_obj.items():
             dir_path = os.path.join(start_path, dir_name)
             tar_info = tarfile.TarInfo(dir_path[len(base_path):])
             tar_info.type = tarfile.DIRTYPE
@@ -187,7 +187,7 @@ class TestProxyLogging(unittest.TestCase):
             '/v1/a/c/o/p/p2': 'object',
         }
         with mock.patch("time.time", stub_time):
-            for path, exp_type in path_types.iteritems():
+            for path, exp_type in path_types.items():
                 # GET
                 app = proxy_logging.ProxyLoggingMiddleware(
                     FakeApp(body='7654321', response_str='321 Fubar'), {})
@@ -257,7 +257,7 @@ class TestProxyLogging(unittest.TestCase):
             'DELETE': 'DELETE',
             'OPTIONS': 'OPTIONS',
         }
-        for method, exp_method in method_map.iteritems():
+        for method, exp_method in method_map.items():
             app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
             app.access_logger = FakeLogger()
             req = Request.blank('/v1/a/', environ={'REQUEST_METHOD': method})
@@ -281,7 +281,7 @@ class TestProxyLogging(unittest.TestCase):
         # this conf var supports optional leading access_
         for conf_key in ['access_log_statsd_valid_http_methods',
                          'log_statsd_valid_http_methods']:
-            for method, exp_method in method_map.iteritems():
+            for method, exp_method in method_map.items():
                 app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
                     conf_key: 'SPECIAL, GET,PUT ',  # crazy spaces ok
                 })
@@ -994,7 +994,7 @@ class TestTempURL(unittest.TestCase):
         hdrs = HeaderKeyDict(tempurl.TempURL(
             None,
             {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
-        )._clean_outgoing_headers(hdrs.iteritems()))
+        )._clean_outgoing_headers(hdrs.items()))
         self.assertTrue('test-header' in hdrs)
 
         orh = 'test-header'
@@ -1003,7 +1003,7 @@ class TestTempURL(unittest.TestCase):
         hdrs = HeaderKeyDict(tempurl.TempURL(
             None,
             {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
-        )._clean_outgoing_headers(hdrs.iteritems()))
+        )._clean_outgoing_headers(hdrs.items()))
         self.assertTrue('test-header' not in hdrs)
 
         orh = 'test-header-*'
@@ -1013,7 +1013,7 @@ class TestTempURL(unittest.TestCase):
         hdrs = HeaderKeyDict(tempurl.TempURL(
             None,
             {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
-        )._clean_outgoing_headers(hdrs.iteritems()))
+        )._clean_outgoing_headers(hdrs.items()))
         self.assertTrue('test-header-one' not in hdrs)
         self.assertTrue('test-header-two' not in hdrs)
 
@@ -1024,7 +1024,7 @@ class TestTempURL(unittest.TestCase):
         hdrs = HeaderKeyDict(tempurl.TempURL(
             None,
             {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
-        )._clean_outgoing_headers(hdrs.iteritems()))
+        )._clean_outgoing_headers(hdrs.items()))
         self.assertTrue('test-header-one' not in hdrs)
         self.assertTrue('test-header-two' in hdrs)
 
@@ -1038,7 +1038,7 @@ class TestTempURL(unittest.TestCase):
         hdrs = HeaderKeyDict(tempurl.TempURL(
             None,
             {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
-        )._clean_outgoing_headers(hdrs.iteritems()))
+        )._clean_outgoing_headers(hdrs.items()))
         self.assertTrue('test-header-one' not in hdrs)
         self.assertTrue('test-header-two' in hdrs)
         self.assertTrue('test-other-header' not in hdrs)
@@ -425,7 +425,7 @@ class TestRingBuilder(unittest.TestCase):
                 raise AssertionError(
                     "Partition %d not evenly distributed (got %r)" %
                     (part, counts['zone']))
-            for dev_id, replica_count in counts['dev_id'].iteritems():
+            for dev_id, replica_count in counts['dev_id'].items():
                 if replica_count > 1:
                     raise AssertionError(
                         "Partition %d is on device %d more than once (%r)" %
@@ -462,7 +462,7 @@ class TestRingBuilder(unittest.TestCase):
                 raise AssertionError(
                     "Partition %d not evenly distributed (got %r)" %
                     (part, counts['zone']))
-            for dev_id, replica_count in counts['dev_id'].iteritems():
+            for dev_id, replica_count in counts['dev_id'].items():
                 if replica_count != 1:
                     raise AssertionError(
                         "Partition %d is on device %d %d times, not 1 (%r)" %
@@ -497,12 +497,12 @@ class TestRingBuilder(unittest.TestCase):
                 counts['dev_id'][dev['id']] += 1
 
             self.assertEquals(8, sum(counts['zone'].values()))
-            for zone, replica_count in counts['zone'].iteritems():
+            for zone, replica_count in counts['zone'].items():
                 if replica_count not in (2, 3):
                     raise AssertionError(
                         "Partition %d not evenly distributed (got %r)" %
                         (part, counts['zone']))
-            for dev_id, replica_count in counts['dev_id'].iteritems():
+            for dev_id, replica_count in counts['dev_id'].items():
                 if replica_count not in (1, 2):
                     raise AssertionError(
                         "Partition %d is on device %d %d times, "
@@ -2382,7 +2382,7 @@ class TestContainerController(unittest.TestCase):
                 'headers': headers, 'query_string': query_string}
 
             http_connect_args.append(
-                dict((k, v) for k, v in captured_args.iteritems()
+                dict((k, v) for k, v in captured_args.items()
                      if v is not None))
 
         req = Request.blank(
|
||||
'headers': headers, 'query_string': query_string}
|
||||
|
||||
http_connect_args.append(
|
||||
dict((k, v) for k, v in captured_args.iteritems()
|
||||
dict((k, v) for k, v in captured_args.items()
|
||||
if v is not None))
|
||||
|
||||
return SuccessfulFakeConn()
|
||||
@@ -3116,7 +3116,7 @@ class TestObjectController(unittest.TestCase):
                 'headers': headers, 'query_string': query_string}
 
             http_connect_args.append(
-                dict((k, v) for k, v in captured_args.iteritems()
+                dict((k, v) for k, v in captured_args.items()
                      if v is not None))
 
             return SuccessfulFakeConn()
@@ -1371,7 +1371,7 @@ class TestBaseSsync(BaseTestSender):
         # sanity check, they are not the same ondisk files!
         self.assertNotEqual(tx_df._datadir, rx_df._datadir)
         rx_metadata = dict(rx_df.get_metadata())
-        for k, v in tx_df.get_metadata().iteritems():
+        for k, v in tx_df.get_metadata().items():
             if k == 'X-Object-Sysmeta-Ec-Frag-Index':
                 # if tx_df had a frag_index then rx_df should also have one
                 self.assertTrue(k in rx_metadata)
@@ -1481,7 +1481,7 @@ class TestBaseSsync(BaseTestSender):
            have been used as a source for sync'ing
         :param rx_frag_index: the fragment index of expected rx diskfiles
         """
-        for o_name, diskfiles in tx_objs.iteritems():
+        for o_name, diskfiles in tx_objs.items():
             for tx_df in diskfiles:
                 if tx_frag_index is None or tx_df._frag_index == tx_frag_index:
                     # this diskfile should have been sync'd,
@@ -1503,7 +1503,7 @@ class TestBaseSsync(BaseTestSender):
 
     def _verify_tombstones(self, tx_objs, policy):
         # verify tx and rx tombstones that should be in sync
-        for o_name, diskfiles in tx_objs.iteritems():
+        for o_name, diskfiles in tx_objs.items():
             for tx_df_ in diskfiles:
                 try:
                     self._open_tx_diskfile(o_name, policy)
@@ -1703,7 +1703,7 @@ class TestSsyncEC(TestBaseSsync):
         failed_path = reconstruct_fa_calls[1][3]['name']
         expect_sync_paths.remove(failed_path)
         failed_obj = None
-        for obj, diskfiles in tx_objs.iteritems():
+        for obj, diskfiles in tx_objs.items():
             if diskfiles[0]._name == failed_path:
                 failed_obj = obj
         # sanity check
@@ -157,7 +157,7 @@ class TestObjectUpdater(unittest.TestCase):
         }
 
         expected = set()
-        for o, timestamps in objects.iteritems():
+        for o, timestamps in objects.items():
             ohash = hash_path('account', 'container', o)
             for t in timestamps:
                 o_path = os.path.join(prefix_dir, ohash + '-' +
@@ -633,7 +633,7 @@ class TestFuncs(unittest.TestCase):
         expected_headers = {'x-base-meta-owner': '',
                             'x-base-meta-size': '151M',
                             'connection': 'close'}
-        for k, v in expected_headers.iteritems():
+        for k, v in expected_headers.items():
             self.assertTrue(k in dst_headers)
             self.assertEqual(v, dst_headers[k])
         self.assertFalse('new-owner' in dst_headers)
@@ -647,10 +647,10 @@ class TestFuncs(unittest.TestCase):
         hdrs.update(bad_hdrs)
         req = Request.blank('/v1/a/c/o', headers=hdrs)
         dst_headers = base.generate_request_headers(req, transfer=True)
-        for k, v in good_hdrs.iteritems():
+        for k, v in good_hdrs.items():
             self.assertTrue(k.lower() in dst_headers)
             self.assertEqual(v, dst_headers[k.lower()])
-        for k, v in bad_hdrs.iteritems():
+        for k, v in bad_hdrs.items():
             self.assertFalse(k.lower() in dst_headers)
 
     def test_client_chunk_size(self):
@@ -7682,7 +7682,7 @@ class TestContainerController(unittest.TestCase):
                     find_header = \
                         find_header.lower().replace('-remove', '', 1)
                     find_value = ''
                for k, v in headers.iteritems():
-                for k, v in headers.iteritems():
+                for k, v in headers.items():
                     if k.lower() == find_header.lower() and \
                             v == find_value:
                         break
@@ -8636,7 +8636,7 @@ class TestAccountController(unittest.TestCase):
                     find_header = \
                         find_header.lower().replace('-remove', '', 1)
                     find_value = ''
-                for k, v in headers.iteritems():
+                for k, v in headers.items():
                     if k.lower() == find_header.lower() and \
                             v == find_value:
                         break
@@ -113,7 +113,7 @@ class TestObjectSysmeta(unittest.TestCase):
                          % (expected, resp.status))
 
     def _assertInHeaders(self, resp, expected):
-        for key, val in expected.iteritems():
+        for key, val in expected.items():
            self.assertTrue(key in resp.headers,
                            'Header %s missing from %s' % (key, resp.headers))
            self.assertEqual(val, resp.headers[key],
@@ -121,7 +121,7 @@ class TestObjectSysmeta(unittest.TestCase):
                          % (key, val, key, resp.headers[key]))
 
     def _assertNotInHeaders(self, resp, unexpected):
-        for key, val in unexpected.iteritems():
+        for key, val in unexpected.items():
             self.assertFalse(key in resp.headers,
                              'Header %s not expected in %s'
                              % (key, resp.headers))
@@ -57,7 +57,7 @@ class TestTranslations(unittest.TestCase):
         threading._DummyThread._Thread__stop = lambda x: 42
 
     def tearDown(self):
-        for var, val in self.orig_env.iteritems():
+        for var, val in self.orig_env.items():
             if val is not None:
                 os.environ[var] = val
             else: