Replace dict.iteritems() with dict.items()
Python 2's dict.iteritems() is gone in Python 3; the replacement is
dict.items(). According to a discussion on the openstack-dev mailing
list, the overhead of creating a temporary list with dict.items() on
Python 2 is very low, because most of these dictionaries are small:

    http://lists.openstack.org/pipermail/openstack-dev/2015-June/066391.html

Patch generated by the following command:

    sed -i 's,iteritems,items,g' \
        $(find swift -name "*.py") \
        $(find test -name "*.py")

Change-Id: I6070bb6c684be76e8e77222a7d280ec6edd43496
parent 5370526b57
commit e70b66586e
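For context, a minimal sketch (not from the patch itself) of the
difference the mailing-list thread weighs: on Python 2, dict.items()
builds a temporary list up front while dict.iteritems() yields pairs
lazily; Python 3 drops iteritems() entirely and makes items() return a
cheap view object. Code written against items() therefore runs on both
versions. The headers dict below is a hypothetical example:

    import sys

    # A small mapping, like the header dicts this patch touches.
    headers = {'X-Account-Meta-Color': 'blue', 'Content-Length': '42'}

    if sys.version_info[0] == 2:
        # Python 2 only: iteritems() lazily yields the same pairs that
        # items() returns as a list.
        assert list(headers.iteritems()) == headers.items()

    # The portable spelling this patch standardizes on:
    for key, value in headers.items():
        print('%s: %s' % (key, value))

A quick way to confirm the sed pass left nothing behind would be
"grep -rn iteritems swift/ test/" (an assumed verification step, not
part of the commit).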
@@ -153,7 +153,7 @@ class AccountController(BaseStorageServer):
                 return HTTPConflict(request=req)
             metadata = {}
             metadata.update((key, (value, timestamp.internal))
-                            for key, value in req.headers.iteritems()
+                            for key, value in req.headers.items()
                             if is_sys_or_user_meta('account', key))
             if metadata:
                 broker.update_metadata(metadata, validate_metadata=True)
@@ -246,7 +246,7 @@ class AccountController(BaseStorageServer):
             return self._deleted_response(broker, req, HTTPNotFound)
         metadata = {}
         metadata.update((key, (value, req_timestamp.internal))
-                        for key, value in req.headers.iteritems()
+                        for key, value in req.headers.items()
                         if is_sys_or_user_meta('account', key))
         if metadata:
             broker.update_metadata(metadata, validate_metadata=True)
@@ -64,7 +64,7 @@ def get_response_headers(broker):
         resp_headers[header_name] = value
     resp_headers.update((key, value)
                         for key, (value, timestamp) in
-                        broker.metadata.iteritems() if value != '')
+                        broker.metadata.items() if value != '')
     return resp_headers


@@ -212,13 +212,13 @@ def print_db_info_metadata(db_type, info, metadata):
         raise ValueError('Info is incomplete: %s' % e)

     meta_prefix = 'x_' + db_type + '_'
-    for key, value in info.iteritems():
+    for key, value in info.items():
         if key.lower().startswith(meta_prefix):
             title = key.replace('_', '-').title()
             print '  %s: %s' % (title, value)
     user_metadata = {}
     sys_metadata = {}
-    for key, (value, timestamp) in metadata.iteritems():
+    for key, (value, timestamp) in metadata.items():
         if is_user_meta(db_type, key):
             user_metadata[strip_user_meta_prefix(db_type, key)] = value
         elif is_sys_meta(db_type, key):
@@ -284,7 +284,7 @@ def print_obj_metadata(metadata):
     else:
         print 'Timestamp: Not found in metadata'

-    for key, value in metadata.iteritems():
+    for key, value in metadata.items():
         if is_user_meta('Object', key):
             user_metadata[key] = value
         elif is_sys_meta('Object', key):
@@ -97,7 +97,7 @@ def _find_parts(devs):

     # Sort by number of found replicas to keep the output format
     sorted_partition_count = sorted(
-        partition_count.iteritems(), key=itemgetter(1), reverse=True)
+        partition_count.items(), key=itemgetter(1), reverse=True)

     return sorted_partition_count

@@ -1189,7 +1189,7 @@ def main(arguments=None):
         globals()
         print Commands.default.__doc__.strip()
         print
-        cmds = [c for c, f in Commands.__dict__.iteritems()
+        cmds = [c for c, f in Commands.__dict__.items()
                 if f.__doc__ and c[0] != '_' and c != 'default']
         cmds.sort()
         for cmd in cmds:
@@ -235,7 +235,7 @@ def http_connect_raw(ipaddr, port, method, path, headers=None,
     conn.path = path
     conn.putrequest(method, path, skip_host=(headers and 'Host' in headers))
     if headers:
-        for header, value in headers.iteritems():
+        for header, value in headers.items():
             conn.putheader(header, str(value))
     conn.endheaders()
     return conn
@@ -120,7 +120,7 @@ def check_metadata(req, target_type):
     prefix = 'x-%s-meta-' % target_type.lower()
     meta_count = 0
     meta_size = 0
-    for key, value in req.headers.iteritems():
+    for key, value in req.headers.items():
         if isinstance(value, basestring) and len(value) > MAX_HEADER_SIZE:
             return HTTPBadRequest(body='Header value too long: %s' %
                                   key[:MAX_META_NAME_LENGTH],
@@ -734,7 +734,7 @@ class DatabaseBroker(object):
         """
         meta_count = 0
         meta_size = 0
-        for key, (value, timestamp) in metadata.iteritems():
+        for key, (value, timestamp) in metadata.items():
             key = key.lower()
             if value != '' and (key.startswith('x-account-meta') or
                                 key.startswith('x-container-meta')):
@@ -762,7 +762,7 @@ class DatabaseBroker(object):
         """
         old_metadata = self.metadata
         if set(metadata_updates).issubset(set(old_metadata)):
-            for key, (value, timestamp) in metadata_updates.iteritems():
+            for key, (value, timestamp) in metadata_updates.items():
                 if timestamp > old_metadata[key][1]:
                     break
             else:
@@ -780,7 +780,7 @@ class DatabaseBroker(object):
                         ALTER TABLE %s_stat
                         ADD COLUMN metadata TEXT DEFAULT '' """ % self.db_type)
                     md = {}
-                for key, value_timestamp in metadata_updates.iteritems():
+                for key, value_timestamp in metadata_updates.items():
                     value, timestamp = value_timestamp
                     if key not in md or timestamp > md[key][1]:
                         md[key] = value_timestamp
@@ -844,7 +844,7 @@ class DatabaseBroker(object):
             if md:
                 md = json.loads(md)
                 keys_to_delete = []
-                for key, (value, value_timestamp) in md.iteritems():
+                for key, (value, value_timestamp) in md.items():
                     if value == '' and value_timestamp < timestamp:
                         keys_to_delete.append(key)
                 if keys_to_delete:
@@ -390,7 +390,7 @@ def direct_put_object(node, part, account, container, name, contents,
     if content_length is not None:
         headers['Content-Length'] = str(content_length)
     else:
-        for n, v in headers.iteritems():
+        for n, v in headers.items():
             if n.lower() == 'content-length':
                 content_length = int(v)
     if content_type is not None:
@@ -223,7 +223,7 @@ class InternalClient(object):
         resp = self.make_request('HEAD', path, headers, acceptable_statuses)
         metadata_prefix = metadata_prefix.lower()
         metadata = {}
-        for k, v in resp.headers.iteritems():
+        for k, v in resp.headers.items():
             if k.lower().startswith(metadata_prefix):
                 metadata[k[len(metadata_prefix):].lower()] = v
         return metadata
@@ -307,7 +307,7 @@ class InternalClient(object):
         """

         headers = {}
-        for k, v in metadata.iteritems():
+        for k, v in metadata.items():
             if k.lower().startswith(metadata_prefix):
                 headers[k] = v
             else:
@@ -426,7 +426,7 @@ class MemcacheRing(object):
         server_key = md5hash(server_key)
         timeout = sanitize_timeout(time or timeout)
         msg = ''
-        for key, value in mapping.iteritems():
+        for key, value in mapping.items():
             key = md5hash(key)
             flags = 0
             if serialize and self._allow_pickle:
@@ -161,7 +161,7 @@ def get_tempurl_keys_from_metadata(meta):
         meta = get_account_info(...)['meta']
         keys = get_tempurl_keys_from_metadata(meta)
     """
-    return [get_valid_utf8_str(value) for key, value in meta.iteritems()
+    return [get_valid_utf8_str(value) for key, value in meta.items()
             if key.lower() in ('temp-url-key', 'temp-url-key-2')]


@@ -941,7 +941,7 @@ class RingBuilder(object):
         reassign_parts.update(spread_out_parts)
         reassign_parts.update(removed_dev_parts)

-        reassign_parts_list = list(reassign_parts.iteritems())
+        reassign_parts_list = list(reassign_parts.items())
         # We shuffle the partitions to reassign so we get a more even
         # distribution later. There has been discussion of trying to distribute
         # partitions more "regularly" because that would actually reduce risk
@@ -875,7 +875,7 @@ class Request(object):
         elif 'wsgi.input' not in env:
             env['wsgi.input'] = WsgiStringIO('')
         req = Request(env)
-        for key, val in headers.iteritems():
+        for key, val in headers.items():
             req.headers[key] = val
         for key, val in kwargs.items():
             prop = getattr(Request, key, None)
@@ -1141,7 +1141,7 @@ class Response(object):
             self.headers.update(headers)
         if self.status_int == 401 and 'www-authenticate' not in self.headers:
             self.headers.update({'www-authenticate': self.www_authenticate()})
-        for key, value in kw.iteritems():
+        for key, value in kw.items():
             setattr(self, key, value)
         # When specifying both 'content_type' and 'charset' in the kwargs,
         # charset needs to be applied *after* content_type, otherwise charset
@@ -225,7 +225,7 @@ def register_swift_info(name='swift', admin=False, **kwargs):
     if "." in name:
         raise ValueError('Cannot use "." in a swift_info key: %s' % name)
     dict_to_use[name] = {}
-    for key, val in kwargs.iteritems():
+    for key, val in kwargs.items():
         if "." in key:
             raise ValueError('Cannot use "." in a swift_info key: %s' % key)
         dict_to_use[name][key] = val
@@ -580,12 +580,12 @@ class PortPidState(object):
         :returns: The port the socket is bound to.
         """

-        for port, sock_data in self.sock_data_by_port.iteritems():
+        for port, sock_data in self.sock_data_by_port.items():
             if sock_data['sock'] == sock:
                 return port

     def _pid_to_port_and_index(self, pid):
-        for port, sock_data in self.sock_data_by_port.iteritems():
+        for port, sock_data in self.sock_data_by_port.items():
             for server_idx, a_pid in enumerate(sock_data['pids']):
                 if pid == a_pid:
                     return port, server_idx
@@ -597,7 +597,7 @@ class PortPidState(object):
         """

         current_port_index_pairs = set()
-        for port, pid_state in self.sock_data_by_port.iteritems():
+        for port, pid_state in self.sock_data_by_port.items():
             current_port_index_pairs |= set(
                 (port, i)
                 for i, pid in enumerate(pid_state['pids'])
@@ -367,7 +367,7 @@ class ContainerController(BaseStorageServer):
             metadata = {}
             metadata.update(
                 (key, (value, req_timestamp.internal))
-                for key, value in req.headers.iteritems()
+                for key, value in req.headers.items()
                 if key.lower() in self.save_headers or
                 is_sys_or_user_meta('container', key))
             if 'X-Container-Sync-To' in metadata:
@@ -406,7 +406,7 @@ class ContainerController(BaseStorageServer):
             return HTTPNotFound(request=req, headers=headers)
         headers.update(
             (key, value)
-            for key, (value, timestamp) in broker.metadata.iteritems()
+            for key, (value, timestamp) in broker.metadata.items()
             if value != '' and (key.lower() in self.save_headers or
                                 is_sys_or_user_meta('container', key)))
         headers['Content-Type'] = out_content_type
@@ -473,7 +473,7 @@ class ContainerController(BaseStorageServer):

     def create_listing(self, req, out_content_type, info, resp_headers,
                        metadata, container_list, container):
-        for key, (value, timestamp) in metadata.iteritems():
+        for key, (value, timestamp) in metadata.items():
             if value and (key.lower() in self.save_headers or
                           is_sys_or_user_meta('container', key)):
                 resp_headers[key] = value
@@ -547,7 +547,7 @@ class ContainerController(BaseStorageServer):
         metadata = {}
         metadata.update(
             (key, (value, req_timestamp.internal))
-            for key, value in req.headers.iteritems()
+            for key, value in req.headers.items()
             if key.lower() in self.save_headers or
             is_sys_or_user_meta('container', key))
         if metadata:
@@ -322,7 +322,7 @@ class ContainerSync(Daemon):
             user_key = None
             sync_point1 = info['x_container_sync_point1']
             sync_point2 = info['x_container_sync_point2']
-            for key, (value, timestamp) in broker.metadata.iteritems():
+            for key, (value, timestamp) in broker.metadata.items():
                 if key.lower() == 'x-container-sync-to':
                     sync_to = value
                 elif key.lower() == 'x-container-sync-key':
@@ -121,7 +121,7 @@ class ContainerUpdater(Daemon):
         begin = time.time()
         now = time.time()
         expired_suppressions = \
-            [a for a, u in self.account_suppressions.iteritems()
+            [a for a, u in self.account_suppressions.items()
             if u < now]
         for account in expired_suppressions:
             del self.account_suppressions[account]
@@ -1620,7 +1620,7 @@ class DiskFile(object):
         if meta_file:
             self._metadata = self._failsafe_read_metadata(meta_file, meta_file)
             sys_metadata = dict(
-                [(key, val) for key, val in datafile_metadata.iteritems()
+                [(key, val) for key, val in datafile_metadata.items()
                  if key.lower() in DATAFILE_SYSTEM_META
                  or is_sys_meta('object', key)])
             self._metadata.update(sys_metadata)
@@ -419,7 +419,7 @@ class ObjectReconstructor(Daemon):
        :returns: a list of strings, the suffix dirs to sync
        """
        suffixes = []
-       for suffix, sub_dict_local in local_suff.iteritems():
+       for suffix, sub_dict_local in local_suff.items():
            sub_dict_remote = remote_suff.get(suffix, {})
            if (sub_dict_local.get(None) != sub_dict_remote.get(None) or
                    sub_dict_local.get(local_index) !=
@@ -273,7 +273,7 @@ class ObjectReplicator(Daemon):
                     synced_remote_regions[node['region']] = \
                         candidates.keys()
             responses.append(success)
-        for region, cand_objs in synced_remote_regions.iteritems():
+        for region, cand_objs in synced_remote_regions.items():
             if delete_objs is None:
                 delete_objs = cand_objs
             else:
@@ -411,7 +411,7 @@ class ObjectController(BaseStorageServer):
             raise HTTPBadRequest("invalid JSON for footer doc")

     def _check_container_override(self, update_headers, metadata):
-        for key, val in metadata.iteritems():
+        for key, val in metadata.items():
             override_prefix = 'x-backend-container-update-override-'
             if key.lower().startswith(override_prefix):
                 override = key.lower().replace(override_prefix, 'x-')
@@ -446,7 +446,7 @@ class ObjectController(BaseStorageServer):
                 request=request,
                 headers={'X-Backend-Timestamp': orig_timestamp.internal})
         metadata = {'X-Timestamp': req_timestamp.internal}
-        metadata.update(val for val in request.headers.iteritems()
+        metadata.update(val for val in request.headers.items()
                         if is_user_meta('object', val[0]))
         for header_key in self.allowed_headers:
             if header_key in request.headers:
@@ -614,9 +614,9 @@ class ObjectController(BaseStorageServer):
                     'ETag': etag,
                     'Content-Length': str(upload_size),
                 }
-                metadata.update(val for val in request.headers.iteritems()
+                metadata.update(val for val in request.headers.items()
                                 if is_sys_or_user_meta('object', val[0]))
-                metadata.update(val for val in footer_meta.iteritems()
+                metadata.update(val for val in footer_meta.items()
                                 if is_sys_or_user_meta('object', val[0]))
                 headers_to_copy = (
                     request.headers.get(
@@ -712,7 +712,7 @@ class ObjectController(BaseStorageServer):
             conditional_etag=conditional_etag)
         response.headers['Content-Type'] = metadata.get(
             'Content-Type', 'application/octet-stream')
-        for key, value in metadata.iteritems():
+        for key, value in metadata.items():
             if is_sys_or_user_meta('object', key) or \
                     key.lower() in self.allowed_headers:
                 response.headers[key] = value
@@ -767,7 +767,7 @@ class ObjectController(BaseStorageServer):
             conditional_etag=conditional_etag)
         response.headers['Content-Type'] = metadata.get(
             'Content-Type', 'application/octet-stream')
-        for key, value in metadata.iteritems():
+        for key, value in metadata.items():
             if is_sys_or_user_meta('object', key) or \
                     key.lower() in self.allowed_headers:
                 response.headers[key] = value
@@ -332,7 +332,7 @@ class Sender(object):
         """
         msg = ['PUT ' + url_path, 'Content-Length: ' + str(df.content_length)]
         # Sorted to make it easier to test.
-        for key, value in sorted(df.get_metadata().iteritems()):
+        for key, value in sorted(df.get_metadata().items()):
             if key not in ('name', 'Content-Length'):
                 msg.append('%s: %s' % (key, value))
         msg = '\r\n'.join(msg) + '\r\n\r\n'
@@ -122,7 +122,7 @@ def _prep_headers_to_info(headers, server_type):
     meta = {}
     sysmeta = {}
     other = {}
-    for key, val in dict(headers).iteritems():
+    for key, val in dict(headers).items():
         lkey = key.lower()
         if is_user_meta(server_type, lkey):
             meta[strip_user_meta_prefix(server_type, lkey)] = val
@@ -1166,7 +1166,7 @@ class Controller(object):
                            k.lower() in self._x_remove_headers())

         dst_headers.update((k.lower(), v)
-                           for k, v in src_headers.iteritems()
+                           for k, v in src_headers.items()
                            if k.lower() in self.pass_through_headers or
                            is_sys_or_user_meta(st, k))

@@ -1488,7 +1488,7 @@ class Controller(object):
         # transfer any x-account-sysmeta headers from original request
         # to the autocreate PUT
         headers.update((k, v)
-                       for k, v in req.headers.iteritems()
+                       for k, v in req.headers.items()
                       if is_sys_meta('account', k))
         resp = self.make_requests(Request.blank('/v1' + path),
                                   self.app.account_ring, partition, 'PUT',
@@ -334,7 +334,7 @@ class Connection(object):
             port=self.storage_port)
         #self.connection.set_debuglevel(3)
         self.connection.putrequest('PUT', path)
-        for key, value in headers.iteritems():
+        for key, value in headers.items():
             self.connection.putheader(key, value)
         self.connection.endheaders()

@@ -130,13 +130,13 @@ def kill_server(ipport, ipport2server, pids):

 def kill_nonprimary_server(primary_nodes, ipport2server, pids):
     primary_ipports = [(n['ip'], n['port']) for n in primary_nodes]
-    for ipport, server in ipport2server.iteritems():
+    for ipport, server in ipport2server.items():
         if ipport in primary_ipports:
             server_type = server[:-1]
             break
     else:
         raise Exception('Cannot figure out server type for %r' % primary_nodes)
-    for ipport, server in list(ipport2server.iteritems()):
+    for ipport, server in list(ipport2server.items()):
         if server[:-1] == server_type and ipport not in primary_ipports:
             kill_server(ipport, ipport2server, pids)
             return ipport
@@ -182,7 +182,7 @@ def get_ring(ring_name, required_replicas, required_devices,

     repl_name = '%s-replicator' % server
     repl_configs = {i: readconf(c, section_name=repl_name)
-                    for i, c in config_paths[repl_name].iteritems()}
+                    for i, c in config_paths[repl_name].items()}
     servers_per_port = any(int(c.get('servers_per_port', '0'))
                            for c in repl_configs.values())

@@ -98,7 +98,7 @@ def build_dir_tree(start_path, tree_obj):
         for obj in tree_obj:
             build_dir_tree(start_path, obj)
     if isinstance(tree_obj, dict):
-        for dir_name, obj in tree_obj.iteritems():
+        for dir_name, obj in tree_obj.items():
             dir_path = os.path.join(start_path, dir_name)
             os.mkdir(dir_path)
             build_dir_tree(dir_path, obj)
@@ -115,7 +115,7 @@ def build_tar_tree(tar, start_path, tree_obj, base_path=''):
         for obj in tree_obj:
             build_tar_tree(tar, start_path, obj, base_path=base_path)
     if isinstance(tree_obj, dict):
-        for dir_name, obj in tree_obj.iteritems():
+        for dir_name, obj in tree_obj.items():
             dir_path = os.path.join(start_path, dir_name)
             tar_info = tarfile.TarInfo(dir_path[len(base_path):])
             tar_info.type = tarfile.DIRTYPE
@@ -187,7 +187,7 @@ class TestProxyLogging(unittest.TestCase):
             '/v1/a/c/o/p/p2': 'object',
         }
         with mock.patch("time.time", stub_time):
-            for path, exp_type in path_types.iteritems():
+            for path, exp_type in path_types.items():
                 # GET
                 app = proxy_logging.ProxyLoggingMiddleware(
                     FakeApp(body='7654321', response_str='321 Fubar'), {})
@@ -257,7 +257,7 @@ class TestProxyLogging(unittest.TestCase):
             'DELETE': 'DELETE',
             'OPTIONS': 'OPTIONS',
         }
-        for method, exp_method in method_map.iteritems():
+        for method, exp_method in method_map.items():
             app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
             app.access_logger = FakeLogger()
             req = Request.blank('/v1/a/', environ={'REQUEST_METHOD': method})
@@ -281,7 +281,7 @@ class TestProxyLogging(unittest.TestCase):
         # this conf var supports optional leading access_
         for conf_key in ['access_log_statsd_valid_http_methods',
                          'log_statsd_valid_http_methods']:
-            for method, exp_method in method_map.iteritems():
+            for method, exp_method in method_map.items():
                 app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
                     conf_key: 'SPECIAL, GET,PUT ',  # crazy spaces ok
                 })
@@ -994,7 +994,7 @@ class TestTempURL(unittest.TestCase):
         hdrs = HeaderKeyDict(tempurl.TempURL(
             None,
             {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
-        )._clean_outgoing_headers(hdrs.iteritems()))
+        )._clean_outgoing_headers(hdrs.items()))
         self.assertTrue('test-header' in hdrs)

         orh = 'test-header'
@@ -1003,7 +1003,7 @@ class TestTempURL(unittest.TestCase):
         hdrs = HeaderKeyDict(tempurl.TempURL(
             None,
             {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
-        )._clean_outgoing_headers(hdrs.iteritems()))
+        )._clean_outgoing_headers(hdrs.items()))
         self.assertTrue('test-header' not in hdrs)

         orh = 'test-header-*'
@@ -1013,7 +1013,7 @@ class TestTempURL(unittest.TestCase):
         hdrs = HeaderKeyDict(tempurl.TempURL(
             None,
             {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
-        )._clean_outgoing_headers(hdrs.iteritems()))
+        )._clean_outgoing_headers(hdrs.items()))
         self.assertTrue('test-header-one' not in hdrs)
         self.assertTrue('test-header-two' not in hdrs)

@@ -1024,7 +1024,7 @@ class TestTempURL(unittest.TestCase):
         hdrs = HeaderKeyDict(tempurl.TempURL(
             None,
             {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
-        )._clean_outgoing_headers(hdrs.iteritems()))
+        )._clean_outgoing_headers(hdrs.items()))
         self.assertTrue('test-header-one' not in hdrs)
         self.assertTrue('test-header-two' in hdrs)

@@ -1038,7 +1038,7 @@ class TestTempURL(unittest.TestCase):
         hdrs = HeaderKeyDict(tempurl.TempURL(
             None,
             {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
-        )._clean_outgoing_headers(hdrs.iteritems()))
+        )._clean_outgoing_headers(hdrs.items()))
         self.assertTrue('test-header-one' not in hdrs)
         self.assertTrue('test-header-two' in hdrs)
         self.assertTrue('test-other-header' not in hdrs)
@@ -425,7 +425,7 @@ class TestRingBuilder(unittest.TestCase):
                 raise AssertionError(
                     "Partition %d not evenly distributed (got %r)" %
                     (part, counts['zone']))
-            for dev_id, replica_count in counts['dev_id'].iteritems():
+            for dev_id, replica_count in counts['dev_id'].items():
                 if replica_count > 1:
                     raise AssertionError(
                         "Partition %d is on device %d more than once (%r)" %
@@ -462,7 +462,7 @@ class TestRingBuilder(unittest.TestCase):
                 raise AssertionError(
                     "Partition %d not evenly distributed (got %r)" %
                     (part, counts['zone']))
-            for dev_id, replica_count in counts['dev_id'].iteritems():
+            for dev_id, replica_count in counts['dev_id'].items():
                 if replica_count != 1:
                     raise AssertionError(
                         "Partition %d is on device %d %d times, not 1 (%r)" %
@@ -497,12 +497,12 @@ class TestRingBuilder(unittest.TestCase):
                 counts['dev_id'][dev['id']] += 1

             self.assertEquals(8, sum(counts['zone'].values()))
-            for zone, replica_count in counts['zone'].iteritems():
+            for zone, replica_count in counts['zone'].items():
                 if replica_count not in (2, 3):
                     raise AssertionError(
                         "Partition %d not evenly distributed (got %r)" %
                         (part, counts['zone']))
-            for dev_id, replica_count in counts['dev_id'].iteritems():
+            for dev_id, replica_count in counts['dev_id'].items():
                 if replica_count not in (1, 2):
                     raise AssertionError(
                         "Partition %d is on device %d %d times, "
@@ -2382,7 +2382,7 @@ class TestContainerController(unittest.TestCase):
                 'headers': headers, 'query_string': query_string}

             http_connect_args.append(
-                dict((k, v) for k, v in captured_args.iteritems()
+                dict((k, v) for k, v in captured_args.items()
                      if v is not None))

         req = Request.blank(
@@ -2998,7 +2998,7 @@ class TestObjectController(unittest.TestCase):
                 'headers': headers, 'query_string': query_string}

             http_connect_args.append(
-                dict((k, v) for k, v in captured_args.iteritems()
+                dict((k, v) for k, v in captured_args.items()
                      if v is not None))

             return SuccessfulFakeConn()
@@ -3116,7 +3116,7 @@ class TestObjectController(unittest.TestCase):
                 'headers': headers, 'query_string': query_string}

             http_connect_args.append(
-                dict((k, v) for k, v in captured_args.iteritems()
+                dict((k, v) for k, v in captured_args.items()
                      if v is not None))

             return SuccessfulFakeConn()
@@ -1371,7 +1371,7 @@ class TestBaseSsync(BaseTestSender):
         # sanity check, they are not the same ondisk files!
         self.assertNotEqual(tx_df._datadir, rx_df._datadir)
         rx_metadata = dict(rx_df.get_metadata())
-        for k, v in tx_df.get_metadata().iteritems():
+        for k, v in tx_df.get_metadata().items():
             if k == 'X-Object-Sysmeta-Ec-Frag-Index':
                 # if tx_df had a frag_index then rx_df should also have one
                 self.assertTrue(k in rx_metadata)
@@ -1481,7 +1481,7 @@ class TestBaseSsync(BaseTestSender):
             have been used as a source for sync'ing
         :param rx_frag_index: the fragment index of expected rx diskfiles
         """
-        for o_name, diskfiles in tx_objs.iteritems():
+        for o_name, diskfiles in tx_objs.items():
             for tx_df in diskfiles:
                 if tx_frag_index is None or tx_df._frag_index == tx_frag_index:
                     # this diskfile should have been sync'd,
@@ -1503,7 +1503,7 @@ class TestBaseSsync(BaseTestSender):

     def _verify_tombstones(self, tx_objs, policy):
         # verify tx and rx tombstones that should be in sync
-        for o_name, diskfiles in tx_objs.iteritems():
+        for o_name, diskfiles in tx_objs.items():
             for tx_df_ in diskfiles:
                 try:
                     self._open_tx_diskfile(o_name, policy)
@@ -1703,7 +1703,7 @@ class TestSsyncEC(TestBaseSsync):
         failed_path = reconstruct_fa_calls[1][3]['name']
         expect_sync_paths.remove(failed_path)
         failed_obj = None
-        for obj, diskfiles in tx_objs.iteritems():
+        for obj, diskfiles in tx_objs.items():
             if diskfiles[0]._name == failed_path:
                 failed_obj = obj
         # sanity check
@@ -157,7 +157,7 @@ class TestObjectUpdater(unittest.TestCase):
         }

         expected = set()
-        for o, timestamps in objects.iteritems():
+        for o, timestamps in objects.items():
             ohash = hash_path('account', 'container', o)
             for t in timestamps:
                 o_path = os.path.join(prefix_dir, ohash + '-' +
@@ -633,7 +633,7 @@ class TestFuncs(unittest.TestCase):
         expected_headers = {'x-base-meta-owner': '',
                             'x-base-meta-size': '151M',
                             'connection': 'close'}
-        for k, v in expected_headers.iteritems():
+        for k, v in expected_headers.items():
             self.assertTrue(k in dst_headers)
             self.assertEqual(v, dst_headers[k])
         self.assertFalse('new-owner' in dst_headers)
@@ -647,10 +647,10 @@ class TestFuncs(unittest.TestCase):
         hdrs.update(bad_hdrs)
         req = Request.blank('/v1/a/c/o', headers=hdrs)
         dst_headers = base.generate_request_headers(req, transfer=True)
-        for k, v in good_hdrs.iteritems():
+        for k, v in good_hdrs.items():
             self.assertTrue(k.lower() in dst_headers)
             self.assertEqual(v, dst_headers[k.lower()])
-        for k, v in bad_hdrs.iteritems():
+        for k, v in bad_hdrs.items():
             self.assertFalse(k.lower() in dst_headers)

     def test_client_chunk_size(self):
@@ -7682,7 +7682,7 @@ class TestContainerController(unittest.TestCase):
                     find_header = \
                         find_header.lower().replace('-remove', '', 1)
                     find_value = ''
-                for k, v in headers.iteritems():
+                for k, v in headers.items():
                     if k.lower() == find_header.lower() and \
                             v == find_value:
                         break
@@ -8636,7 +8636,7 @@ class TestAccountController(unittest.TestCase):
                     find_header = \
                         find_header.lower().replace('-remove', '', 1)
                     find_value = ''
-                for k, v in headers.iteritems():
+                for k, v in headers.items():
                     if k.lower() == find_header.lower() and \
                             v == find_value:
                         break
@@ -113,7 +113,7 @@ class TestObjectSysmeta(unittest.TestCase):
                          % (expected, resp.status))

     def _assertInHeaders(self, resp, expected):
-        for key, val in expected.iteritems():
+        for key, val in expected.items():
             self.assertTrue(key in resp.headers,
                             'Header %s missing from %s' % (key, resp.headers))
             self.assertEqual(val, resp.headers[key],
@@ -121,7 +121,7 @@ class TestObjectSysmeta(unittest.TestCase):
                          % (key, val, key, resp.headers[key]))

     def _assertNotInHeaders(self, resp, unexpected):
-        for key, val in unexpected.iteritems():
+        for key, val in unexpected.items():
             self.assertFalse(key in resp.headers,
                              'Header %s not expected in %s'
                              % (key, resp.headers))
@@ -57,7 +57,7 @@ class TestTranslations(unittest.TestCase):
         threading._DummyThread._Thread__stop = lambda x: 42

     def tearDown(self):
-        for var, val in self.orig_env.iteritems():
+        for var, val in self.orig_env.items():
             if val is not None:
                 os.environ[var] = val
             else: