Merge "Replace dict.iteritems() with dict.items()"

Jenkins 2015-06-15 22:10:18 +00:00 committed by Gerrit Code Review
commit 52e2e8e271
55 changed files with 100 additions and 101 deletions
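Context for the change (not part of the original commit message): dict.iteritems() exists only on Python 2, while dict.items() works on both interpreters, returning a list of pairs on Python 2 and a lightweight view on Python 3, so switching to items() keeps the code importable under Python 3. A minimal sketch of the pattern applied throughout these files; the example dictionary below is made up for illustration:

# Py2-only spelling removed by this change (AttributeError on Python 3):
#     for key, value in flags.iteritems():
#         ...
# Portable spelling kept by this change; 'flags' is an illustrative dict.
flags = {'status': 'enable', 'zone': 'nova'}
for key, value in flags.items():
    print('%s = %s' % (key, value))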

View File

@@ -157,7 +157,7 @@ class HostController(wsgi.Controller):
    def update(self, req, id, body):
        authorize(req.environ['cinder.context'])
        update_values = {}
-        for raw_key, raw_val in body.iteritems():
+        for raw_key, raw_val in body.items():
            key = raw_key.lower().strip()
            val = raw_val.lower().strip()
            if key == "status":
@@ -171,7 +171,7 @@ class HostController(wsgi.Controller):
                    raise webob.exc.HTTPBadRequest(explanation=explanation)
        update_setters = {'status': self._set_enabled_status}
        result = {}
-        for key, value in update_values.iteritems():
+        for key, value in update_values.items():
            result.update(update_setters[key](req, id, value))
        return result

View File

@@ -40,7 +40,7 @@ class UsedLimitsController(wsgi.Controller):
        }
        used_limits = {}
-        for display_name, single_quota in quota_map.iteritems():
+        for display_name, single_quota in quota_map.items():
            if single_quota in quotas:
                used_limits[display_name] = quotas[single_quota]['in_use']

View File

@@ -151,7 +151,7 @@ class ExtensionsResource(wsgi.Resource):
    @wsgi.serializers(xml=ExtensionsTemplate)
    def index(self, req):
        extensions = []
-        for _alias, ext in self.extension_manager.extensions.iteritems():
+        for _alias, ext in self.extension_manager.extensions.items():
            extensions.append(self._translate(ext))
        return dict(extensions=extensions)

View File

@@ -275,7 +275,7 @@ class VolumeController(wsgi.Controller):
        search_opts.pop('limit', None)
        search_opts.pop('offset', None)
-        for k, v in search_opts.iteritems():
+        for k, v in search_opts.items():
            try:
                search_opts[k] = ast.literal_eval(v)
            except (ValueError, SyntaxError):

View File

@@ -244,7 +244,7 @@ class VolumeController(wsgi.Controller):
            filters['display_name'] = filters['name']
            del filters['name']
-        for k, v in filters.iteritems():
+        for k, v in filters.items():
            try:
                filters[k] = ast.literal_eval(v)
            except (ValueError, SyntaxError):

View File

@@ -55,7 +55,7 @@ class ViewBuilder(object):
            "injected_file_content_bytes": ["maxPersonalitySize"],
        }
        limits = {}
-        for name, value in absolute_limits.iteritems():
+        for name, value in absolute_limits.items():
            if name in limit_names and value is not None:
                for name in limit_names[name]:
                    limits[name] = value

View File

@@ -176,7 +176,7 @@ def _check_dsmc_output(output, check_attrs, exact_match=True):
        if sep is not None and key is not None and len(val.strip()) > 0:
            parsed_attrs[key] = val.strip()
-    for ckey, cval in check_attrs.iteritems():
+    for ckey, cval in check_attrs.items():
        if ckey not in parsed_attrs:
            return False
        elif exact_match and parsed_attrs[ckey] != cval:

View File

@@ -338,7 +338,7 @@ class ConfigCommands(object):
        if param:
            print('%s = %s' % (param, CONF.get(param)))
        else:
-            for key, value in CONF.iteritems():
+            for key, value in CONF.items():
                print('%s = %s' % (key, value))

View File

@@ -697,7 +697,7 @@ class API(base.Base):
            results = []
            not_found = object()
            for cgsnapshot in cgsnapshots:
-                for opt, value in search_opts.iteritems():
+                for opt, value in search_opts.items():
                    if cgsnapshot.get(opt, not_found) != value:
                        break
                else:

View File

@@ -450,7 +450,7 @@ def service_update(context, service_id, values):
def _metadata_refs(metadata_dict, meta_class):
    metadata_refs = []
    if metadata_dict:
-        for k, v in metadata_dict.iteritems():
+        for k, v in metadata_dict.items():
            metadata_ref = meta_class()
            metadata_ref['key'] = k
            metadata_ref['value'] = v
@@ -490,7 +490,7 @@ def iscsi_target_count_by_host(context, host):
def iscsi_target_create_safe(context, values):
    iscsi_target_ref = models.IscsiTarget()
-    for (key, value) in values.iteritems():
+    for (key, value) in values.items():
        iscsi_target_ref[key] = value
    session = get_session()
@@ -1564,14 +1564,14 @@ def _process_volume_filters(query, filters):
    filter_dict = {}
    # Iterate over all filters, special case the filter if necessary
-    for key, value in filters.iteritems():
+    for key, value in filters.items():
        if key == 'metadata':
            # model.VolumeMetadata defines the backref to Volumes as
            # 'volume_metadata' or 'volume_admin_metadata', use those as
            # column attribute keys
            col_attr = getattr(models.Volume, 'volume_metadata')
            col_ad_attr = getattr(models.Volume, 'volume_admin_metadata')
-            for k, v in value.iteritems():
+            for k, v in value.items():
                query = query.filter(or_(col_attr.any(key=k, value=v),
                                         col_ad_attr.any(key=k, value=v)))
        elif isinstance(value, (list, tuple, set, frozenset)):
@@ -1773,7 +1773,7 @@ def _volume_x_metadata_update(context, volume_id, metadata, delete,
        if delete:
            original_metadata = _volume_x_metadata_get(context, volume_id,
                                                       model, session=session)
-            for meta_key, meta_value in original_metadata.iteritems():
+            for meta_key, meta_value in original_metadata.items():
                if meta_key not in metadata:
                    meta_ref = _volume_x_metadata_get_item(context, volume_id,
                                                           meta_key, model,
@@ -2130,7 +2130,7 @@ def snapshot_metadata_update(context, snapshot_id, metadata, delete):
        if delete:
            original_metadata = _snapshot_metadata_get(context, snapshot_id,
                                                       session)
-            for meta_key, meta_value in original_metadata.iteritems():
+            for meta_key, meta_value in original_metadata.items():
                if meta_key not in metadata:
                    meta_ref = _snapshot_metadata_get_item(context,
                                                           snapshot_id,
@@ -2620,7 +2620,7 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id,
    session = get_session()
    with session.begin():
        spec_ref = None
-        for key, value in specs.iteritems():
+        for key, value in specs.items():
            try:
                spec_ref = _volume_type_extra_specs_get_item(
                    context, volume_type_id, key, session)
@@ -2672,7 +2672,7 @@ def qos_specs_create(context, values):
            specs_root.save(session=session)
            # Insert all specification entries for QoS specs
-            for k, v in values['qos_specs'].iteritems():
+            for k, v in values['qos_specs'].items():
                item = dict(key=k, value=v, specs_id=specs_id)
                item['id'] = str(uuid.uuid4())
                spec_entry = models.QualityOfServiceSpecs()

View File

@@ -80,7 +80,7 @@ class CinderException(Exception):
            except AttributeError:
                pass
-        for k, v in self.kwargs.iteritems():
+        for k, v in self.kwargs.items():
            if isinstance(v, Exception):
                self.kwargs[k] = six.text_type(v)
@@ -93,7 +93,7 @@ class CinderException(Exception):
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_LE('Exception in string format operation'))
-                for name, value in kwargs.iteritems():
+                for name, value in kwargs.items():
                    LOG.error(_LE("%(name)s: %(value)s"),
                              {'name': name, 'value': value})
                if CONF.fatal_exception_format_errors:

View File

@@ -65,7 +65,7 @@ class ExtractSchedulerSpecTask(flow_utils.CinderTask):
                'availability_zone': volume_ref.get('availability_zone'),
                'volume_type_id': volume_type_id,
            },
-            'volume_type': list(dict(vol_type).iteritems()),
+            'volume_type': list(dict(vol_type).items()),
        }

    def execute(self, context, request_spec, volume_id, snapshot_id,

View File

@@ -253,7 +253,7 @@ class TestCase(testtools.TestCase):
    def flags(self, **kw):
        """Override CONF variables for a test."""
-        for k, v in kw.iteritems():
+        for k, v in kw.items():
            self.override_config(k, v)

    def log_level(self, level):

View File

@@ -47,7 +47,7 @@ class UsedLimitsTestCase(test.TestCase):
        }
        limits = {}
-        for display_name, q in quota_map.iteritems():
+        for display_name, q in quota_map.items():
            limits[q] = {'limit': 2,
                         'in_use': 1}
@@ -61,6 +61,6 @@ class UsedLimitsTestCase(test.TestCase):
        self.controller.index(fake_req, res)
        abs_limits = res.obj['limits']['absolute']
-        for used_limit, value in abs_limits.iteritems():
+        for used_limit, value in abs_limits.items():
            self.assertEqual(value,
                             limits[quota_map[used_limit]]['in_use'])

View File

@@ -71,7 +71,7 @@ def fake_volume_type_get_all(context, inactive=False, filters=None):
    if filters is None or filters['is_public'] is None:
        return VOLUME_TYPES
    res = {}
-    for k, v in VOLUME_TYPES.iteritems():
+    for k, v in VOLUME_TYPES.items():
        if filters['is_public'] and _has_type_access(k, context.project_id):
            res.update({k: v})
            continue

View File

@@ -91,7 +91,7 @@ class FakeToken(object):
    def __init__(self, **kwargs):
        FakeToken.id_count += 1
        self.id = FakeToken.id_count
-        for k, v in kwargs.iteritems():
+        for k, v in kwargs.items():
            setattr(self, k, v)

View File

@@ -180,7 +180,7 @@ class LoggingVolumeDriver(driver.VolumeDriver):
            if entry['action'] != action:
                continue
            match = True
-            for k, v in kwargs.iteritems():
+            for k, v in kwargs.items():
                if entry.get(k) != v:
                    match = False
                    break

View File

@@ -90,7 +90,7 @@ class FakeHostManager(host_manager.HostManager):
class FakeHostState(host_manager.HostState):
    def __init__(self, host, attribute_dict):
        super(FakeHostState, self).__init__(host)
-        for (key, val) in attribute_dict.iteritems():
+        for (key, val) in attribute_dict.items():
            setattr(self, key, val)
@@ -118,7 +118,7 @@ class FakeNovaClient(object):
        def list(self, detailed=True, search_opts=None):
            matching = list(self._servers)
            if search_opts:
-                for opt, val in search_opts.iteritems():
+                for opt, val in search_opts.items():
                    matching = [m for m in matching
                                if getattr(m, opt, None) == val]
            return matching

View File

@@ -496,7 +496,7 @@ class TestCinderManageCmd(test.TestCase):
    def test_config_commands_list(self):
        with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
            expected_out = ''
-            for key, value in CONF.iteritems():
+            for key, value in CONF.items():
                expected_out += '%s = %s' % (key, value) + '\n'
            config_cmds = cinder_manage.ConfigCommands()

View File

@@ -77,7 +77,7 @@ class ModelsObjectComparatorMixin(object):
        self.assertEqual(
            len(obj1), len(obj2),
            "Keys mismatch: %s" % str(set(obj1.keys()) ^ set(obj2.keys())))
-        for key, value in obj1.iteritems():
+        for key, value in obj1.items():
            self.assertEqual(value, obj2[key])

    def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
@@ -123,7 +123,7 @@ class DBAPIServiceTestCase(BaseTest):
    def test_service_create(self):
        service = self._create_service({})
        self.assertFalse(service['id'] is None)
-        for key, value in self._get_base_values().iteritems():
+        for key, value in self._get_base_values().items():
            self.assertEqual(value, service[key])

    def test_service_destroy(self):
@@ -147,7 +147,7 @@ class DBAPIServiceTestCase(BaseTest):
        }
        db.service_update(self.ctxt, service['id'], new_values)
        updated_service = db.service_get(self.ctxt, service['id'])
-        for key, value in new_values.iteritems():
+        for key, value in new_values.items():
            self.assertEqual(value, updated_service[key])

    def test_service_update_not_found_exception(self):
@@ -1441,7 +1441,7 @@ class DBAPIQuotaTestCase(BaseTest):
        quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'gigabytes')
        expected = {'resource': 'gigabytes', 'project_id': 'p1',
                    'in_use': 0, 'reserved': 2, 'total': 2}
-        for key, value in expected.iteritems():
+        for key, value in expected.items():
            self.assertEqual(value, quota_usage[key], key)

    def test_quota_usage_get_all_by_project(self):

View File

@@ -688,7 +688,7 @@ class FlashSystemDriverTestCase(test.TestCase):
    def _reset_flags(self):
        self.driver.configuration.local_conf.reset()
-        for k, v in self._def_flags.iteritems():
+        for k, v in self._def_flags.items():
            self._set_flag(k, v)

    def _generate_vol_info(self,

View File

@@ -91,7 +91,7 @@ class FlashSystemISCSIDriverTestCase(test.TestCase):
    def _reset_flags(self):
        self.driver.configuration.local_conf.reset()
-        for k, v in self._def_flags.iteritems():
+        for k, v in self._def_flags.items():
            self._set_flag(k, v)

    def _generate_vol_info(self,

View File

@@ -185,7 +185,7 @@ class XIVDS8KFakeProxyDriver(object):
    def create_consistencygroup(self, ctxt, group):
-        volumes = [volume for k, volume in self.volumes.iteritems()
+        volumes = [volume for k, volume in self.volumes.items()
                   if volume['consistencygroup_id'] == group['id']]
        if volumes:
@@ -207,12 +207,12 @@ class XIVDS8KFakeProxyDriver(object):
            volumes.append(volume)
        # Delete snapshots in consistency group
-        self.snapshots = {k: snap for k, snap in self.snapshots.iteritems()
+        self.snapshots = {k: snap for k, snap in self.snapshots.items()
                          if not(snap.get('consistencygroup_id', None)
                                 == group.get('id', None))}
        # Delete volume in consistency group
-        self.volumes = {k: vol for k, vol in self.volumes.iteritems()
+        self.volumes = {k: vol for k, vol in self.volumes.items()
                        if not(vol.get('consistencygroup_id', None)
                               == group.get('id', None))}
@@ -254,7 +254,7 @@ class XIVDS8KFakeProxyDriver(object):
            snapshots.append(snapshot)
        # Delete snapshots in consistency group
-        self.snapshots = {k: snap for k, snap in self.snapshots.iteritems()
+        self.snapshots = {k: snap for k, snap in self.snapshots.items()
                          if not(snap.get('consistencygroup_id', None)
                                 == cgsnapshot.get('cgsnapshot_id', None))}

View File

@@ -87,7 +87,7 @@ class IBMNASDriverTestCase(test.TestCase):
    def _reset_flags(self):
        self._driver.configuration.local_conf.reset()
-        for k, v in self._def_flags.iteritems():
+        for k, v in self._def_flags.items():
            self._set_flag(k, v)

    def test_check_for_setup_error(self):

View File

@@ -456,7 +456,7 @@ class FakeDirectCmodeHTTPConnection(object):
        if not headers:
            headers = {}
        req_str = '%s %s HTTP/1.1\r\n' % (method, path)
-        for key, value in headers.iteritems():
+        for key, value in headers.items():
            req_str += "%s: %s\r\n" % (key, value)
        if data:
            req_str += '\r\n%s' % data
@@ -1138,7 +1138,7 @@ class FakeDirect7modeHTTPConnection(object):
        if not headers:
            headers = {}
        req_str = '%s %s HTTP/1.1\r\n' % (method, path)
-        for key, value in headers.iteritems():
+        for key, value in headers.items():
            req_str += "%s: %s\r\n" % (key, value)
        if data:
            req_str += '\r\n%s' % data

View File

@@ -264,7 +264,7 @@ class FakeDirectCmodeHTTPConnection(object):
        if not headers:
            headers = {}
        req_str = '%s %s HTTP/1.1\r\n' % (method, path)
-        for key, value in headers.iteritems():
+        for key, value in headers.items():
            req_str += "%s: %s\r\n" % (key, value)
        if data:
            req_str += '\r\n%s' % data

View File

@@ -1253,7 +1253,7 @@ port_speed!N/A
        filter_key = kwargs['filtervalue'].split('=')[0]
        filter_value = kwargs['filtervalue'].split('=')[1]
        to_delete = []
-        for k, v in self._fcmappings_list.iteritems():
+        for k, v in self._fcmappings_list.items():
            if str(v[filter_key]) == filter_value:
                source = self._volumes_list[v['source']]
                target = self._volumes_list[v['target']]
@@ -1541,7 +1541,7 @@ port_speed!N/A
        params = ['name', 'warning', 'udid',
                  'autoexpand', 'easytier', 'primary']
-        for key, value in kwargs.iteritems():
+        for key, value in kwargs.items():
            if key == 'easytier':
                vol['easy_tier'] = value
                continue
@@ -1765,7 +1765,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
    def _reset_flags(self):
        self.driver.configuration.local_conf.reset()
-        for k, v in self._def_flags.iteritems():
+        for k, v in self._def_flags.items():
            self._set_flag(k, v)

    def _assert_vol_exists(self, name, exists):
@@ -2206,7 +2206,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
        for idx in range(len(opts_list)):
            attrs = self._create_test_vol(opts_list[idx])
            for k, v in chck_list[idx].iteritems():
-            for k, v in chck_list[idx].iteritems():
+            for k, v in chck_list[idx].items():
                try:
                    if k[0] == '-':
                        k = k[1:]
@@ -2340,14 +2340,14 @@ class StorwizeSVCDriverTestCase(test.TestCase):
            ret = self.driver.initialize_connection(volume1, self._connector)
            self.assertEqual(ret['driver_volume_type'],
                             expected[protocol]['driver_volume_type'])
-            for k, v in expected[protocol]['data'].iteritems():
+            for k, v in expected[protocol]['data'].items():
                self.assertEqual(ret['data'][k], v)
            # Initialize again, should notice it and do nothing
            ret = self.driver.initialize_connection(volume1, self._connector)
            self.assertEqual(ret['driver_volume_type'],
                             expected[protocol]['driver_volume_type'])
-            for k, v in expected[protocol]['data'].iteritems():
+            for k, v in expected[protocol]['data'].items():
                self.assertEqual(ret['data'][k], v)
            # Try to delete the 1st volume (should fail because it is mapped)
@@ -2382,7 +2382,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
            self.assertEqual(
                ret['driver_volume_type'],
                expected_fc_npiv['driver_volume_type'])
-            for k, v in expected_fc_npiv['data'].iteritems():
+            for k, v in expected_fc_npiv['data'].items():
                self.assertEqual(ret['data'][k], v)
            self._set_flag('storwize_svc_npiv_compatibility_mode',
                           False)

View File

@@ -68,7 +68,7 @@ class VolumeGlanceMetadataTestCase(test.TestCase):
        metadata = db.volume_glance_metadata_get(ctxt, 2)
        self.assertEqual(len(metadata), 3)
        for expected, meta in zip(expected_metadata_2, metadata):
-            for key, value in expected.iteritems():
+            for key, value in expected.items():
                self.assertEqual(meta[key], value)

        self.assertRaises(exception.GlanceMetadataExists,

View File

@@ -68,7 +68,7 @@ class VolumeTypeTestCase(test.TestCase):
        self.assertEqual(self.vol_type1_description, new['description'])
-        for k, v in self.vol_type1_specs.iteritems():
+        for k, v in self.vol_type1_specs.items():
            self.assertEqual(v, new['extra_specs'][k],
                             'one of fields does not match')

View File

@@ -35,7 +35,7 @@ class VolumeTypeExtraSpecsTestCase(test.TestCase):
        self.addCleanup(db.volume_type_destroy, context.get_admin_context(),
                        self.vol_type1['id'])
        self.volume_type1_id = ref.id
-        for k, v in self.vol_type1_specs.iteritems():
+        for k, v in self.vol_type1_specs.items():
            self.vol_type1_specs[k] = str(v)

        self.vol_type2_noextra = dict(name="TEST: Volume type without extra")

View File

@@ -313,7 +313,7 @@ class ExceptionTest(test.TestCase):
            self.assertEqual(resp.status_int, exception_type.code, resp.body)
            if hasattr(exception_type, 'headers'):
-                for (key, value) in exception_type.headers.iteritems():
+                for (key, value) in exception_type.headers.items():
                    self.assertIn(key, resp.headers)
                    self.assertEqual(resp.headers[key], value)

View File

@@ -17,7 +17,6 @@ Mock unit tests for the NetApp block storage driver interfaces
import mock
-import six

from cinder import test
from cinder.volume.drivers.netapp.dataontap import block_7mode
@@ -69,5 +68,5 @@ class NetAppBlockStorageDriverInterfaceTestCase(test.TestCase):
    def _get_local_functions(self, obj):
        """Get function names of an object without superclass functions."""
-        return set([key for key, value in six.iteritems(type(obj).__dict__)
+        return set([key for key, value in type(obj).__dict__.items()
                    if callable(value)])
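The file above also drops its six import: six.iteritems(d) delegates to d.iteritems() on Python 2 and to iter(d.items()) on Python 3, so plain d.items() is a drop-in replacement (it merely builds a list on Python 2). A small sketch of the equivalence, assuming the six library is installed:

import six

data = {'a': 1, 'b': 2}
# Both expressions yield the same key/value pairs on either interpreter;
# .items() is the spelling this change standardizes on.
assert sorted(six.iteritems(data)) == sorted(data.items())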

View File

@@ -92,7 +92,7 @@ class NetAppDriverFactoryTestCase(test.TestCase):
        registry = na_common.NETAPP_UNIFIED_DRIVER_REGISTRY
        for family in six.iterkeys(registry):
-            for protocol, full_class_name in six.iteritems(registry[family]):
+            for protocol, full_class_name in registry[family].items():
                driver = na_common.NetAppDriver.create_driver(
                    family, protocol, **kwargs)
                self.assertEqual(full_class_name, get_full_class_name(driver))

View File

@@ -118,7 +118,7 @@ def check_exclusive_options(**kwargs):
    pretty_keys = kwargs.pop("pretty_keys", True)
    exclusive_options = {}
-    for (k, v) in kwargs.iteritems():
+    for (k, v) in kwargs.items():
        if v is not None:
            exclusive_options[k] = True
@@ -714,7 +714,7 @@ def add_visible_admin_metadata(volume):
        for item in orig_meta:
            if item['key'] in visible_admin_meta.keys():
                item['value'] = visible_admin_meta.pop(item['key'])
-        for key, value in visible_admin_meta.iteritems():
+        for key, value in visible_admin_meta.items():
            orig_meta.append({'key': key, 'value': value})
        volume['volume_metadata'] = orig_meta
        # avoid circular ref when vol is a Volume instance

View File

@@ -522,7 +522,7 @@ class API(base.Base):
            results = []
            not_found = object()
            for snapshot in snapshots:
-                for opt, value in search_opts.iteritems():
+                for opt, value in search_opts.items():
                    if snapshot.get(opt, not_found) != value:
                        break
                else:
@@ -954,7 +954,7 @@ class API(base.Base):
        if not metadata:
            metadata = {}
-        for k, v in metadata.iteritems():
+        for k, v in metadata.items():
            if len(k) == 0:
                msg = _("Metadata property key blank.")
                LOG.warning(msg)

View File

@@ -127,7 +127,7 @@ class BlockDeviceDriver(driver.BaseVD, driver.LocalVD, driver.CloneableVD,
        used_devices = self._get_used_devices()
        total_size = 0
        free_size = 0
-        for device, size in dict_of_devices_sizes.iteritems():
+        for device, size in dict_of_devices_sizes.items():
            if device not in used_devices:
                free_size += size
            total_size += size

View File

@@ -86,7 +86,7 @@ class CloudByteISCSIDriver(san.SanISCSIDriver):
        error_msg = ""
        # error_data is a single key value dict
-        for key, value in error_data.iteritems():
+        for key, value in error_data.items():
            error_msg = value.get('errortext')

        return error_msg
@@ -188,7 +188,7 @@ class CloudByteISCSIDriver(san.SanISCSIDriver):
            # Nothing to override
            return default_dict
-        for key, value in default_dict.iteritems():
+        for key, value in default_dict.items():
            # Fill the user dict with default options based on condition
            if filtered_user_dict.get(key) is None and value is not None:
                filtered_user_dict[key] = value

View File

@@ -897,7 +897,7 @@ class ScaleIODriver(driver.VolumeDriver):
        if type_id:
            volume_type = volume_types.get_volume_type(ctxt, type_id)
            specs = volume_type.get('extra_specs')
-            for key, value in specs.iteritems():
+            for key, value in specs.items():
                specs[key] = value
        return specs

View File

@@ -1620,7 +1620,7 @@ class RestCommon(object):
            kvs = specs

        LOG.info(_LI('The QoS sepcs is: %s.'), kvs)
-        for key, value in kvs.iteritems():
+        for key, value in kvs.items():
            if key in huawei_valid_keys:
                qos[key.upper()] = value
@@ -1696,7 +1696,7 @@ class RestCommon(object):
    def _check_qos_high_priority(self, qos):
        """Check QoS priority."""
-        for key, value in qos.iteritems():
+        for key, value in qos.items():
            if (key.find('MIN') == 0) or (key.find('LATENCY') == 0):
                return True

View File

@@ -588,7 +588,7 @@ class FlashSystemDriver(san.SanDriver):
        # Get preferred node and other nodes in I/O group
        preferred_node_entry = None
        io_group_nodes = []
-        for k, node in self._storage_nodes.iteritems():
+        for k, node in self._storage_nodes.items():
            if vdisk_params['protocol'] != node['protocol']:
                continue
            if node['id'] == preferred_node:
@@ -644,7 +644,7 @@ class FlashSystemDriver(san.SanDriver):
            ctxt = context.get_admin_context()
            volume_type = volume_types.get_volume_type(ctxt, type_id)
            specs = volume_type.get('extra_specs')
-            for k, value in specs.iteritems():
+            for k, value in specs.items():
                # Get the scope, if using scope format
                key_split = k.split(':')
                if len(key_split) == 1:
@@ -1084,7 +1084,7 @@ class FlashSystemDriver(san.SanDriver):
        # For each node, check what connection modes it supports. Delete any
        # nodes that do not support any types (may be partially configured).
        to_delete = []
-        for k, node in self._storage_nodes.iteritems():
+        for k, node in self._storage_nodes.items():
            if not node['WWPN']:
                to_delete.append(k)

View File

@@ -200,7 +200,7 @@ class StorwizeSVCDriver(san.SanDriver):
        # For each node, check what connection modes it supports. Delete any
        # nodes that do not support any types (may be partially configured).
        to_delete = []
-        for k, node in self._state['storage_nodes'].iteritems():
+        for k, node in self._state['storage_nodes'].items():
            if ((len(node['ipv4']) or len(node['ipv6']))
                    and len(node['iscsi_name'])):
                node['enabled_protocols'].append('iSCSI')

View File

@@ -455,7 +455,7 @@ class StorwizeHelpers(object):
    def _get_opts_from_specs(self, opts, specs):
        qos = {}
-        for k, value in specs.iteritems():
+        for k, value in specs.items():
            # Get the scope, if using scope format
            key_split = k.split(':')
            if len(key_split) == 1:
@@ -1033,7 +1033,7 @@ class StorwizeHelpers(object):
    def add_vdisk_qos(self, vdisk, qos):
        """Add the QoS configuration to the volume."""
-        for key, value in qos.iteritems():
+        for key, value in qos.items():
            if key in self.svc_qos_keys.keys():
                param = self.svc_qos_keys[key]['param']
                self.ssh.chvdisk(vdisk, ['-' + param, str(value)])
@@ -1046,7 +1046,7 @@ class StorwizeHelpers(object):
        available in the QoS configuration, the value is taken from it;
        if not, the value will be set to default.
        """
-        for key, value in self.svc_qos_keys.iteritems():
+        for key, value in self.svc_qos_keys.items():
            param = value['param']
            if key in qos.keys():
                # If the value is set in QoS, take the value from
@@ -1059,7 +1059,7 @@ class StorwizeHelpers(object):
    def disable_vdisk_qos(self, vdisk, qos):
        """Disable the QoS."""
-        for key, value in qos.iteritems():
+        for key, value in qos.items():
            if key in self.svc_qos_keys.keys():
                param = self.svc_qos_keys[key]['param']
                # Take the default value.

View File

@@ -174,7 +174,7 @@ def validate_qos_spec(qos_spec):
        return
    normalized_qos_keys = [key.lower() for key in QOS_KEYS]
    keylist = []
-    for key, value in six.iteritems(qos_spec):
+    for key, value in qos_spec.items():
        lower_case_key = key.lower()
        if lower_case_key not in normalized_qos_keys:
            msg = _('Unrecognized QOS keyword: "%s"') % key

View File

@@ -172,7 +172,7 @@ class RemoteFSDriver(driver.VolumeDriver):
                          self.configuration.nas_secure_file_permissions,
                          'nas_secure_file_operations':
                          self.configuration.nas_secure_file_operations}
-        for opt_name, opt_value in secure_options.iteritems():
+        for opt_name, opt_value in secure_options.items():
            if opt_value not in valid_secure_opts:
                err_parms = {'name': opt_name, 'value': opt_value}
                msg = _("NAS config '%(name)s=%(value)s' invalid. Must be "
@@ -978,7 +978,7 @@ class RemoteFSSnapDriver(RemoteFSDriver):
        new_base_file = base_file_img_info.backing_file

        base_id = None
-        for key, value in snap_info.iteritems():
+        for key, value in snap_info.items():
            if value == base_file and key != 'active':
                base_id = key
                break

View File

@@ -869,7 +869,7 @@ class HP3PARCommon(object):
        else:
            kvs = specs

-        for key, value in kvs.iteritems():
+        for key, value in kvs.items():
            if 'qos:' in key:
                fields = key.split(':')
                key = fields[1]
@@ -880,7 +880,7 @@ class HP3PARCommon(object):
    def _get_keys_by_volume_type(self, volume_type):
        hp3par_keys = {}
        specs = volume_type.get('extra_specs')
-        for key, value in specs.iteritems():
+        for key, value in specs.items():
            if ':' in key:
                fields = key.split(':')
                key = fields[1]

View File

@@ -655,7 +655,7 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
        # identify key (nsp) of least used nsp
        current_smallest_count = sys.maxint
-        for (nsp, count) in nsp_counts.iteritems():
+        for (nsp, count) in nsp_counts.items():
            if count < current_smallest_count:
                current_least_used_nsp = nsp
                current_smallest_count = count

View File

@@ -438,7 +438,7 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
    def _get_lh_extra_specs(self, extra_specs, valid_keys):
        """Get LeftHand extra_specs (valid_keys only)."""
        extra_specs_of_interest = {}
-        for key, value in extra_specs.iteritems():
+        for key, value in extra_specs.items():
            if key in valid_keys:
                extra_specs_of_interest[key] = value
        return extra_specs_of_interest
@@ -446,7 +446,7 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
    def _map_extra_specs(self, extra_specs):
        """Map the extra spec key/values to LeftHand key/values."""
        client_options = {}
-        for key, value in extra_specs.iteritems():
+        for key, value in extra_specs.items():
            # map extra spec key to lh client option key
            client_key = extra_specs_key_map[key]
            # map extra spect value to lh client option value
@@ -540,7 +540,7 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
        # only set the ones that have changed
        changed_extra_specs = {}
-        for key, value in lh_extra_specs.iteritems():
+        for key, value in lh_extra_specs.items():
            (old, new) = diff['extra_specs'][key]
            if old != new:
                changed_extra_specs[key] = value

View File

@@ -541,7 +541,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
        else:
            options_dict['username'] = 'guest'
        named_options = ','.join("%s=%s" % (key, val) for (key, val)
-                                 in options_dict.iteritems())
+                                 in options_dict.items())
        options_list = ','.join(options_list)
        flags = '-o ' + ','.join([named_options, options_list])

View File

@@ -493,7 +493,7 @@ class SolidFireDriver(san.SanISCSIDriver):
        else:
            kvs = specs

-        for key, value in kvs.iteritems():
+        for key, value in kvs.items():
            if ':' in key:
                fields = key.split(':')
                key = fields[1]

View File

@@ -241,7 +241,7 @@ class StorPoolDriver(driver.TransferVD, driver.ExtendVD, driver.CloneableVD,
        used = 0
        free = 0
        agSize = 512 * units.Mi
-        for (id, desc) in dl.iteritems():
+        for (id, desc) in dl.items():
            if desc.generationLeft != -1:
                continue
            total += desc.agCount * agSize
@@ -405,7 +405,7 @@ class StorPoolDriver(driver.TransferVD, driver.ExtendVD, driver.CloneableVD,
        templ = self.configuration.storpool_template
        repl = self.configuration.storpool_replication
        if diff['extra_specs']:
-            for (k, v) in diff['extra_specs'].iteritems():
+            for (k, v) in diff['extra_specs'].items():
                if k == 'volume_backend_name':
                    if v[0] != v[1]:
                        # Retype of a volume backend not supported yet,

View File

@@ -1073,7 +1073,7 @@ class XIOISEDriver(object):
            volume_type = volume_types.get_volume_type(ctxt, type_id)
            extra_specs = volume_type.get('extra_specs')
            # Parse out RAID, pool and affinity values
-            for key, value in extra_specs.iteritems():
+            for key, value in extra_specs.items():
                subkey = ''
                if ':' in key:
                    fields = key.split(':')
@@ -1110,7 +1110,7 @@ class XIOISEDriver(object):
        else:
            kvs = volume_type.get('extra_specs')
        # Parse out min, max and burst values
-        for key, value in kvs.iteritems():
+        for key, value in kvs.items():
            if ':' in key:
                fields = key.split(':')
                key = fields[1]

View File

@@ -281,7 +281,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
        if not metadata:
            metadata = {}

-        for (k, v) in metadata.iteritems():
+        for (k, v) in metadata.items():
            if len(k) == 0:
                msg = _("Metadata property key blank")
                LOG.warning(msg)

View File

@@ -242,7 +242,7 @@ def get_all_specs(context, inactive=False, search_opts=None):
        LOG.debug("Searching by: %s", search_opts)

        def _check_specs_match(qos_specs, searchdict):
-            for k, v in searchdict.iteritems():
+            for k, v in searchdict.items():
                if ((k not in qos_specs['specs'].keys() or
                        qos_specs['specs'][k] != v)):
                    return False
@@ -252,9 +252,9 @@ def get_all_specs(context, inactive=False, search_opts=None):
        filter_mapping = {'qos_specs': _check_specs_match}

        result = {}
-        for name, args in qos_specs.iteritems():
+        for name, args in qos_specs.items():
            # go over all filters in the list
-            for opt, values in search_opts.iteritems():
+            for opt, values in search_opts.items():
                try:
                    filter_func = filter_mapping[opt]
                except KeyError:

View File

@@ -102,7 +102,7 @@ def get_all_types(context, inactive=0, search_opts=None):
        LOG.debug("Searching by: %s" % search_opts)

        def _check_extra_specs_match(vol_type, searchdict):
-            for k, v in searchdict.iteritems():
+            for k, v in searchdict.items():
                if (k not in vol_type['extra_specs'].keys()
                        or vol_type['extra_specs'][k] != v):
                    return False
@@ -112,9 +112,9 @@ def get_all_types(context, inactive=0, search_opts=None):
        filter_mapping = {'extra_specs': _check_extra_specs_match}

        result = {}
-        for type_name, type_args in vol_types.iteritems():
+        for type_name, type_args in vol_types.items():
            # go over all filters in the list
-            for opt, values in search_opts.iteritems():
+            for opt, values in search_opts.items():
                try:
                    filter_func = filter_mapping[opt]
                except KeyError:
@@ -258,11 +258,11 @@ def volume_types_diff(context, vol_type_id1, vol_type_id2):
            dict1 = {}
        if dict2 is None:
            dict2 = {}
-        for k, v in dict1.iteritems():
+        for k, v in dict1.items():
            res[k] = (v, dict2.get(k))
            if k not in dict2 or res[k][0] != res[k][1]:
                equal = False
-        for k, v in dict2.iteritems():
+        for k, v in dict2.items():
            res[k] = (dict1.get(k), v)
            if k not in dict1 or res[k][0] != res[k][1]:
                equal = False

View File

@@ -445,7 +445,7 @@ class Debug(Middleware):
        resp = req.get_response(self.application)
        print(('*' * 40) + ' RESPONSE HEADERS')  # noqa
-        for (key, value) in resp.headers.iteritems():
+        for (key, value) in resp.headers.items():
            print(key, '=', value)  # noqa
        print()  # noqa