Merge "Replace dict.iteritems() with dict.items()"

Authored by Jenkins on 2015-06-15 22:10:18 +00:00; committed by Gerrit Code Review
commit 52e2e8e271
55 changed files with 100 additions and 101 deletions
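
Every hunk below makes the same mechanical change: Python 2's dict.iteritems() returns a lazy iterator and was removed in Python 3, while dict.items() exists on both interpreters (a list copy on Python 2, a lightweight view on Python 3), so plain .items() is the portable spelling. A minimal before/after sketch, using made-up data rather than anything from the Cinder tree:

    # Hypothetical example data, for illustration only.
    update_values = {'status': 'enabled', 'host': 'node-1'}

    # Python 2 only; raises AttributeError on Python 3:
    #     for key, value in update_values.iteritems():
    #         ...

    # Portable form adopted throughout this commit:
    for key, value in update_values.items():
        print('%s = %s' % (key, value))

For simple iteration the behaviour is identical; the only cost is that Python 2 materialises a temporary list, which is rarely a concern for the small dicts these loops handle.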


@ -157,7 +157,7 @@ class HostController(wsgi.Controller):
def update(self, req, id, body):
authorize(req.environ['cinder.context'])
update_values = {}
for raw_key, raw_val in body.iteritems():
for raw_key, raw_val in body.items():
key = raw_key.lower().strip()
val = raw_val.lower().strip()
if key == "status":
@ -171,7 +171,7 @@ class HostController(wsgi.Controller):
raise webob.exc.HTTPBadRequest(explanation=explanation)
update_setters = {'status': self._set_enabled_status}
result = {}
for key, value in update_values.iteritems():
for key, value in update_values.items():
result.update(update_setters[key](req, id, value))
return result


@ -40,7 +40,7 @@ class UsedLimitsController(wsgi.Controller):
}
used_limits = {}
for display_name, single_quota in quota_map.iteritems():
for display_name, single_quota in quota_map.items():
if single_quota in quotas:
used_limits[display_name] = quotas[single_quota]['in_use']


@ -151,7 +151,7 @@ class ExtensionsResource(wsgi.Resource):
@wsgi.serializers(xml=ExtensionsTemplate)
def index(self, req):
extensions = []
for _alias, ext in self.extension_manager.extensions.iteritems():
for _alias, ext in self.extension_manager.extensions.items():
extensions.append(self._translate(ext))
return dict(extensions=extensions)


@ -275,7 +275,7 @@ class VolumeController(wsgi.Controller):
search_opts.pop('limit', None)
search_opts.pop('offset', None)
for k, v in search_opts.iteritems():
for k, v in search_opts.items():
try:
search_opts[k] = ast.literal_eval(v)
except (ValueError, SyntaxError):


@ -244,7 +244,7 @@ class VolumeController(wsgi.Controller):
filters['display_name'] = filters['name']
del filters['name']
for k, v in filters.iteritems():
for k, v in filters.items():
try:
filters[k] = ast.literal_eval(v)
except (ValueError, SyntaxError):


@ -55,7 +55,7 @@ class ViewBuilder(object):
"injected_file_content_bytes": ["maxPersonalitySize"],
}
limits = {}
for name, value in absolute_limits.iteritems():
for name, value in absolute_limits.items():
if name in limit_names and value is not None:
for name in limit_names[name]:
limits[name] = value


@ -176,7 +176,7 @@ def _check_dsmc_output(output, check_attrs, exact_match=True):
if sep is not None and key is not None and len(val.strip()) > 0:
parsed_attrs[key] = val.strip()
for ckey, cval in check_attrs.iteritems():
for ckey, cval in check_attrs.items():
if ckey not in parsed_attrs:
return False
elif exact_match and parsed_attrs[ckey] != cval:


@ -338,7 +338,7 @@ class ConfigCommands(object):
if param:
print('%s = %s' % (param, CONF.get(param)))
else:
for key, value in CONF.iteritems():
for key, value in CONF.items():
print('%s = %s' % (key, value))


@ -697,7 +697,7 @@ class API(base.Base):
results = []
not_found = object()
for cgsnapshot in cgsnapshots:
for opt, value in search_opts.iteritems():
for opt, value in search_opts.items():
if cgsnapshot.get(opt, not_found) != value:
break
else:


@ -450,7 +450,7 @@ def service_update(context, service_id, values):
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.iteritems():
for k, v in metadata_dict.items():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
@ -490,7 +490,7 @@ def iscsi_target_count_by_host(context, host):
def iscsi_target_create_safe(context, values):
iscsi_target_ref = models.IscsiTarget()
for (key, value) in values.iteritems():
for (key, value) in values.items():
iscsi_target_ref[key] = value
session = get_session()
@ -1564,14 +1564,14 @@ def _process_volume_filters(query, filters):
filter_dict = {}
# Iterate over all filters, special case the filter if necessary
for key, value in filters.iteritems():
for key, value in filters.items():
if key == 'metadata':
# model.VolumeMetadata defines the backref to Volumes as
# 'volume_metadata' or 'volume_admin_metadata', use those as
# column attribute keys
col_attr = getattr(models.Volume, 'volume_metadata')
col_ad_attr = getattr(models.Volume, 'volume_admin_metadata')
for k, v in value.iteritems():
for k, v in value.items():
query = query.filter(or_(col_attr.any(key=k, value=v),
col_ad_attr.any(key=k, value=v)))
elif isinstance(value, (list, tuple, set, frozenset)):
@ -1773,7 +1773,7 @@ def _volume_x_metadata_update(context, volume_id, metadata, delete,
if delete:
original_metadata = _volume_x_metadata_get(context, volume_id,
model, session=session)
for meta_key, meta_value in original_metadata.iteritems():
for meta_key, meta_value in original_metadata.items():
if meta_key not in metadata:
meta_ref = _volume_x_metadata_get_item(context, volume_id,
meta_key, model,
@ -2130,7 +2130,7 @@ def snapshot_metadata_update(context, snapshot_id, metadata, delete):
if delete:
original_metadata = _snapshot_metadata_get(context, snapshot_id,
session)
for meta_key, meta_value in original_metadata.iteritems():
for meta_key, meta_value in original_metadata.items():
if meta_key not in metadata:
meta_ref = _snapshot_metadata_get_item(context,
snapshot_id,
@ -2620,7 +2620,7 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id,
session = get_session()
with session.begin():
spec_ref = None
for key, value in specs.iteritems():
for key, value in specs.items():
try:
spec_ref = _volume_type_extra_specs_get_item(
context, volume_type_id, key, session)
@ -2672,7 +2672,7 @@ def qos_specs_create(context, values):
specs_root.save(session=session)
# Insert all specification entries for QoS specs
for k, v in values['qos_specs'].iteritems():
for k, v in values['qos_specs'].items():
item = dict(key=k, value=v, specs_id=specs_id)
item['id'] = str(uuid.uuid4())
spec_entry = models.QualityOfServiceSpecs()


@ -80,7 +80,7 @@ class CinderException(Exception):
except AttributeError:
pass
for k, v in self.kwargs.iteritems():
for k, v in self.kwargs.items():
if isinstance(v, Exception):
self.kwargs[k] = six.text_type(v)
@ -93,7 +93,7 @@ class CinderException(Exception):
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in kwargs.iteritems():
for name, value in kwargs.items():
LOG.error(_LE("%(name)s: %(value)s"),
{'name': name, 'value': value})
if CONF.fatal_exception_format_errors:


@ -65,7 +65,7 @@ class ExtractSchedulerSpecTask(flow_utils.CinderTask):
'availability_zone': volume_ref.get('availability_zone'),
'volume_type_id': volume_type_id,
},
'volume_type': list(dict(vol_type).iteritems()),
'volume_type': list(dict(vol_type).items()),
}
def execute(self, context, request_spec, volume_id, snapshot_id,
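
A small subtlety in this hunk: on Python 3, dict.items() returns a view rather than a list, so it is the explicit list() wrapper that keeps 'volume_type' a concrete list of (key, value) tuples on both interpreters. An illustration with a stand-in dict, not a real Cinder volume type:

    vol_type = {'name': 'gold', 'qos': 'high'}   # stand-in data
    pairs = list(dict(vol_type).items())
    # On Python 2 and Python 3 alike, 'pairs' is a plain list of tuples,
    # e.g. [('name', 'gold'), ('qos', 'high')] (ordering may differ).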


@ -253,7 +253,7 @@ class TestCase(testtools.TestCase):
def flags(self, **kw):
"""Override CONF variables for a test."""
for k, v in kw.iteritems():
for k, v in kw.items():
self.override_config(k, v)
def log_level(self, level):


@ -47,7 +47,7 @@ class UsedLimitsTestCase(test.TestCase):
}
limits = {}
for display_name, q in quota_map.iteritems():
for display_name, q in quota_map.items():
limits[q] = {'limit': 2,
'in_use': 1}
@ -61,6 +61,6 @@ class UsedLimitsTestCase(test.TestCase):
self.controller.index(fake_req, res)
abs_limits = res.obj['limits']['absolute']
for used_limit, value in abs_limits.iteritems():
for used_limit, value in abs_limits.items():
self.assertEqual(value,
limits[quota_map[used_limit]]['in_use'])


@ -71,7 +71,7 @@ def fake_volume_type_get_all(context, inactive=False, filters=None):
if filters is None or filters['is_public'] is None:
return VOLUME_TYPES
res = {}
for k, v in VOLUME_TYPES.iteritems():
for k, v in VOLUME_TYPES.items():
if filters['is_public'] and _has_type_access(k, context.project_id):
res.update({k: v})
continue


@ -91,7 +91,7 @@ class FakeToken(object):
def __init__(self, **kwargs):
FakeToken.id_count += 1
self.id = FakeToken.id_count
for k, v in kwargs.iteritems():
for k, v in kwargs.items():
setattr(self, k, v)


@ -180,7 +180,7 @@ class LoggingVolumeDriver(driver.VolumeDriver):
if entry['action'] != action:
continue
match = True
for k, v in kwargs.iteritems():
for k, v in kwargs.items():
if entry.get(k) != v:
match = False
break


@ -90,7 +90,7 @@ class FakeHostManager(host_manager.HostManager):
class FakeHostState(host_manager.HostState):
def __init__(self, host, attribute_dict):
super(FakeHostState, self).__init__(host)
for (key, val) in attribute_dict.iteritems():
for (key, val) in attribute_dict.items():
setattr(self, key, val)
@ -118,7 +118,7 @@ class FakeNovaClient(object):
def list(self, detailed=True, search_opts=None):
matching = list(self._servers)
if search_opts:
for opt, val in search_opts.iteritems():
for opt, val in search_opts.items():
matching = [m for m in matching
if getattr(m, opt, None) == val]
return matching


@ -496,7 +496,7 @@ class TestCinderManageCmd(test.TestCase):
def test_config_commands_list(self):
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
expected_out = ''
for key, value in CONF.iteritems():
for key, value in CONF.items():
expected_out += '%s = %s' % (key, value) + '\n'
config_cmds = cinder_manage.ConfigCommands()


@ -77,7 +77,7 @@ class ModelsObjectComparatorMixin(object):
self.assertEqual(
len(obj1), len(obj2),
"Keys mismatch: %s" % str(set(obj1.keys()) ^ set(obj2.keys())))
for key, value in obj1.iteritems():
for key, value in obj1.items():
self.assertEqual(value, obj2[key])
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
@ -123,7 +123,7 @@ class DBAPIServiceTestCase(BaseTest):
def test_service_create(self):
service = self._create_service({})
self.assertFalse(service['id'] is None)
for key, value in self._get_base_values().iteritems():
for key, value in self._get_base_values().items():
self.assertEqual(value, service[key])
def test_service_destroy(self):
@ -147,7 +147,7 @@ class DBAPIServiceTestCase(BaseTest):
}
db.service_update(self.ctxt, service['id'], new_values)
updated_service = db.service_get(self.ctxt, service['id'])
for key, value in new_values.iteritems():
for key, value in new_values.items():
self.assertEqual(value, updated_service[key])
def test_service_update_not_found_exception(self):
@ -1441,7 +1441,7 @@ class DBAPIQuotaTestCase(BaseTest):
quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'gigabytes')
expected = {'resource': 'gigabytes', 'project_id': 'p1',
'in_use': 0, 'reserved': 2, 'total': 2}
for key, value in expected.iteritems():
for key, value in expected.items():
self.assertEqual(value, quota_usage[key], key)
def test_quota_usage_get_all_by_project(self):


@ -688,7 +688,7 @@ class FlashSystemDriverTestCase(test.TestCase):
def _reset_flags(self):
self.driver.configuration.local_conf.reset()
for k, v in self._def_flags.iteritems():
for k, v in self._def_flags.items():
self._set_flag(k, v)
def _generate_vol_info(self,


@ -91,7 +91,7 @@ class FlashSystemISCSIDriverTestCase(test.TestCase):
def _reset_flags(self):
self.driver.configuration.local_conf.reset()
for k, v in self._def_flags.iteritems():
for k, v in self._def_flags.items():
self._set_flag(k, v)
def _generate_vol_info(self,


@ -185,7 +185,7 @@ class XIVDS8KFakeProxyDriver(object):
def create_consistencygroup(self, ctxt, group):
volumes = [volume for k, volume in self.volumes.iteritems()
volumes = [volume for k, volume in self.volumes.items()
if volume['consistencygroup_id'] == group['id']]
if volumes:
@ -207,12 +207,12 @@ class XIVDS8KFakeProxyDriver(object):
volumes.append(volume)
# Delete snapshots in consistency group
self.snapshots = {k: snap for k, snap in self.snapshots.iteritems()
self.snapshots = {k: snap for k, snap in self.snapshots.items()
if not(snap.get('consistencygroup_id', None)
== group.get('id', None))}
# Delete volume in consistency group
self.volumes = {k: vol for k, vol in self.volumes.iteritems()
self.volumes = {k: vol for k, vol in self.volumes.items()
if not(vol.get('consistencygroup_id', None)
== group.get('id', None))}
@ -254,7 +254,7 @@ class XIVDS8KFakeProxyDriver(object):
snapshots.append(snapshot)
# Delete snapshots in consistency group
self.snapshots = {k: snap for k, snap in self.snapshots.iteritems()
self.snapshots = {k: snap for k, snap in self.snapshots.items()
if not(snap.get('consistencygroup_id', None)
== cgsnapshot.get('cgsnapshot_id', None))}
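
These hunks filter the fake driver's snapshot and volume maps by building a new dict and rebinding the attribute rather than deleting keys in place; that pattern stays safe after the switch because nothing mutates the dict while its items() view is being iterated. A minimal sketch with hypothetical data:

    snapshots = {'s1': {'consistencygroup_id': 'cg1'},
                 's2': {'consistencygroup_id': None}}
    group_id = 'cg1'

    # Safe on Python 2 and 3: build a new dict, then rebind the name.
    snapshots = {k: snap for k, snap in snapshots.items()
                 if snap.get('consistencygroup_id') != group_id}

    # Unsafe on Python 3: deleting keys while iterating the live items() view.
    # for k, snap in snapshots.items():
    #     if snap.get('consistencygroup_id') == group_id:
    #         del snapshots[k]   # RuntimeError: dictionary changed size during iteration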


@ -87,7 +87,7 @@ class IBMNASDriverTestCase(test.TestCase):
def _reset_flags(self):
self._driver.configuration.local_conf.reset()
for k, v in self._def_flags.iteritems():
for k, v in self._def_flags.items():
self._set_flag(k, v)
def test_check_for_setup_error(self):


@ -456,7 +456,7 @@ class FakeDirectCmodeHTTPConnection(object):
if not headers:
headers = {}
req_str = '%s %s HTTP/1.1\r\n' % (method, path)
for key, value in headers.iteritems():
for key, value in headers.items():
req_str += "%s: %s\r\n" % (key, value)
if data:
req_str += '\r\n%s' % data
@ -1138,7 +1138,7 @@ class FakeDirect7modeHTTPConnection(object):
if not headers:
headers = {}
req_str = '%s %s HTTP/1.1\r\n' % (method, path)
for key, value in headers.iteritems():
for key, value in headers.items():
req_str += "%s: %s\r\n" % (key, value)
if data:
req_str += '\r\n%s' % data


@ -264,7 +264,7 @@ class FakeDirectCmodeHTTPConnection(object):
if not headers:
headers = {}
req_str = '%s %s HTTP/1.1\r\n' % (method, path)
for key, value in headers.iteritems():
for key, value in headers.items():
req_str += "%s: %s\r\n" % (key, value)
if data:
req_str += '\r\n%s' % data


@ -1253,7 +1253,7 @@ port_speed!N/A
filter_key = kwargs['filtervalue'].split('=')[0]
filter_value = kwargs['filtervalue'].split('=')[1]
to_delete = []
for k, v in self._fcmappings_list.iteritems():
for k, v in self._fcmappings_list.items():
if str(v[filter_key]) == filter_value:
source = self._volumes_list[v['source']]
target = self._volumes_list[v['target']]
@ -1541,7 +1541,7 @@ port_speed!N/A
params = ['name', 'warning', 'udid',
'autoexpand', 'easytier', 'primary']
for key, value in kwargs.iteritems():
for key, value in kwargs.items():
if key == 'easytier':
vol['easy_tier'] = value
continue
@ -1765,7 +1765,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
def _reset_flags(self):
self.driver.configuration.local_conf.reset()
for k, v in self._def_flags.iteritems():
for k, v in self._def_flags.items():
self._set_flag(k, v)
def _assert_vol_exists(self, name, exists):
@ -2206,7 +2206,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
for idx in range(len(opts_list)):
attrs = self._create_test_vol(opts_list[idx])
for k, v in chck_list[idx].iteritems():
for k, v in chck_list[idx].items():
try:
if k[0] == '-':
k = k[1:]
@ -2340,14 +2340,14 @@ class StorwizeSVCDriverTestCase(test.TestCase):
ret = self.driver.initialize_connection(volume1, self._connector)
self.assertEqual(ret['driver_volume_type'],
expected[protocol]['driver_volume_type'])
for k, v in expected[protocol]['data'].iteritems():
for k, v in expected[protocol]['data'].items():
self.assertEqual(ret['data'][k], v)
# Initialize again, should notice it and do nothing
ret = self.driver.initialize_connection(volume1, self._connector)
self.assertEqual(ret['driver_volume_type'],
expected[protocol]['driver_volume_type'])
for k, v in expected[protocol]['data'].iteritems():
for k, v in expected[protocol]['data'].items():
self.assertEqual(ret['data'][k], v)
# Try to delete the 1st volume (should fail because it is mapped)
@ -2382,7 +2382,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
self.assertEqual(
ret['driver_volume_type'],
expected_fc_npiv['driver_volume_type'])
for k, v in expected_fc_npiv['data'].iteritems():
for k, v in expected_fc_npiv['data'].items():
self.assertEqual(ret['data'][k], v)
self._set_flag('storwize_svc_npiv_compatibility_mode',
False)


@ -68,7 +68,7 @@ class VolumeGlanceMetadataTestCase(test.TestCase):
metadata = db.volume_glance_metadata_get(ctxt, 2)
self.assertEqual(len(metadata), 3)
for expected, meta in zip(expected_metadata_2, metadata):
for key, value in expected.iteritems():
for key, value in expected.items():
self.assertEqual(meta[key], value)
self.assertRaises(exception.GlanceMetadataExists,


@ -68,7 +68,7 @@ class VolumeTypeTestCase(test.TestCase):
self.assertEqual(self.vol_type1_description, new['description'])
for k, v in self.vol_type1_specs.iteritems():
for k, v in self.vol_type1_specs.items():
self.assertEqual(v, new['extra_specs'][k],
'one of fields does not match')


@ -35,7 +35,7 @@ class VolumeTypeExtraSpecsTestCase(test.TestCase):
self.addCleanup(db.volume_type_destroy, context.get_admin_context(),
self.vol_type1['id'])
self.volume_type1_id = ref.id
for k, v in self.vol_type1_specs.iteritems():
for k, v in self.vol_type1_specs.items():
self.vol_type1_specs[k] = str(v)
self.vol_type2_noextra = dict(name="TEST: Volume type without extra")


@ -313,7 +313,7 @@ class ExceptionTest(test.TestCase):
self.assertEqual(resp.status_int, exception_type.code, resp.body)
if hasattr(exception_type, 'headers'):
for (key, value) in exception_type.headers.iteritems():
for (key, value) in exception_type.headers.items():
self.assertIn(key, resp.headers)
self.assertEqual(resp.headers[key], value)


@ -17,7 +17,6 @@ Mock unit tests for the NetApp block storage driver interfaces
import mock
import six
from cinder import test
from cinder.volume.drivers.netapp.dataontap import block_7mode
@ -69,5 +68,5 @@ class NetAppBlockStorageDriverInterfaceTestCase(test.TestCase):
def _get_local_functions(self, obj):
"""Get function names of an object without superclass functions."""
return set([key for key, value in six.iteritems(type(obj).__dict__)
return set([key for key, value in type(obj).__dict__.items()
if callable(value)])
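
six.iteritems() is the six compatibility library's wrapper: it dispatches to d.iteritems() on Python 2 and to an iterator over d.items() on Python 3, so dropping it in favour of plain .items() changes nothing for code that only loops over the pairs. A quick equivalence sketch (assuming six is installed):

    import six

    d = {'a': 1, 'b': 2}
    # Both expressions yield the same (key, value) pairs on either interpreter;
    # the only difference is whether an intermediate list is built on Python 2.
    assert sorted(six.iteritems(d)) == sorted(d.items())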


@ -92,7 +92,7 @@ class NetAppDriverFactoryTestCase(test.TestCase):
registry = na_common.NETAPP_UNIFIED_DRIVER_REGISTRY
for family in six.iterkeys(registry):
for protocol, full_class_name in six.iteritems(registry[family]):
for protocol, full_class_name in registry[family].items():
driver = na_common.NetAppDriver.create_driver(
family, protocol, **kwargs)
self.assertEqual(full_class_name, get_full_class_name(driver))


@ -118,7 +118,7 @@ def check_exclusive_options(**kwargs):
pretty_keys = kwargs.pop("pretty_keys", True)
exclusive_options = {}
for (k, v) in kwargs.iteritems():
for (k, v) in kwargs.items():
if v is not None:
exclusive_options[k] = True
@ -714,7 +714,7 @@ def add_visible_admin_metadata(volume):
for item in orig_meta:
if item['key'] in visible_admin_meta.keys():
item['value'] = visible_admin_meta.pop(item['key'])
for key, value in visible_admin_meta.iteritems():
for key, value in visible_admin_meta.items():
orig_meta.append({'key': key, 'value': value})
volume['volume_metadata'] = orig_meta
# avoid circular ref when vol is a Volume instance


@ -522,7 +522,7 @@ class API(base.Base):
results = []
not_found = object()
for snapshot in snapshots:
for opt, value in search_opts.iteritems():
for opt, value in search_opts.items():
if snapshot.get(opt, not_found) != value:
break
else:
@ -954,7 +954,7 @@ class API(base.Base):
if not metadata:
metadata = {}
for k, v in metadata.iteritems():
for k, v in metadata.items():
if len(k) == 0:
msg = _("Metadata property key blank.")
LOG.warning(msg)


@ -127,7 +127,7 @@ class BlockDeviceDriver(driver.BaseVD, driver.LocalVD, driver.CloneableVD,
used_devices = self._get_used_devices()
total_size = 0
free_size = 0
for device, size in dict_of_devices_sizes.iteritems():
for device, size in dict_of_devices_sizes.items():
if device not in used_devices:
free_size += size
total_size += size


@ -86,7 +86,7 @@ class CloudByteISCSIDriver(san.SanISCSIDriver):
error_msg = ""
# error_data is a single key value dict
for key, value in error_data.iteritems():
for key, value in error_data.items():
error_msg = value.get('errortext')
return error_msg
@ -188,7 +188,7 @@ class CloudByteISCSIDriver(san.SanISCSIDriver):
# Nothing to override
return default_dict
for key, value in default_dict.iteritems():
for key, value in default_dict.items():
# Fill the user dict with default options based on condition
if filtered_user_dict.get(key) is None and value is not None:
filtered_user_dict[key] = value


@ -897,7 +897,7 @@ class ScaleIODriver(driver.VolumeDriver):
if type_id:
volume_type = volume_types.get_volume_type(ctxt, type_id)
specs = volume_type.get('extra_specs')
for key, value in specs.iteritems():
for key, value in specs.items():
specs[key] = value
return specs


@ -1620,7 +1620,7 @@ class RestCommon(object):
kvs = specs
LOG.info(_LI('The QoS sepcs is: %s.'), kvs)
for key, value in kvs.iteritems():
for key, value in kvs.items():
if key in huawei_valid_keys:
qos[key.upper()] = value
@ -1696,7 +1696,7 @@ class RestCommon(object):
def _check_qos_high_priority(self, qos):
"""Check QoS priority."""
for key, value in qos.iteritems():
for key, value in qos.items():
if (key.find('MIN') == 0) or (key.find('LATENCY') == 0):
return True


@ -588,7 +588,7 @@ class FlashSystemDriver(san.SanDriver):
# Get preferred node and other nodes in I/O group
preferred_node_entry = None
io_group_nodes = []
for k, node in self._storage_nodes.iteritems():
for k, node in self._storage_nodes.items():
if vdisk_params['protocol'] != node['protocol']:
continue
if node['id'] == preferred_node:
@ -644,7 +644,7 @@ class FlashSystemDriver(san.SanDriver):
ctxt = context.get_admin_context()
volume_type = volume_types.get_volume_type(ctxt, type_id)
specs = volume_type.get('extra_specs')
for k, value in specs.iteritems():
for k, value in specs.items():
# Get the scope, if using scope format
key_split = k.split(':')
if len(key_split) == 1:
@ -1084,7 +1084,7 @@ class FlashSystemDriver(san.SanDriver):
# For each node, check what connection modes it supports. Delete any
# nodes that do not support any types (may be partially configured).
to_delete = []
for k, node in self._storage_nodes.iteritems():
for k, node in self._storage_nodes.items():
if not node['WWPN']:
to_delete.append(k)
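
This hunk keeps the driver's pre-existing pattern of collecting keys in to_delete and deleting them only after the loop, which remains the safe way to prune a dict now that .items() is a live view on Python 3. A sketch with made-up node data:

    storage_nodes = {'node1': {'WWPN': ['500143802426baf4']},
                     'node2': {'WWPN': []}}              # made-up entries

    to_delete = []
    for k, node in storage_nodes.items():
        if not node['WWPN']:
            to_delete.append(k)      # remember the key; do not delete mid-iteration

    for k in to_delete:
        del storage_nodes[k]         # safe: the items() view is no longer being iterated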


@ -200,7 +200,7 @@ class StorwizeSVCDriver(san.SanDriver):
# For each node, check what connection modes it supports. Delete any
# nodes that do not support any types (may be partially configured).
to_delete = []
for k, node in self._state['storage_nodes'].iteritems():
for k, node in self._state['storage_nodes'].items():
if ((len(node['ipv4']) or len(node['ipv6']))
and len(node['iscsi_name'])):
node['enabled_protocols'].append('iSCSI')


@ -455,7 +455,7 @@ class StorwizeHelpers(object):
def _get_opts_from_specs(self, opts, specs):
qos = {}
for k, value in specs.iteritems():
for k, value in specs.items():
# Get the scope, if using scope format
key_split = k.split(':')
if len(key_split) == 1:
@ -1033,7 +1033,7 @@ class StorwizeHelpers(object):
def add_vdisk_qos(self, vdisk, qos):
"""Add the QoS configuration to the volume."""
for key, value in qos.iteritems():
for key, value in qos.items():
if key in self.svc_qos_keys.keys():
param = self.svc_qos_keys[key]['param']
self.ssh.chvdisk(vdisk, ['-' + param, str(value)])
@ -1046,7 +1046,7 @@ class StorwizeHelpers(object):
available in the QoS configuration, the value is taken from it;
if not, the value will be set to default.
"""
for key, value in self.svc_qos_keys.iteritems():
for key, value in self.svc_qos_keys.items():
param = value['param']
if key in qos.keys():
# If the value is set in QoS, take the value from
@ -1059,7 +1059,7 @@ class StorwizeHelpers(object):
def disable_vdisk_qos(self, vdisk, qos):
"""Disable the QoS."""
for key, value in qos.iteritems():
for key, value in qos.items():
if key in self.svc_qos_keys.keys():
param = self.svc_qos_keys[key]['param']
# Take the default value.


@ -174,7 +174,7 @@ def validate_qos_spec(qos_spec):
return
normalized_qos_keys = [key.lower() for key in QOS_KEYS]
keylist = []
for key, value in six.iteritems(qos_spec):
for key, value in qos_spec.items():
lower_case_key = key.lower()
if lower_case_key not in normalized_qos_keys:
msg = _('Unrecognized QOS keyword: "%s"') % key


@ -172,7 +172,7 @@ class RemoteFSDriver(driver.VolumeDriver):
self.configuration.nas_secure_file_permissions,
'nas_secure_file_operations':
self.configuration.nas_secure_file_operations}
for opt_name, opt_value in secure_options.iteritems():
for opt_name, opt_value in secure_options.items():
if opt_value not in valid_secure_opts:
err_parms = {'name': opt_name, 'value': opt_value}
msg = _("NAS config '%(name)s=%(value)s' invalid. Must be "
@ -978,7 +978,7 @@ class RemoteFSSnapDriver(RemoteFSDriver):
new_base_file = base_file_img_info.backing_file
base_id = None
for key, value in snap_info.iteritems():
for key, value in snap_info.items():
if value == base_file and key != 'active':
base_id = key
break


@ -869,7 +869,7 @@ class HP3PARCommon(object):
else:
kvs = specs
for key, value in kvs.iteritems():
for key, value in kvs.items():
if 'qos:' in key:
fields = key.split(':')
key = fields[1]
@ -880,7 +880,7 @@ class HP3PARCommon(object):
def _get_keys_by_volume_type(self, volume_type):
hp3par_keys = {}
specs = volume_type.get('extra_specs')
for key, value in specs.iteritems():
for key, value in specs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]


@ -655,7 +655,7 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
# identify key (nsp) of least used nsp
current_smallest_count = sys.maxint
for (nsp, count) in nsp_counts.iteritems():
for (nsp, count) in nsp_counts.items():
if count < current_smallest_count:
current_least_used_nsp = nsp
current_smallest_count = count


@ -438,7 +438,7 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
def _get_lh_extra_specs(self, extra_specs, valid_keys):
"""Get LeftHand extra_specs (valid_keys only)."""
extra_specs_of_interest = {}
for key, value in extra_specs.iteritems():
for key, value in extra_specs.items():
if key in valid_keys:
extra_specs_of_interest[key] = value
return extra_specs_of_interest
@ -446,7 +446,7 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
def _map_extra_specs(self, extra_specs):
"""Map the extra spec key/values to LeftHand key/values."""
client_options = {}
for key, value in extra_specs.iteritems():
for key, value in extra_specs.items():
# map extra spec key to lh client option key
client_key = extra_specs_key_map[key]
# map extra spect value to lh client option value
@ -540,7 +540,7 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
# only set the ones that have changed
changed_extra_specs = {}
for key, value in lh_extra_specs.iteritems():
for key, value in lh_extra_specs.items():
(old, new) = diff['extra_specs'][key]
if old != new:
changed_extra_specs[key] = value


@ -541,7 +541,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
else:
options_dict['username'] = 'guest'
named_options = ','.join("%s=%s" % (key, val) for (key, val)
in options_dict.iteritems())
in options_dict.items())
options_list = ','.join(options_list)
flags = '-o ' + ','.join([named_options, options_list])


@ -493,7 +493,7 @@ class SolidFireDriver(san.SanISCSIDriver):
else:
kvs = specs
for key, value in kvs.iteritems():
for key, value in kvs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]


@ -241,7 +241,7 @@ class StorPoolDriver(driver.TransferVD, driver.ExtendVD, driver.CloneableVD,
used = 0
free = 0
agSize = 512 * units.Mi
for (id, desc) in dl.iteritems():
for (id, desc) in dl.items():
if desc.generationLeft != -1:
continue
total += desc.agCount * agSize
@ -405,7 +405,7 @@ class StorPoolDriver(driver.TransferVD, driver.ExtendVD, driver.CloneableVD,
templ = self.configuration.storpool_template
repl = self.configuration.storpool_replication
if diff['extra_specs']:
for (k, v) in diff['extra_specs'].iteritems():
for (k, v) in diff['extra_specs'].items():
if k == 'volume_backend_name':
if v[0] != v[1]:
# Retype of a volume backend not supported yet,


@ -1073,7 +1073,7 @@ class XIOISEDriver(object):
volume_type = volume_types.get_volume_type(ctxt, type_id)
extra_specs = volume_type.get('extra_specs')
# Parse out RAID, pool and affinity values
for key, value in extra_specs.iteritems():
for key, value in extra_specs.items():
subkey = ''
if ':' in key:
fields = key.split(':')
@ -1110,7 +1110,7 @@ class XIOISEDriver(object):
else:
kvs = volume_type.get('extra_specs')
# Parse out min, max and burst values
for key, value in kvs.iteritems():
for key, value in kvs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]


@ -281,7 +281,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
if not metadata:
metadata = {}
for (k, v) in metadata.iteritems():
for (k, v) in metadata.items():
if len(k) == 0:
msg = _("Metadata property key blank")
LOG.warning(msg)


@ -242,7 +242,7 @@ def get_all_specs(context, inactive=False, search_opts=None):
LOG.debug("Searching by: %s", search_opts)
def _check_specs_match(qos_specs, searchdict):
for k, v in searchdict.iteritems():
for k, v in searchdict.items():
if ((k not in qos_specs['specs'].keys() or
qos_specs['specs'][k] != v)):
return False
@ -252,9 +252,9 @@ def get_all_specs(context, inactive=False, search_opts=None):
filter_mapping = {'qos_specs': _check_specs_match}
result = {}
for name, args in qos_specs.iteritems():
for name, args in qos_specs.items():
# go over all filters in the list
for opt, values in search_opts.iteritems():
for opt, values in search_opts.items():
try:
filter_func = filter_mapping[opt]
except KeyError:


@ -102,7 +102,7 @@ def get_all_types(context, inactive=0, search_opts=None):
LOG.debug("Searching by: %s" % search_opts)
def _check_extra_specs_match(vol_type, searchdict):
for k, v in searchdict.iteritems():
for k, v in searchdict.items():
if (k not in vol_type['extra_specs'].keys()
or vol_type['extra_specs'][k] != v):
return False
@ -112,9 +112,9 @@ def get_all_types(context, inactive=0, search_opts=None):
filter_mapping = {'extra_specs': _check_extra_specs_match}
result = {}
for type_name, type_args in vol_types.iteritems():
for type_name, type_args in vol_types.items():
# go over all filters in the list
for opt, values in search_opts.iteritems():
for opt, values in search_opts.items():
try:
filter_func = filter_mapping[opt]
except KeyError:
@ -258,11 +258,11 @@ def volume_types_diff(context, vol_type_id1, vol_type_id2):
dict1 = {}
if dict2 is None:
dict2 = {}
for k, v in dict1.iteritems():
for k, v in dict1.items():
res[k] = (v, dict2.get(k))
if k not in dict2 or res[k][0] != res[k][1]:
equal = False
for k, v in dict2.iteritems():
for k, v in dict2.items():
res[k] = (dict1.get(k), v)
if k not in dict1 or res[k][0] != res[k][1]:
equal = False


@ -445,7 +445,7 @@ class Debug(Middleware):
resp = req.get_response(self.application)
print(('*' * 40) + ' RESPONSE HEADERS') # noqa
for (key, value) in resp.headers.iteritems():
for (key, value) in resp.headers.items():
print(key, '=', value) # noqa
print() # noqa