Performance: leverage dict comprehension in PEP-0274

PEP-0274 introduced dict comprehensions to replace dict constructor
with a sequence of length-2 sequences; these are the benefits copied
from [1]:
  The dictionary constructor approach has two distinct disadvantages
  from the proposed syntax though.  First, it isn't as legible as a
  dict comprehension.  Second, it forces the programmer to create an
  in-core list object first, which could be expensive.
Nova dropped python 2.6 support, we can leverage this now.
There is deep dive about PEP-0274[2] and basic tests about
performance[3].
Note: This commit doesn't handle dict constructor with kwargs.
This commit also adds a hacking rule.

[1]http://legacy.python.org/dev/peps/pep-0274/
[2]http://doughellmann.com/2012/11/12/the-performance-impact-of-using-dict-instead-of-in-cpython-2-7-2.html
[3]http://paste.openstack.org/show/154798/

Change-Id: Ifb5cb05b9cc2b8758d5a8e34f7792470a73d7c40
This commit is contained in:
ChangBo Guo(gcb) 2014-12-24 18:10:30 +08:00
parent a9fca62914
commit 69fef14509
54 changed files with 170 additions and 143 deletions

View File

@ -47,6 +47,7 @@ Nova Specific Commandments
- [N334] Change assertTrue/False(A in/not in B, message) to the more specific
assertIn/NotIn(A, B, message)
- [N335] Check for usage of deprecated assertRaisesRegexp
- [N336] Must use a dict comprehension instead of a dict constructor with a sequence of key-value pairs.
Creating Unit Tests
-------------------

View File

@ -440,8 +440,8 @@ def dict_from_dotted_str(items):
def search_opts_from_filters(filters):
return dict((f['name'].replace('-', '_'), f['value']['1'])
for f in filters if f['value']['1']) if filters else {}
return {f['name'].replace('-', '_'): f['value']['1']
for f in filters if f['value']['1']} if filters else {}
def regex_from_ec2_regex(ec2_re):

View File

@ -44,7 +44,7 @@ def _filter_keys(item, keys):
item is a dict
"""
return dict((k, v) for k, v in item.iteritems() if k in keys)
return {k: v for k, v in item.iteritems() if k in keys}
def _fixup_cell_info(cell_info, keys):

View File

@ -47,10 +47,10 @@ class ConsoleAuthTokensController(wsgi.Controller):
"accessible"))
return {'console':
dict([(i, connect_info[i])
for i in ['instance_uuid', 'host', 'port',
'internal_access_path']
if i in connect_info])}
{i: connect_info[i]
for i in ['instance_uuid', 'host', 'port',
'internal_access_path']
if i in connect_info}}
class Console_auth_tokens(extensions.ExtensionDescriptor):

View File

@ -112,7 +112,7 @@ class HostController(object):
context = req.environ['nova.context']
authorize(context)
# See what the user wants to 'update'
params = dict([(k.strip().lower(), v) for k, v in body.iteritems()])
params = {k.strip().lower(): v for k, v in body.iteritems()}
orig_status = status = params.pop('status', None)
orig_maint_mode = maint_mode = params.pop('maintenance_mode', None)
# Validate the request

View File

@ -100,11 +100,11 @@ class InstanceUsageAuditLogController(object):
running_hosts.add(tlog['host'])
total_errors += tlog['errors']
total_items += tlog['task_items']
log = dict((tl['host'], dict(state=tl['state'],
instances=tl['task_items'],
errors=tl['errors'],
message=tl['message']))
for tl in task_logs)
log = {tl['host']: dict(state=tl['state'],
instances=tl['task_items'],
errors=tl['errors'],
message=tl['message'])
for tl in task_logs}
missing_hosts = hosts - seen_hosts
overall_status = "%s hosts done. %s errors." % (
'ALL' if len(done_hosts) == len(hosts)

View File

@ -152,7 +152,7 @@ class NetworkController(object):
network = body["network"]
keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
"num_networks"]
kwargs = dict((k, network.get(k)) for k in keys)
kwargs = {k: network.get(k) for k in keys}
label = network["label"]

View File

@ -97,7 +97,7 @@ class QuotaSetsController(wsgi.Controller):
if usages:
return values
else:
return dict((k, v['limit']) for k, v in values.items())
return {k: v['limit'] for k, v in values.items()}
def show(self, req, id):
context = req.environ['nova.context']

View File

@ -64,7 +64,7 @@ class LimitsController(object):
project_id = req.params.get('tenant_id', context.project_id)
quotas = QUOTAS.get_project_quotas(context, project_id,
usages=False)
abs_limits = dict((k, v['limit']) for k, v in quotas.items())
abs_limits = {k: v['limit'] for k, v in quotas.items()}
rate_limits = req.environ.get("nova.limits", [])
builder = self._get_view_builder(req)
@ -101,7 +101,7 @@ def create_resource():
class Limit(object):
"""Stores information about a limit for HTTP requests."""
UNITS = dict([(v, k) for k, v in utils.TIME_UNITS.items()])
UNITS = {v: k for k, v in utils.TIME_UNITS.items()}
def __init__(self, verb, uri, regex, value, unit):
"""Initialize a new `Limit`.

View File

@ -46,7 +46,7 @@ def _filter_keys(item, keys):
"""Filters all model attributes except for keys
item is a dict
"""
return dict((k, v) for k, v in item.iteritems() if k in keys)
return {k: v for k, v in item.iteritems() if k in keys}
def _fixup_cell_info(cell_info, keys):

View File

@ -52,10 +52,10 @@ class ConsoleAuthTokensController(wsgi.Controller):
"accessible"))
return {'console':
dict([(i, connect_info[i])
for i in ['instance_uuid', 'host', 'port',
'internal_access_path']
if i in connect_info])}
{i: connect_info[i]
for i in ['instance_uuid', 'host', 'port',
'internal_access_path']
if i in connect_info}}
class ConsoleAuthTokens(extensions.V3APIExtensionBase):

View File

@ -103,11 +103,11 @@ class InstanceUsageAuditLogController(wsgi.Controller):
running_hosts.add(tlog['host'])
total_errors += tlog['errors']
total_items += tlog['task_items']
log = dict((tl['host'], dict(state=tl['state'],
instances=tl['task_items'],
errors=tl['errors'],
message=tl['message']))
for tl in task_logs)
log = {tl['host']: dict(state=tl['state'],
instances=tl['task_items'],
errors=tl['errors'],
message=tl['message'])
for tl in task_logs}
missing_hosts = hosts - seen_hosts
overall_status = "%s hosts done. %s errors." % (
'ALL' if len(done_hosts) == len(hosts)

View File

@ -34,7 +34,7 @@ class LimitsController(wsgi.Controller):
project_id = req.params.get('tenant_id', context.project_id)
quotas = QUOTAS.get_project_quotas(context, project_id,
usages=False)
abs_limits = dict((k, v['limit']) for k, v in quotas.items())
abs_limits = {k: v['limit'] for k, v in quotas.items()}
rate_limits = req.environ.get("nova.limits", [])
builder = self._get_view_builder(req)

View File

@ -80,7 +80,7 @@ class QuotaSetsController(wsgi.Controller):
if usages:
return values
else:
return dict((k, v['limit']) for k, v in values.items())
return {k: v['limit'] for k, v in values.items()}
@extensions.expected_errors(403)
def show(self, req, id):

View File

@ -144,7 +144,7 @@ class TenantNetworkController(wsgi.Controller):
network = body["network"]
keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
"num_networks"]
kwargs = dict((k, network.get(k)) for k in keys)
kwargs = {k: network.get(k) for k in keys}
label = network["label"]

View File

@ -86,9 +86,7 @@ class BlockDeviceDict(dict):
if bdm_dict.get('device_name'):
bdm_dict['device_name'] = prepend_dev(bdm_dict['device_name'])
# NOTE (ndipanov): Never default db fields
self.update(
dict((field, None)
for field in self._fields - do_not_default))
self.update({field: None for field in self._fields - do_not_default})
self.update(list(bdm_dict.iteritems()))
def _validate(self, bdm_dict):
@ -139,8 +137,8 @@ class BlockDeviceDict(dict):
non_computable_fields = set(['boot_index', 'disk_bus',
'guest_format', 'device_type'])
new_bdm = dict((fld, val) for fld, val in legacy_bdm.iteritems()
if fld in copy_over_fields)
new_bdm = {fld: val for fld, val in legacy_bdm.iteritems()
if fld in copy_over_fields}
virt_name = legacy_bdm.get('virtual_name')
@ -203,8 +201,8 @@ class BlockDeviceDict(dict):
copy_over_fields |= (bdm_db_only_fields |
bdm_db_inherited_fields)
legacy_block_device = dict((field, self.get(field))
for field in copy_over_fields if field in self)
legacy_block_device = {field: self.get(field)
for field in copy_over_fields if field in self}
source_type = self.get('source_type')
destination_type = self.get('destination_type')

View File

@ -74,9 +74,8 @@ class CellState(object):
def update_db_info(self, cell_db_info):
"""Update cell credentials from db."""
self.db_info = dict(
[(k, v) for k, v in cell_db_info.iteritems()
if k != 'name'])
self.db_info = {k: v for k, v in cell_db_info.iteritems()
if k != 'name'}
def update_capabilities(self, cell_metadata):
"""Update cell capabilities for a cell."""
@ -440,7 +439,7 @@ class CellStateManagerDB(CellStateManager):
self.last_cell_db_check = timeutils.utcnow()
ctxt = context.get_admin_context()
db_cells = self.db.cell_get_all(ctxt)
db_cells_dict = dict((cell['name'], cell) for cell in db_cells)
db_cells_dict = {cell['name']: cell for cell in db_cells}
self._refresh_cells_from_dict(db_cells_dict)
self._update_our_capacity(ctxt)

View File

@ -536,8 +536,8 @@ class NetworkCommands(object):
dns1=None, dns2=None, project_id=None, priority=None,
uuid=None, fixed_cidr=None):
"""Creates fixed ips for host by range."""
kwargs = dict(((k, v) for k, v in locals().iteritems()
if v and k != "self"))
kwargs = {k: v for k, v in locals().iteritems()
if v and k != "self"}
if multi_host is not None:
kwargs['multi_host'] = multi_host == 'T'
net_manager = importutils.import_object(CONF.network_manager)

View File

@ -254,7 +254,7 @@ def _diff_dict(orig, new):
element, giving the updated value.
"""
# Figure out what keys went away
result = dict((k, ['-']) for k in set(orig.keys()) - set(new.keys()))
result = {k: ['-'] for k in set(orig.keys()) - set(new.keys())}
# Compute the updates
for key, value in new.items():
if key not in orig or value != orig[key]:
@ -358,9 +358,9 @@ class API(base.Base):
raise exception.OnsetFileContentLimitExceeded()
def _get_headroom(self, quotas, usages, deltas):
headroom = dict((res, quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in quotas.keys())
headroom = {res: quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved'])
for res in quotas.keys()}
# If quota_cores is unlimited [-1]:
# - set cores headroom based on instances headroom:
if quotas.get('cores') == -1:

View File

@ -704,7 +704,7 @@ class ComputeManager(manager.Manager):
driver_instances = self.driver.list_instances()
instances = objects.InstanceList.get_by_filters(context, filters,
use_slave=True)
name_map = dict((instance.name, instance) for instance in instances)
name_map = {instance.name: instance for instance in instances}
local_instances = []
for driver_instance in driver_instances:
instance = name_map.get(driver_instance)

View File

@ -152,8 +152,7 @@ class ResourceMonitorHandler(loadables.BaseLoader):
"""
monitor_classes = self.get_matching_classes(
CONF.compute_available_monitors)
monitor_class_map = dict((cls.__name__, cls)
for cls in monitor_classes)
monitor_class_map = {cls.__name__: cls for cls in monitor_classes}
monitor_cls_names = CONF.compute_monitors
good_monitors = []
bad_monitors = []

View File

@ -659,8 +659,8 @@ def compute_node_statistics(context):
fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb',
'current_workload', 'running_vms', 'disk_available_least')
return dict((field, int(result[idx] or 0))
for idx, field in enumerate(fields))
return {field: int(result[idx] or 0)
for idx, field in enumerate(fields)}
###################
@ -3481,8 +3481,8 @@ def quota_reserve(context, resources, project_quotas, user_quotas, deltas,
usages = project_usages
else:
usages = user_usages
usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
for k, v in usages.items())
usages = {k: dict(in_use=v['in_use'], reserved=v['reserved'])
for k, v in usages.items()}
LOG.debug('Raise OverQuota exception because: '
'project_quotas: %(project_quotas)s, '
'user_quotas: %(user_quotas)s, deltas: %(deltas)s, '
@ -4503,8 +4503,8 @@ def _dict_with_extra_specs(inst_type_query):
"""
inst_type_dict = dict(inst_type_query)
extra_specs = dict([(x['key'], x['value'])
for x in inst_type_query['extra_specs']])
extra_specs = {x['key']: x['value']
for x in inst_type_query['extra_specs']}
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
@ -4711,7 +4711,7 @@ def _flavor_extra_specs_get_query(context, flavor_id, session=None):
@require_context
def flavor_extra_specs_get(context, flavor_id):
rows = _flavor_extra_specs_get_query(context, flavor_id).all()
return dict([(row['key'], row['value']) for row in rows])
return {row['key']: row['value'] for row in rows}
@require_context
@ -4835,7 +4835,7 @@ def _instance_metadata_get_query(context, instance_uuid, session=None):
@require_context
def instance_metadata_get(context, instance_uuid):
rows = _instance_metadata_get_query(context, instance_uuid).all()
return dict((row['key'], row['value']) for row in rows)
return {row['key']: row['value'] for row in rows}
@require_context
@ -4901,7 +4901,7 @@ def _instance_system_metadata_get_query(context, instance_uuid, session=None):
@require_context
def instance_system_metadata_get(context, instance_uuid):
rows = _instance_system_metadata_get_query(context, instance_uuid).all()
return dict((row['key'], row['value']) for row in rows)
return {row['key']: row['value'] for row in rows}
@require_context
@ -5417,7 +5417,7 @@ def aggregate_metadata_get(context, aggregate_id):
models.AggregateMetadata).\
filter_by(aggregate_id=aggregate_id).all()
return dict([(r['key'], r['value']) for r in rows])
return {r['key']: r['value'] for r in rows}
@require_aggregate_exists

View File

@ -1132,7 +1132,7 @@ class Aggregate(BASE, NovaBase):
@property
def metadetails(self):
return dict([(m.key, m.value) for m in self._metadata])
return {m.key: m.value for m in self._metadata}
@property
def availability_zone(self):

View File

@ -60,8 +60,8 @@ def check_shadow_table(migrate_engine, table_name):
shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
autoload=True)
columns = dict([(c.name, c) for c in table.columns])
shadow_columns = dict([(c.name, c) for c in shadow_table.columns])
columns = {c.name: c for c in table.columns}
shadow_columns = {c.name: c for c in shadow_table.columns}
for name, column in columns.iteritems():
if name not in shadow_columns:

View File

@ -55,7 +55,7 @@ class ConvertedException(webob.exc.WSGIHTTPException):
def _cleanse_dict(original):
"""Strip all admin_password, new_pass, rescue_pass keys from a dict."""
return dict((k, v) for k, v in original.iteritems() if "_pass" not in k)
return {k: v for k, v in original.iteritems() if "_pass" not in k}
def wrap_exception(notifier=None, get_notifier=None):

View File

@ -91,6 +91,7 @@ underscore_import_check = re.compile(r"(.)*import _(.)*")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
api_version_re = re.compile(r"@.*api_version")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
# TODO(dims): When other oslo libraries switch over non-namespace'd
# imports, we need to add them to the regexp below.
@ -490,6 +491,14 @@ def assert_raises_regexp(logical_line):
"of assertRaisesRegexp")
def dict_constructor_with_list_copy(logical_line):
    """N336: flag dict() called on a sequence of key-value pairs.

    A dict comprehension should be used instead of building an
    intermediate list/generator of 2-element sequences and handing it
    to the dict constructor.
    """
    if dict_constructor_with_list_copy_re.match(logical_line):
        yield (0, "N336: Must use a dict comprehension instead of a dict"
                  " constructor with a sequence of key-value pairs.")
def factory(register):
register(import_no_db_in_virt)
register(no_db_session_in_public_api)
@ -513,3 +522,4 @@ def factory(register):
register(CheckForTransAdd)
register(check_oslo_namespace_imports)
register(assert_true_or_false_with_in)
register(dict_constructor_with_list_copy)

View File

@ -26,7 +26,7 @@ from nova.i18n import _
def ensure_string_keys(d):
    """Return a copy of *d* with every key coerced to str.

    Works around http://bugs.python.org/issue4978 (unicode keys cannot
    be passed as **kwargs names on older Python 2).

    :param d: mapping whose keys may not all be plain strings.
    :returns: a new dict with str() applied to each key; values are
              left untouched.
    """
    # items() instead of the Python-2-only iteritems() keeps this
    # helper working on both Python 2 and Python 3.
    return {str(k): v for k, v in d.items()}
# Constants for the 'vif_type' field in VIF class
VIF_TYPE_OVS = 'ovs'

View File

@ -537,8 +537,7 @@ class API(base_api.NetworkAPI):
extensions_list = neutron.list_extensions()['extensions']
self.last_neutron_extension_sync = time.time()
self.extensions.clear()
self.extensions = dict((ext['name'], ext)
for ext in extensions_list)
self.extensions = {ext['name']: ext for ext in extensions_list}
def _has_port_binding_extension(self, context, refresh_cache=False,
neutron=None):
@ -1099,12 +1098,12 @@ class API(base_api.NetworkAPI):
def _setup_pools_dict(self, client):
pools = self._get_floating_ip_pools(client)
return dict([(i['id'], i) for i in pools])
return {i['id']: i for i in pools}
def _setup_ports_dict(self, client, project_id=None):
search_opts = {'tenant_id': project_id} if project_id else {}
ports = client.list_ports(**search_opts)['ports']
return dict([(p['id'], p) for p in ports])
return {p['id']: p for p in ports}
def get_floating_ip(self, context, id):
"""Return floating ip object given the floating ip id."""

View File

@ -780,8 +780,8 @@ class NovaObjectSerializer(messaging.NoOpSerializer):
"""
iterable = values.__class__
if issubclass(iterable, dict):
return iterable(**dict((k, action_fn(context, v))
for k, v in six.iteritems(values)))
return iterable(**{k: action_fn(context, v)
for k, v in six.iteritems(values)})
else:
# NOTE(danms): A set can't have an unhashable value inside, such as
# a dict. Convert sets to tuples, which is fine, since we can't

View File

@ -455,15 +455,15 @@ class DictProxyField(object):
return self
if getattr(obj, self._fld_name) is None:
return
return dict((self._key_type(k), v)
for k, v in six.iteritems(getattr(obj, self._fld_name)))
return {self._key_type(k): v
for k, v in six.iteritems(getattr(obj, self._fld_name))}
def __set__(self, obj, val):
if val is None:
setattr(obj, self._fld_name, val)
else:
setattr(obj, self._fld_name, dict((six.text_type(k), v)
for k, v in six.iteritems(val)))
setattr(obj, self._fld_name, {six.text_type(k): v
for k, v in six.iteritems(val)})
class Set(CompoundFieldType):

View File

@ -85,7 +85,7 @@ class PciDeviceStats(object):
if not devspec:
return
tags = devspec.get_tags()
pool = dict((k, dev.get(k)) for k in self.pool_keys)
pool = {k: dev.get(k) for k in self.pool_keys}
if tags:
pool.update(tags)
return pool
@ -217,7 +217,7 @@ class PciDeviceStats(object):
# 'devices' shouldn't be part of stats
pools = []
for pool in self.pools:
tmp = dict((k, v) for k, v in pool.iteritems() if k != 'devices')
tmp = {k: v for k, v in pool.iteritems() if k != 'devices'}
pools.append(tmp)
return iter(pools)

View File

@ -372,8 +372,8 @@ class DbQuotaDriver(object):
else:
sync_filt = lambda x: not hasattr(x, 'sync')
desired = set(keys)
sub_resources = dict((k, v) for k, v in resources.items()
if k in desired and sync_filt(v))
sub_resources = {k: v for k, v in resources.items()
if k in desired and sync_filt(v)}
# Make sure we accounted for all of them...
if len(keys) != len(sub_resources):
@ -394,7 +394,7 @@ class DbQuotaDriver(object):
usages=False,
project_quotas=project_quotas)
return dict((k, v['limit']) for k, v in quotas.items())
return {k: v['limit'] for k, v in quotas.items()}
def limit_check(self, context, resources, values, project_id=None,
user_id=None):

View File

@ -283,8 +283,7 @@ class HostManager(object):
self.filter_handler = filters.HostFilterHandler()
filter_classes = self.filter_handler.get_matching_classes(
CONF.scheduler_available_filters)
self.filter_cls_map = dict(
(cls.__name__, cls) for cls in filter_classes)
self.filter_cls_map = {cls.__name__: cls for cls in filter_classes}
self.filter_obj_map = {}
self.weight_handler = weights.HostWeightHandler()
weigher_classes = self.weight_handler.get_matching_classes(
@ -372,7 +371,7 @@ class HostManager(object):
if ignore_hosts or force_hosts or force_nodes:
# NOTE(deva): we can't assume "host" is unique because
# one host may have many nodes.
name_to_cls_map = dict([((x.host, x.nodename), x) for x in hosts])
name_to_cls_map = {(x.host, x.nodename): x for x in hosts}
if ignore_hosts:
_strip_ignore_hosts(name_to_cls_map, ignore_hosts)
if not name_to_cls_map:

View File

@ -316,7 +316,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
# The server should also be in the all-servers details list
servers = self.api.get_servers(detail=True)
server_map = dict((server['id'], server) for server in servers)
server_map = {server['id']: server for server in servers}
found_server = server_map.get(created_server_id)
self.assertTrue(found_server)
# Details do include metadata
@ -324,7 +324,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
# The server should also be in the all-servers summary list
servers = self.api.get_servers(detail=False)
server_map = dict((server['id'], server) for server in servers)
server_map = {server['id']: server for server in servers}
found_server = server_map.get(created_server_id)
self.assertTrue(found_server)
# Summary should not include metadata
@ -453,7 +453,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
# lookup servers created by the first request.
servers = self.api.get_servers(detail=True,
search_opts={'reservation_id': reservation_id})
server_map = dict((server['id'], server) for server in servers)
server_map = {server['id']: server for server in servers}
found_server = server_map.get(created_server_id)
# The server from the 2nd request should not be there.
self.assertIsNone(found_server)

View File

@ -347,7 +347,7 @@ class ExtendedQuotasTestV21(BaseQuotaSetsTest):
if usages:
return self.fake_quotas
else:
return dict((k, v['limit']) for k, v in self.fake_quotas.items())
return {k: v['limit'] for k, v in self.fake_quotas.items()}
def fake_get_settable_quotas(self, context, project_id, user_id=None):
return {

View File

@ -62,8 +62,8 @@ class BaseLimitTestSuite(test.NoDBTestCase):
self.absolute_limits = {}
def stub_get_project_quotas(context, project_id, usages=True):
return dict((k, dict(limit=v))
for k, v in self.absolute_limits.items())
return {k: dict(limit=v)
for k, v in self.absolute_limits.items()}
self.stubs.Set(nova.quota.QUOTAS, "get_project_quotas",
stub_get_project_quotas)
@ -197,8 +197,7 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
}
def _get_project_quotas(context, project_id, usages=True):
return dict((k, dict(limit=v))
for k, v in self.absolute_limits.items())
return {k: dict(limit=v) for k, v in self.absolute_limits.items()}
with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
get_project_quotas:

View File

@ -5815,10 +5815,10 @@ class ComputeTestCase(BaseTestCase):
self.assertIsNone(instances[0]['task_state'])
def _fill_fault(self, values):
extra = dict([(x, None) for x in ['created_at',
'deleted_at',
'updated_at',
'deleted']])
extra = {x: None for x in ['created_at',
'deleted_at',
'updated_at',
'deleted']}
extra['id'] = 1
extra['details'] = ''
extra.update(values)
@ -7579,8 +7579,8 @@ class ComputeAPITestCase(BaseTestCase):
instance.refresh()
self.assertEqual(instance.task_state, task_states.REBUILDING)
sys_meta = dict([(k, v) for k, v in instance.system_metadata.items()
if not k.startswith('instance_type')])
sys_meta = {k: v for k, v in instance.system_metadata.items()
if not k.startswith('instance_type')}
self.assertEqual(sys_meta,
{'image_kernel_id': 'fake_kernel_id',
'image_min_disk': '1',

View File

@ -164,8 +164,8 @@ class _ComputeAPIUnitTestMixIn(object):
self.mox.StubOutWithMock(quota.QUOTAS, "reserve")
quotas = {'instances': 1, 'cores': 1, 'ram': 1}
usages = dict((r, {'in_use': 1, 'reserved': 1}) for r in
['instances', 'cores', 'ram'])
usages = {r: {'in_use': 1, 'reserved': 1} for r in
['instances', 'cores', 'ram']}
quota_exception = exception.OverQuota(quotas=quotas,
usages=usages, overs=['instances'])

View File

@ -1201,7 +1201,7 @@ class ResizeClaimTestCase(BaseTrackerTestCase):
src_dict = {
'memory_mb': 1, 'root_gb': 1, 'ephemeral_gb': 0, 'vcpus': 1}
dest_dict = dict((k, v + 1) for (k, v) in src_dict.iteritems())
dest_dict = {k: v + 1 for (k, v) in src_dict.iteritems()}
src_type = self._fake_flavor_create(
id=10, name="srcflavor", **src_dict)
dest_type = self._fake_flavor_create(

View File

@ -1240,8 +1240,8 @@ class ModelsObjectComparatorMixin(object):
def _dict_from_object(self, obj, ignored_keys):
if ignored_keys is None:
ignored_keys = []
return dict([(k, v) for k, v in obj.iteritems()
if k not in ignored_keys])
return {k: v for k, v in obj.iteritems()
if k not in ignored_keys}
def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
obj1 = self._dict_from_object(obj1, ignored_keys)
@ -6478,8 +6478,8 @@ class CertificateTestCase(test.TestCase, ModelsObjectComparatorMixin):
'project_id': 'project',
'file_name': 'filename'
}
return [dict((k, v + str(x)) for k, v in base_values.iteritems())
for x in xrange(1, 4)]
return [{k: v + str(x) for k, v in base_values.iteritems()}
for x in xrange(1, 4)]
def _certificates_create(self):
return [db.certificate_create(self.ctxt, cert)
@ -6535,11 +6535,11 @@ class ConsoleTestCase(test.TestCase, ModelsObjectComparatorMixin):
for val in pools_data]
instance_uuid = uuidutils.generate_uuid()
db.instance_create(self.ctxt, {'uuid': instance_uuid})
self.console_data = [dict([('instance_name', 'name' + str(x)),
('instance_uuid', instance_uuid),
('password', 'pass' + str(x)),
('port', 7878 + x),
('pool_id', self.console_pools[x]['id'])])
self.console_data = [{'instance_name': 'name' + str(x),
'instance_uuid': instance_uuid,
'password': 'pass' + str(x),
'port': 7878 + x,
'pool_id': self.console_pools[x]['id']}
for x in xrange(len(pools_data))]
self.consoles = [db.console_create(self.ctxt, val)
for val in self.console_data]
@ -6644,8 +6644,8 @@ class CellTestCase(test.TestCase, ModelsObjectComparatorMixin):
def _create_cells(self):
test_values = []
for x in xrange(1, 4):
modified_val = dict([(k, self._cell_value_modify(v, x))
for k, v in self._get_cell_base_values().iteritems()])
modified_val = {k: self._cell_value_modify(v, x)
for k, v in self._get_cell_base_values().iteritems()}
db.cell_create(self.ctxt, modified_val)
test_values.append(modified_val)
return test_values
@ -6689,8 +6689,8 @@ class CellTestCase(test.TestCase, ModelsObjectComparatorMixin):
new_cells = self._create_cells()
cells = db.cell_get_all(self.ctxt)
self.assertEqual(len(new_cells), len(cells))
cells_byname = dict([(newcell['name'],
newcell) for newcell in new_cells])
cells_byname = {newcell['name']: newcell
for newcell in new_cells}
for cell in cells:
self._assertEqualObjects(cell, cells_byname[cell['name']],
self._ignored_keys)

View File

@ -234,7 +234,7 @@ class FakeLDAP(object):
raise SERVER_DOWN()
key = "%s%s" % (self.__prefix, dn)
value_dict = dict([(k, _to_json(v)) for k, v in attr])
value_dict = {k: _to_json(v) for k, v in attr}
Store.instance().hmset(key, value_dict)
def delete_s(self, dn):
@ -313,13 +313,12 @@ class FakeLDAP(object):
# get the attributes from the store
attrs = store.hgetall(key)
# turn the values from the store into lists
attrs = dict([(k, _from_json(v))
for k, v in attrs.iteritems()])
attrs = {k: _from_json(v) for k, v in attrs.iteritems()}
# filter the objects by query
if not query or _match_query(query, attrs):
# filter the attributes by fields
attrs = dict([(k, v) for k, v in attrs.iteritems()
if not fields or k in fields])
attrs = {k: v for k, v in attrs.iteritems()
if not fields or k in fields}
objects.append((key[len(self.__prefix):], attrs))
return objects

View File

@ -41,8 +41,8 @@ class PolicyFixture(fixtures.Fixture):
def set_rules(self, rules):
policy = nova.policy._ENFORCER
policy.set_rules(dict((k, common_policy.parse_rule(v))
for k, v in rules.items()))
policy.set_rules({k: common_policy.parse_rule(v)
for k, v in rules.items()})
class RoleBasedPolicyFixture(fixtures.Fixture):

View File

@ -455,3 +455,31 @@ class HackingTestCase(test.NoDBTestCase):
"""
errors = []
self._assert_has_errors(code, checker, expected_errors=errors)
def test_dict_constructor_with_list_copy(self):
    """N336 fires on dict() over pair sequences and nothing else."""
    check = checks.dict_constructor_with_list_copy
    flagged = (
        " dict([(i, connect_info[i])",
        " attrs = dict([(k, _from_json(v))",
        " type_names = dict((value, key) for key, value in",
        " dict((value, key) for key, value in",
        "foo(param=dict((k, v) for k, v in bar.items()))",
        " dict([[i,i] for i in range(3)])",
        " dd = dict([i,i] for i in range(3))",
    )
    for line in flagged:
        self.assertEqual(1, len(list(check(line))))
    accepted = (
        " create_kwargs = dict(snapshot=snapshot,",
        " self._render_dict(xml, data_el, data.__dict__)",
    )
    for line in accepted:
        self.assertEqual(0, len(list(check(line))))

View File

@ -75,8 +75,8 @@ class PolicyTestCase(test.NoDBTestCase):
}
policy.reset()
policy.init()
policy.set_rules(dict((k, common_policy.parse_rule(v))
for k, v in rules.items()))
policy.set_rules({k: common_policy.parse_rule(v)
for k, v in rules.items()})
self.context = context.RequestContext('fake', 'fake', roles=['member'])
self.target = {}
@ -161,8 +161,8 @@ class DefaultPolicyTestCase(test.NoDBTestCase):
def _set_rules(self, default_rule):
policy.reset()
rules = dict((k, common_policy.parse_rule(v))
for k, v in self.rules.items())
rules = {k: common_policy.parse_rule(v)
for k, v in self.rules.items()}
policy.init(rules=rules, default_rule=default_rule, use_conf=False)
def test_policy_called(self):

View File

@ -1960,8 +1960,7 @@ class DbQuotaDriverTestCase(test.TestCase):
usages=True, remains=False,
project_quotas=None):
self.calls.append('get_project_quotas')
return dict((k, dict(limit=v.default))
for k, v in resources.items())
return {k: dict(limit=v.default) for k, v in resources.items()}
self.stubs.Set(self.driver, 'get_project_quotas',
fake_get_project_quotas)

View File

@ -274,8 +274,8 @@ class ManagedObject(object):
return prefix + "-" + str(self.__class__._counter)
def __repr__(self):
return jsonutils.dumps(dict([(elem.name, elem.val)
for elem in self.propSet]))
return jsonutils.dumps({elem.name: elem.val
for elem in self.propSet})
class DataObject(object):

View File

@ -1658,7 +1658,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
'overallCpuUsage': 0, 'powerState': 'poweredOn',
'cpuReservation': 0, 'overallCpuDemand': 0,
'numVirtualDisks': 1, 'hostMemoryUsage': 141}
expected = dict([('vmware:' + k, v) for k, v in expected.items()])
expected = {'vmware:' + k: v for k, v in expected.items()}
self.assertThat(
self.conn.get_diagnostics({'name': 1, 'uuid': self.uuid,
'node': self.instance_node}),

View File

@ -96,8 +96,7 @@ class DriverBlockDevice(dict):
if self._bdm_obj.no_device:
raise _NotTransformable()
self.update(dict((field, None)
for field in self._fields))
self.update({field: None for field in self._fields})
self._transform()
def __getattr__(self, name):
@ -122,7 +121,7 @@ class DriverBlockDevice(dict):
Basic method will just drop the fields that are not in
_legacy_fields set. Override this in subclass if needed.
"""
return dict((key, self.get(key)) for key in self._legacy_fields)
return {key: self.get(key) for key in self._legacy_fields}
def attach(self, **kwargs):
"""Make the device available to be used by VMs.
@ -201,8 +200,8 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
raise _InvalidType
self.update(
dict((k, v) for k, v in self._bdm_obj.iteritems()
if k in self._new_fields | set(['delete_on_termination']))
{k: v for k, v in self._bdm_obj.iteritems()
if k in self._new_fields | set(['delete_on_termination'])}
)
self['mount_device'] = self._bdm_obj.device_name
try:

View File

@ -94,8 +94,8 @@ class VMUtils(object):
constants.HYPERV_VM_STATE_SUSPENDED: 32769}
def __init__(self, host='.'):
self._enabled_states_map = dict((v, k) for k, v in
self._vm_power_states_map.iteritems())
self._enabled_states_map = {v: k for k, v in
self._vm_power_states_map.iteritems()}
if sys.platform == 'win32':
self._init_hyperv_wmi_conn(host)
self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)

View File

@ -365,8 +365,7 @@ def get_info_from_bdm(virt_type, bdm, mapping=None, disk_bus=None,
if not device_name:
if assigned_devices:
padded_mapping = dict((dev, {'dev': dev})
for dev in assigned_devices)
padded_mapping = {dev: {'dev': dev} for dev in assigned_devices}
padded_mapping.update(mapping)
else:
padded_mapping = mapping

View File

@ -1286,7 +1286,7 @@ class VMwareVMOps(object):
"""Return data about VM diagnostics."""
data = self._get_diagnostics(instance)
# Add a namespace to all of the diagnostsics
return dict([('vmware:' + k, v) for k, v in data.items()])
return {'vmware:' + k: v for k, v in data.items()}
def get_instance_diagnostics(self, instance):
"""Return data about VM diagnostics."""

View File

@ -351,7 +351,7 @@ class XenAPIDriver(driver.ComputeDriver):
# we only care about VMs that correspond to a nova-managed
# instance:
imap = dict([(inst['name'], inst['uuid']) for inst in instances])
imap = {inst['name']: inst['uuid'] for inst in instances}
bwcounters = []
# get a dictionary of instance names. values are dictionaries

View File

@ -478,8 +478,7 @@ class Failure(Exception):
return "XenAPI Fake Failure: %s" % str(self.details)
def _details_map(self):
return dict([(str(i), self.details[i])
for i in range(len(self.details))])
return {str(i): self.details[i] for i in range(len(self.details))}
class SessionBase(object):

View File

@ -2143,7 +2143,7 @@ class VMOps(object):
sr_ref = vm_utils.safe_find_sr(self._session)
vm_vdis = vm_utils.get_instance_vdis_for_sr(self._session,
vm_ref, sr_ref)
return dict((vdi, destination_sr_ref) for vdi in vm_vdis)
return {vdi: destination_sr_ref for vdi in vm_vdis}
def _call_live_migrate_command(self, command_name, vm_ref, migrate_data):
"""unpack xapi specific parameters, and call a live migrate command."""