Fix and gate on E125

E125: continuation line does not distinguish itself from the next
logical line.

Enforcing E125 makes code more readable, and fixing all existing
violations allows us to gate on it, making this one less thing
reviewers have to check for.

Fix was made using https://github.com/hhatto/autopep8

Change-Id: Ie569fd74fb84d220ceb87e37b1b1f39143bba80f
Joe Gordon  2013-06-07 17:08:38 -07:00
parent 9d02587d3c
commit efe1957237
57 changed files with 99 additions and 99 deletions
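
For reference, a minimal sketch of the kind of continuation-line change E125 asks for (hypothetical names, not taken from this diff):

    # Flagged by E125: the continuation line has the same indent as the
    # block body that follows, so the condition and the body blur together.
    def describe_zones(context, **kwargs):
        if ('zone_name' in kwargs and
            context.is_admin):
            return get_zones_verbose(context)

    # After the fix: the continuation is indented further, so it reads
    # distinctly from the next logical line.
    def describe_zones(context, **kwargs):
        if ('zone_name' in kwargs and
                context.is_admin):
            return get_zones_verbose(context)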

@@ -256,7 +256,7 @@ class CloudController(object):
    def describe_availability_zones(self, context, **kwargs):
        if ('zone_name' in kwargs and
                'verbose' in kwargs['zone_name'] and
                context.is_admin):
            return self._describe_availability_zones_verbose(context,
                                                             **kwargs)
        else:
@@ -629,7 +629,7 @@ class CloudController(object):
    def _validate_security_group_protocol(self, values):
        validprotocols = ['tcp', 'udp', 'icmp', '6', '17', '1']
        if 'ip_protocol' in values and \
                values['ip_protocol'] not in validprotocols:
            err = _('Invalid IP protocol %s.') % values['ip_protocol']
            raise exception.EC2APIError(message=err, code="400")
@@ -1074,7 +1074,7 @@ class CloudController(object):
                continue
            if (bdm['device_name'] == root_device_name and
                    (bdm['snapshot_id'] or bdm['volume_id'])):
                assert not bdm['virtual_name']
                root_device_type = 'ebs'
@@ -1464,7 +1464,7 @@ class CloudController(object):
            if (block_device.strip_dev(bdm.get('device_name')) ==
                    block_device.strip_dev(root_device_name) and
                    ('snapshot_id' in bdm or 'volume_id' in bdm) and
                    not bdm.get('no_device')):
                root_device_type = 'ebs'
        i['rootDeviceName'] = (root_device_name or
                               block_device.DEFAULT_ROOT_DEV_NAME)

@@ -260,7 +260,7 @@ class APIRouterV3(base_wsgi.Router):
            # Check whitelist is either empty or if not then the extension
            # is in the whitelist
            if (not CONF.osapi_v3.extensions_whitelist or
                    ext.obj.alias in CONF.osapi_v3.extensions_whitelist):
                # Check the extension is not in the blacklist
                if ext.obj.alias not in CONF.osapi_v3.extensions_blacklist:

@@ -164,7 +164,7 @@ class FloatingIPDNSDomainController(object):
        if (scope not in ('private', 'public') or
                project and av_zone or
                scope == 'private' and project or
                scope == 'public' and av_zone):
            raise webob.exc.HTTPUnprocessableEntity()
        if scope == 'private':
            create_dns_domain = self.network_api.create_private_dns_domain

@@ -1299,7 +1299,7 @@ class ServersController(wsgi.Controller):
    def _action_change_password(self, req, id, body):
        context = req.environ['nova.context']
        if (not 'changePassword' in body
                or 'adminPass' not in body['changePassword']):
            msg = _("No adminPass was specified")
            raise exc.HTTPBadRequest(explanation=msg)
        password = body['changePassword']['adminPass']

@@ -1174,7 +1174,7 @@ class Controller(wsgi.Controller):
    def _action_change_password(self, req, id, body):
        context = req.environ['nova.context']
        if (not 'changePassword' in body
                or 'adminPass' not in body['changePassword']):
            msg = _("No adminPass was specified")
            raise exc.HTTPBadRequest(explanation=msg)
        password = body['changePassword']['adminPass']

@@ -71,7 +71,7 @@ class ViewBuilder(object):
            # check for existing key
            for limit in limits:
                if (limit["uri"] == rate_limit["URI"] and
                        limit["regex"] == rate_limit["regex"]):
                    _rate_limit_key = limit
                    break

@@ -169,7 +169,7 @@ class URLMap(paste.urlmap.URLMap):
            if domain and domain != host and domain != host + ':' + port:
                continue
            if (path_info == app_url
                    or path_info.startswith(app_url + '/')):
                return app, app_url
        return None, None

@@ -254,7 +254,7 @@ class XMLDeserializer(TextDeserializer):
        for node in parent.childNodes:
            if (node.localName == name and
                    node.namespaceURI and
                    node.namespaceURI == namespace):
                return node
        return None
@@ -1020,7 +1020,7 @@ class Resource(wsgi.Application):
            meth = getattr(self.controller, action)
        except AttributeError:
            if (not self.wsgi_actions or
                    action not in _ROUTES_METHODS + ['action']):
                # Propagate the error
                raise
        else:

@@ -246,7 +246,7 @@ def mappings_prepend_dev(mappings):
    for m in mappings:
        virtual = m['virtual']
        if (is_swap_or_ephemeral(virtual) and
                (not m['device'].startswith('/'))):
            m['device'] = '/dev/' + m['device']
    return mappings

@@ -22,7 +22,7 @@ import sys
# implementation which doesn't work for IPv6. What we're checking here is
# that the magic environment variable was set when the import happened.
if ('eventlet' in sys.modules and
        os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
    raise ImportError('eventlet imported before nova/cmd/__init__ '
                      '(env var set to %s)'
                      % os.environ.get('EVENTLET_NO_GREENDNS'))

@@ -133,7 +133,7 @@ def check_instance_state(vm_state=None, task_state=(None,),
                    state=instance['vm_state'],
                    method=f.__name__)
            if (task_state is not None and
                    instance['task_state'] not in task_state):
                raise exception.InstanceInvalidState(
                    attr='task_state',
                    instance_uuid=instance['uuid'],
@@ -849,7 +849,7 @@ class API(base.Base):
            virtual_name = bdm.get('virtual_name')
            if (virtual_name is not None and
                    block_device.is_swap_or_ephemeral(virtual_name)):
                size = self._volume_size(instance_type, virtual_name)
                if size == 0:
                    continue
@@ -923,7 +923,7 @@ class API(base.Base):
        image_properties = image.get('properties', {})
        if (block_device_mapping or
                image_properties.get('mappings') or
                image_properties.get('block_device_mapping')):
            instance['shutdown_terminate'] = False
    def _populate_instance_names(self, instance, num_instances):

@@ -396,7 +396,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                                              instance_uuid,
                                              **kwargs)
                if (instance_ref['host'] == self.host and
                        instance_ref['node'] in self.driver.get_available_nodes()):
                    rt = self._get_resource_tracker(instance_ref.get('node'))
                    rt.update_usage(context, instance_ref)
@@ -759,7 +759,7 @@ class ComputeManager(manager.SchedulerDependentManager):
    def _get_instance_nw_info(self, context, instance):
        """Get a list of dictionaries of network data of an instance."""
        if (not hasattr(instance, 'system_metadata') or
                len(instance['system_metadata']) == 0):
            # NOTE(danms): Several places in the code look up instances without
            # pulling system_metadata for performance, and call this function.
            # If we get an instance without it, re-fetch so that the call
@@ -827,7 +827,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                continue
            if ((bdm['snapshot_id'] is not None) and
                    (bdm['volume_id'] is None)):
                # TODO(yamahata): default name and description
                snapshot = self.volume_api.get_snapshot(context,
                                                        bdm['snapshot_id'])

@@ -395,7 +395,7 @@ class ResourceTracker(object):
        # same node resize. record usage for whichever instance type the
        # instance is *not* in:
        if (instance['instance_type_id'] ==
                migration['old_instance_type_id']):
            itype = self._get_instance_type(context, instance, 'new_',
                                            migration['new_instance_type_id'])
        else:

@@ -4473,7 +4473,7 @@ def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
        if (rd_req < current_usage['curr_reads'] or
                rd_bytes < current_usage['curr_read_bytes'] or
                wr_req < current_usage['curr_writes'] or
                wr_bytes < current_usage['curr_write_bytes']):
            LOG.info(_("Volume(%s) has lower stats then what is in "
                       "the database. Instance must have been rebooted "
                       "or crashed. Updating totals.") % id)

@@ -240,7 +240,7 @@ def _downgrade_bdm_v2(meta, bdm_table):
    for instance_uuid, in itertools.chain(
            instance_q.execute().fetchall(),
            instance_shadow_q.execute().fetchall()):
        # Get all the bdms for an instance
        bdm_q = select(
            [bdm_table.c.id, bdm_table.c.source_type, bdm_table.c.guest_format]

@@ -47,7 +47,7 @@ def patched_with_engine(f, *a, **kw):
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
        dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
    migrate_util.with_engine = patched_with_engine

@@ -621,7 +621,7 @@ class FloatingIP(object):
        for db_domain in db_domain_list:
            if (db_domain in floating_driver_domain_list or
                    db_domain in instance_driver_domain_list):
                domain_entry = self._prepare_domain_entry(context,
                                                          db_domain)
                if domain_entry:

@@ -111,7 +111,7 @@ class MiniDNS(dns_driver.DNSDriver):
        for line in infile:
            entry = self.parse_line(line)
            if ((not entry) or
                    entry['name'] != self.qualify(name, domain)):
                outfile.write(line)
            else:
                deleted = True
@@ -133,7 +133,7 @@ class MiniDNS(dns_driver.DNSDriver):
        for line in infile:
            entry = self.parse_line(line)
            if (entry and
                    entry['name'] == self.qualify(name, domain)):
                outfile.write("%s %s %s\n" %
                    (address, self.qualify(name, domain), entry['type']))
            else:
@@ -162,7 +162,7 @@ class MiniDNS(dns_driver.DNSDriver):
        for line in infile:
            entry = self.parse_line(line)
            if (entry and
                    entry['name'] == self.qualify(name, domain)):
                entries.append(entry['address'])
        infile.close()
        return entries
@@ -195,7 +195,7 @@ class MiniDNS(dns_driver.DNSDriver):
        for line in infile:
            entry = self.parse_line(line)
            if ((not entry) or
                    entry['domain'] != fqdomain.lower()):
                outfile.write(line)
            else:
                LOG.info(_("deleted %s"), entry)

@@ -69,7 +69,7 @@ class SecurityGroupBase(object):
        # specified, but only if a source group was specified.
        ip_proto_upper = ip_protocol.upper() if ip_protocol else ''
        if (ip_proto_upper == 'ICMP' and
                from_port is None and to_port is None):
            from_port = -1
            to_port = -1
        elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None
@@ -101,14 +101,14 @@ class SecurityGroupBase(object):
        # Verify that from_port must always be less than
        # or equal to to_port
        if (ip_protocol.upper() in ['TCP', 'UDP'] and
                (from_port > to_port)):
            raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="Former value cannot"
                      " be greater than the later")
        # Verify valid TCP, UDP port ranges
        if (ip_protocol.upper() in ['TCP', 'UDP'] and
                (from_port < 1 or to_port > 65535)):
            raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="Valid TCP ports should"
                      " be between 1-65535")
@@ -116,7 +116,7 @@ class SecurityGroupBase(object):
        # Verify ICMP type and code
        if (ip_protocol.upper() == "ICMP" and
                (from_port < -1 or from_port > 255 or
                 to_port < -1 or to_port > 255)):
            raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="For ICMP, the"
                      " type:code must be valid")

@@ -93,7 +93,7 @@ def send_update(context, old_instance, new_instance, service=None, host=None):
            update_with_state_change = True
        elif CONF.notify_on_state_change:
            if (CONF.notify_on_state_change.lower() == "vm_and_task_state" and
                    old_task_state != new_task_state):
                # yes, the task state is changing:
                update_with_state_change = True
@@ -141,7 +141,7 @@ def send_update_with_states(context, instance, old_vm_state, new_vm_state,
        fire_update = True
    elif CONF.notify_on_state_change:
        if (CONF.notify_on_state_change.lower() == "vm_and_task_state" and
                old_task_state != new_task_state):
            # yes, the task state is changing:
            fire_update = True
@@ -227,7 +227,7 @@ def bandwidth_usage(instance_ref, audit_start,
    admin_context = nova.context.get_admin_context(read_deleted='yes')
    if (instance_ref.get('info_cache') and
            instance_ref['info_cache'].get('network_info') is not None):
        cached_info = instance_ref['info_cache']['network_info']
        nw_info = network_model.NetworkInfo.hydrate(cached_info)

@@ -334,7 +334,7 @@ class NovaObject(object):
        """
        for name in self.fields.keys() + self.obj_extra_fields:
            if (hasattr(self, get_attrname(name)) or
                    name in self.obj_extra_fields):
                yield name, getattr(self, name)
    items = lambda self: list(self.iteritems())

@@ -265,7 +265,7 @@ class Instance(base.NovaObject):
        changes = self.obj_what_changed()
        for field in self.fields:
            if (hasattr(self, base.get_attrname(field)) and
                    isinstance(self[field], base.NovaObject)):
                getattr(self, '_save_%s' % field)(context)
            elif field in changes:
                updates[field] = self[field]
@@ -296,7 +296,7 @@ class Instance(base.NovaObject):
                                          expected_attrs=extra)
        for field in self.fields:
            if (hasattr(self, base.get_attrname(field)) and
                    self[field] != current[field]):
                self[field] = current[field]
        self.obj_reset_changes()

@@ -239,7 +239,7 @@ class BucketHandler(BaseRequestHandler):
                                            bucket_name))
        terse = int(self.get_argument("terse", 0))
        if (not path.startswith(self.application.directory) or
                not os.path.isdir(path)):
            self.set_404()
            return
        object_names = []
@@ -290,7 +290,7 @@ class BucketHandler(BaseRequestHandler):
        path = os.path.abspath(os.path.join(
            self.application.directory, bucket_name))
        if (not path.startswith(self.application.directory) or
                os.path.exists(path)):
            self.set_status(403)
            return
        fileutils.ensure_tree(path)
@@ -300,7 +300,7 @@ class BucketHandler(BaseRequestHandler):
        path = os.path.abspath(os.path.join(
            self.application.directory, bucket_name))
        if (not path.startswith(self.application.directory) or
                not os.path.isdir(path)):
            self.set_404()
            return
        if len(os.listdir(path)) > 0:
@@ -316,7 +316,7 @@ class ObjectHandler(BaseRequestHandler):
        object_name = urllib.unquote(object_name)
        path = self._object_path(bucket, object_name)
        if (not path.startswith(self.application.directory) or
                not os.path.isfile(path)):
            self.set_404()
            return
        info = os.stat(path)
@@ -334,7 +334,7 @@ class ObjectHandler(BaseRequestHandler):
        bucket_dir = os.path.abspath(os.path.join(
            self.application.directory, bucket))
        if (not bucket_dir.startswith(self.application.directory) or
                not os.path.isdir(bucket_dir)):
            self.set_404()
            return
        path = self._object_path(bucket, object_name)
@@ -354,7 +354,7 @@ class ObjectHandler(BaseRequestHandler):
        object_name = urllib.unquote(object_name)
        path = self._object_path(bucket, object_name)
        if (not path.startswith(self.application.directory) or
                not os.path.isfile(path)):
            self.set_404()
            return
        os.unlink(path)

@@ -142,7 +142,7 @@ class HostState(object):
    def update_from_compute_node(self, compute):
        """Update information about a host from its compute_node info."""
        if (self.updated and compute['updated_at']
                and self.updated > compute['updated_at']):
            return
        all_ram_mb = compute['memory_mb']

@@ -97,7 +97,7 @@ class SchedulerOptions(object):
        last_modified = self._get_file_timestamp(filename)
        if (not last_modified or not self.last_modified or
                last_modified > self.last_modified):
            self.data = self._load_file(self._get_file_handle(filename))
            self.last_modified = last_modified
        if not self.data:

@@ -33,7 +33,7 @@ import sys
# implementation which doesn't work for IPv6. What we're checking here is
# that the magic environment variable was set when the import happened.
if ('eventlet' in sys.modules and
        os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
    raise ImportError('eventlet imported before nova/cmd/__init__ '
                      '(env var set to %s)'
                      % os.environ.get('EVENTLET_NO_GREENDNS'))

@@ -476,7 +476,7 @@ class CinderCloudTestCase(test.TestCase):
            self.volume_api.delete(self.context, vol['id'])
        for uuid in (inst1['uuid'], inst2['uuid']):
            for bdm in db.block_device_mapping_get_all_by_instance(
                    self.context, uuid):
                db.block_device_mapping_destroy(self.context, bdm['id'])
        db.instance_destroy(self.context, inst2['uuid'])
        db.instance_destroy(self.context, inst1['uuid'])

@@ -1821,7 +1821,7 @@ class ServersControllerCreateTest(test.TestCase):
        def rpc_call_wrapper(context, topic, msg, timeout=None):
            """Stub out the scheduler creating the instance entry."""
            if (topic == CONF.scheduler_topic and
                    msg['method'] == 'run_instance'):
                request_spec = msg['args']['request_spec']
                num_instances = request_spec.get('num_instances', 1)
                instances = []
@@ -1954,7 +1954,7 @@ class ServersControllerCreateTest(test.TestCase):
        req.headers["content-type"] = "application/json"
        with testtools.ExpectedException(
                webob.exc.HTTPBadRequest,
                'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
            self.controller.create(req, body)
    def test_create_server_image_too_large(self):
@@ -1983,7 +1983,7 @@ class ServersControllerCreateTest(test.TestCase):
        req.headers["content-type"] = "application/json"
        with testtools.ExpectedException(
                webob.exc.HTTPBadRequest,
                "Instance type's disk is too small for requested image."):
            self.controller.create(req, body)
    # TODO(cyeoh): bp-v3-api-unittests

@@ -1827,7 +1827,7 @@ class ServersControllerCreateTest(test.TestCase):
        def rpc_call_wrapper(context, topic, msg, timeout=None):
            """Stub out the scheduler creating the instance entry."""
            if (topic == CONF.scheduler_topic and
                    msg['method'] == 'run_instance'):
                request_spec = msg['args']['request_spec']
                num_instances = request_spec.get('num_instances', 1)
                instances = []
@@ -1958,7 +1958,7 @@ class ServersControllerCreateTest(test.TestCase):
        req.headers["content-type"] = "application/json"
        with testtools.ExpectedException(
                webob.exc.HTTPBadRequest,
                'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
            self.controller.create(req, body)
    def test_create_server_image_too_large(self):
@@ -1987,7 +1987,7 @@ class ServersControllerCreateTest(test.TestCase):
        req.headers["content-type"] = "application/json"
        with testtools.ExpectedException(
                webob.exc.HTTPBadRequest,
                "Instance type's disk is too small for requested image."):
            self.controller.create(req, body)
    def test_create_instance_invalid_negative_min(self):

@@ -1402,7 +1402,7 @@ class ComputeTestCase(BaseTestCase):
        instance['vm_state'] = 'some_random_state'
        with testtools.ExpectedException(
                exception.InstanceNotRescuable, expected_message):
            self.compute.rescue_instance(
                self.context, instance=instance,
                rescue_password='password')
@@ -4536,7 +4536,7 @@ class ComputeTestCase(BaseTestCase):
        self.assertNotEqual(migration_ref, None)
        for migration in migrations:
            if (migration['instance_uuid'] ==
                    migration_ref['instance_uuid']):
                migration['status'] = 'confirmed'
        self.stubs.Set(db, 'instance_get_by_uuid',
@@ -7063,7 +7063,7 @@ class ComputeAPITestCase(BaseTestCase):
        self.assertThat(bdms, matchers.DictListMatches(expected_result))
        for bdm in db.block_device_mapping_get_all_by_instance(
                self.context, instance['uuid']):
            db.block_device_mapping_destroy(self.context, bdm['id'])
        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
        self.compute.terminate_instance(self.context, instance)
@@ -8876,7 +8876,7 @@ class EvacuateHostTestCase(BaseTestCase):
        # cleanup
        for bdms in db.block_device_mapping_get_all_by_instance(
                self.context, self.inst_ref['uuid']):
            db.block_device_mapping_destroy(self.context, bdms['id'])
    def test_rebuild_on_host_with_shared_storage(self):

@@ -291,7 +291,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
        template_data = self._read_template(name)
        if (self.generate_samples and
                not os.path.exists(self._get_sample(name))):
            self._write_sample(name, response_data)
            sample_data = response_data
        else:
@@ -429,7 +429,7 @@ class ApiSamplesTrap(ApiSampleTestBase):
            # NOTE(danms): if you add an extension, it must come with
            # api_samples tests!
            if (extension not in tests and
                    extension not in do_not_approve_additions):
                missing_tests.append(extension)
        if missing_tests:

@@ -410,7 +410,7 @@ class XMLMatches(object):
        if len(expected) == 0 and len(actual) == 0:
            # No children, compare text values
            if ('DONTCARE' not in (expected.text, actual.text) and
                    expected.text != actual.text):
                return XMLTextValueMismatch(state, expected.text,
                                            actual.text)
        else:
@@ -423,7 +423,7 @@ class XMLMatches(object):
                # allow for, say, arbitrary ordering of some
                # elements
                if (expected[expected_idx].tag in
                        (etree.Comment, etree.ProcessingInstruction)):
                    expected_idx += 1
                    continue
@@ -447,7 +447,7 @@ class XMLMatches(object):
        if expected_idx < len(expected):
            for node in expected[expected_idx:]:
                if (node.tag in
                        (etree.Comment, etree.ProcessingInstruction)):
                    continue
                return XMLExpectedChild(state, node.tag, actual_idx)

@@ -200,7 +200,7 @@ def get_associated(context, network_id, host=None, address=None):
    for datum in fixed_ips:
        if (datum['network_id'] == network_id and datum['allocated']
                and datum['instance_uuid'] is not None
                and datum['virtual_interface_id'] is not None):
            instance = instances[datum['instance_uuid']]
            if host and host != instance['host']:
                continue

@@ -87,7 +87,7 @@ def return_non_existing_address(*args, **kwarg):
def fake_InstanceMetadata(stubs, inst_data, address=None,
                          sgroups=None, content=[], extra_md={}):
    if sgroups is None:
        sgroups = [{'name': 'default'}]

@@ -62,7 +62,7 @@ class QuotaIntegrationTestCase(test.TestCase):
        def rpc_call_wrapper(context, topic, msg, timeout=None):
            """Stub out the scheduler creating the instance entry."""
            if (topic == CONF.scheduler_topic and
                    msg['method'] == 'run_instance'):
                scheduler = scheduler_driver.Scheduler
                instance = scheduler().create_instance_db_entry(
                    context,

@@ -357,7 +357,7 @@ class Domain(object):
    def attachDeviceFlags(self, xml, flags):
        if (flags & VIR_DOMAIN_AFFECT_LIVE and
                self._state != VIR_DOMAIN_RUNNING):
            raise libvirtError("AFFECT_LIVE only allowed for running domains!")
        self.attachDevice(xml)

@@ -353,7 +353,7 @@ def get_my_ipv4_address():
    for match in re.finditer(route, out[0]):
        subnet = netaddr.IPNetwork(match.group(1) + "/" + match.group(2))
        if (match.group(3) == iface and
                netaddr.IPAddress(gateway) in subnet):
            try:
                return _get_ipv4_address_for_interface(iface)
            except exception.NovaException:
@@ -681,7 +681,7 @@ def is_valid_cidr(address):
    ip_segment = address.split('/')
    if (len(ip_segment) <= 1 or
            ip_segment[1] == ''):
        return False
    return True

@@ -45,7 +45,7 @@ def patched_with_engine(f, *a, **kw):
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
        dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
    migrate_util.with_engine = patched_with_engine

@@ -334,7 +334,7 @@ class Tilera(base.NodeDriver):
            status = row.get('task_state')
            if (status == baremetal_states.DEPLOYING and
                    locals['started'] == False):
                LOG.info(_('Tilera deploy started for instance %s')
                         % instance['uuid'])
                locals['started'] = True

@@ -149,7 +149,7 @@ class Pdu(base.PowerManager):
    def activate_node(self):
        """Turns the power to node ON."""
        if (self._is_power(CONF.baremetal.tile_pdu_on)
                and self.state == baremetal_states.ACTIVE):
            LOG.warning(_("Activate node called, but node %s "
                          "is already active") % self.address)
        self._power_on()

@@ -377,7 +377,7 @@ def get_disk_mapping(virt_type, instance,
            mapping['disk.local'] = eph_info
        for eph in driver.block_device_info_get_ephemerals(
                block_device_info):
            disk_dev = block_device.strip_dev(eph['device_name'])
            disk_bus = get_disk_bus_for_disk_dev(virt_type, disk_dev)

@@ -297,7 +297,7 @@ class LibvirtConfigCPU(LibvirtConfigObject):
        if (self.sockets is not None and
                self.cores is not None and
                self.threads is not None):
            top = etree.Element("topology")
            top.set("sockets", str(self.sockets))
            top.set("cores", str(self.cores))
@@ -484,7 +484,7 @@ class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice):
        dev.set("device", self.source_device)
        if (self.driver_name is not None or
                self.driver_format is not None or
                self.driver_cache is not None):
            drv = etree.Element("driver")
            if self.driver_name is not None:
                drv.set("name", self.driver_name)

@@ -2148,7 +2148,7 @@ class LibvirtDriver(driver.ComputeDriver):
                             '/dev/' + disklocal.target_dev})
            for eph in driver.block_device_info_get_ephemerals(
                    block_device_info):
                diskeph = self.get_guest_disk_config(
                    instance,
                    blockinfo.get_eph_disk(eph),
@@ -3809,7 +3809,7 @@ class LibvirtDriver(driver.ComputeDriver):
        # then ensure we're in 'raw' format so we can extend file system.
        fmt = info['type']
        if (size and fmt == 'qcow2' and
                disk.can_resize_fs(info['path'], size, use_cow=True)):
            path_raw = info['path'] + '_raw'
            utils.execute('qemu-img', 'convert', '-f', 'qcow2',
                          '-O', 'raw', info['path'], path_raw)

@@ -300,7 +300,7 @@ class ImageCacheManager(object):
                             task_states.RESIZE_MIGRATED,
                             task_states.RESIZE_FINISH]
            if instance['task_state'] in resize_states or \
                    instance['vm_state'] == vm_states.RESIZED:
                self.instance_names.add(instance['name'] + '_resize')
                self.instance_names.add(instance['uuid'] + '_resize')
@@ -406,7 +406,7 @@ class ImageCacheManager(object):
            # shared storage), then we don't need to checksum again.
            if (stored_timestamp and
                    time.time() - stored_timestamp <
                    CONF.checksum_interval_seconds):
                return True
            # NOTE(mikal): If there is no timestamp, then the checksum was
@@ -494,7 +494,7 @@ class ImageCacheManager(object):
            self.unexplained_images.remove(base_file)
        if (base_file and os.path.exists(base_file)
                and os.path.isfile(base_file)):
            # _verify_checksum returns True if the checksum is ok, and None if
            # there is no checksum file
            checksum_result = self._verify_checksum(img_id, base_file)

@@ -111,7 +111,7 @@ class LibvirtBaseVIFDriver(object):
        # to the global config parameter
        if (model is None and
                CONF.libvirt_type in ('kvm', 'qemu') and
                CONF.libvirt_use_virtio_for_bridges):
            model = "virtio"
        # Workaround libvirt bug, where it mistakenly
@@ -332,7 +332,7 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
        network, mapping = vif
        if (not network.get('multi_host') and
                mapping.get('should_create_bridge')):
            if mapping.get('should_create_vlan'):
                iface = CONF.vlan_interface or network['bridge_interface']
                LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'),

@@ -153,7 +153,7 @@ class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
        conf.source_ports = netdisk_properties.get('ports', [])
        auth_enabled = netdisk_properties.get('auth_enabled')
        if (conf.source_protocol == 'rbd' and
                CONF.rbd_secret_uuid):
            conf.auth_secret_uuid = CONF.rbd_secret_uuid
            auth_enabled = True  # Force authentication locally
            if CONF.rbd_user:

@@ -81,7 +81,7 @@ def check_connection(ssh, connection):
    # if the ssh client is not set or the transport is dead, re-connect
    if (ssh is None or
            ssh.get_transport() is None or
            not ssh.get_transport().is_active()):
        LOG.debug("Connection to host %s will be established." %
                  connection.host)
        ssh = ssh_connect(connection)

@@ -177,7 +177,7 @@ class Vim:
            # Socket errors which need special handling for they
            # might be caused by ESX API call overload
            if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
                    str(excep).find(CONN_ABORT_ERROR)) != -1:
                raise error_util.SessionOverLoadException(_("Socket "
                    "error in %s: ") % (attr_name), excep)
            # Type error that needs special handling for it might be

@@ -146,7 +146,7 @@ def create_network_spec(client_factory, vif_info):
    mac_address = vif_info['mac_address']
    backing = None
    if (network_ref and
            network_ref['type'] == "DistributedVirtualPortgroup"):
        backing_name = ''.join(['ns0:VirtualEthernetCardDistributed',
                                'VirtualPortBackingInfo'])
        backing = client_factory.create(backing_name)
@@ -280,7 +280,7 @@ def get_rdm_disk(hardware_devices, uuid):
        if (device.__class__.__name__ == "VirtualDisk" and
                device.backing.__class__.__name__ ==
                "VirtualDiskRawDiskMappingVer1BackingInfo" and
                device.backing.lunUuid == uuid):
            return device

@@ -109,7 +109,7 @@ def find_st(session, data, cluster=None):
        for target in adapter.target:
            if (getattr(target.transport, 'address', None) and
                    target.transport.address[0] == target_portal and
                    target.transport.iScsiName == target_iqn):
                if not target.lun:
                    return result
                for lun in target.lun:

@@ -758,7 +758,7 @@ class XenAPISession(object):
            LOG.debug(_("Got exception: %s"), exc)
            if (len(exc.details) == 4 and
                    exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
                    exc.details[2] == 'Failure'):
                params = None
                try:
                    # FIXME(comstud): eval is evil.

@@ -828,7 +828,7 @@ class SessionBase(object):
        val = params[2]
        if (ref in _db_content[cls] and
                field in _db_content[cls][ref]):
            _db_content[cls][ref][field] = val
            return
@@ -897,7 +897,7 @@ class SessionBase(object):
    def _check_session(self, params):
        if (self._session is None or
                self._session not in _db_content['session']):
            raise Failure(['HANDLE_INVALID', 'session', self._session])
        if len(params) == 0 or params[0] != self._session:
            LOG.debug(_('Raising NotImplemented'))

@@ -515,7 +515,7 @@ def get_vdis_for_instance(context, session, instance, name_label, image,
        for bdm in block_device_info['block_device_mapping']:
            if (block_device.strip_prefix(bdm['mount_device']) ==
                    block_device.strip_prefix(root_device_name)):
                # If we're a root-device, record that fact so we don't download
                # a root image via Glance
                type_ = 'root'
@@ -1481,7 +1481,7 @@ def set_vm_name_label(session, vm_ref, name_label):
def list_vms(session):
    for vm_ref, vm_rec in session.get_all_refs_and_recs('VM'):
        if (vm_rec["resident_on"] != session.get_xenapi_host() or
                vm_rec["is_a_template"] or vm_rec["is_control_domain"]):
            continue
        else:
            yield vm_ref, vm_rec

@@ -675,7 +675,7 @@ class VMOps(object):
                          instance=instance)
            if (version and agent_build and
                    cmp_version(version, agent_build['version']) < 0):
                agent.agent_update(agent_build)
        # if the guest agent is not available, configure the

@@ -245,7 +245,7 @@ def parse_volume_info(connection_data):
    if (volume_id is None or
            target_host is None or
            target_iqn is None):
        raise StorageError(_('Unable to obtain target information'
                             ' %s') % connection_data)
    volume_info = {}
@@ -254,7 +254,7 @@ def parse_volume_info(connection_data):
    volume_info['port'] = target_port
    volume_info['targetIQN'] = target_iqn
    if ('auth_method' in connection_data and
            connection_data['auth_method'] == 'CHAP'):
        volume_info['chapuser'] = connection_data['auth_username']
        volume_info['chappassword'] = connection_data['auth_password']

@@ -74,7 +74,7 @@ class AddressTests(base.UserSmokeTestCase):
        for rule in groups[0].rules:
            if (rule.ip_protocol == 'tcp' and
                    int(rule.from_port) <= 22 and
                    int(rule.to_port) >= 22):
                ssh_authorized = True
                break
        if not ssh_authorized:

@@ -39,7 +39,7 @@ commands =
commands = {posargs}
[flake8]
-ignore = E121,E122,E123,E124,E125,E126,E127,E128,E711,E712,H302,H303,H404,F403,F811,F841,N309
+ignore = E121,E122,E123,E124,E126,E127,E128,E711,E712,H302,H303,H404,F403,F811,F841,N309
builtins = _
exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,plugins,tools
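
With E125 dropped from the ignore list above, any new continuation-line violation will fail the pep8 gate; running flake8 with --select=E125 over the nova tree locally (assuming flake8 is installed in your environment) should reproduce the same check before pushing a change.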