Fix and gate on E125

E125: continuation line does not distinguish itself from the next
logical line.

Enforcing E125 makes code more readable, and fixing all of the existing
violations lets us gate on it, so this becomes one less thing reviewers
have to check for.

The fix was generated with https://github.com/hhatto/autopep8

Change-Id: Ie569fd74fb84d220ceb87e37b1b1f39143bba80f
Joe Gordon 2013-06-07 17:08:38 -07:00
parent 9d02587d3c
commit efe1957237
57 changed files with 99 additions and 99 deletions
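
Nearly every hunk below follows the same pattern: the flagged continuation
line is pushed in by one more indent level so that it no longer lines up
with the first statement of the block. (The final tox.ini hunk is the
gating change itself, dropping E125 from the flake8 ignore list.) As a
minimal, hypothetical sketch of what E125 flags and how the
autopep8-generated fix resolves it, the condition below is borrowed from
the first hunk, while the helper function and the _Ctx class are invented
for illustration; the tool was presumably invoked along the lines of
"autopep8 --select=E125 --in-place --recursive .".

    # Before: the continuation line has the same indentation as the next
    # logical line, so the tail of the condition visually merges with the
    # body of the if-block; this is what pep8 reports as E125.
    #
    #     if ('zone_name' in kwargs and
    #         context.is_admin):
    #         return True
    #
    # After: the continuation line is indented one extra level, keeping
    # the condition visually distinct from the statement that follows it.
    def is_verbose_request(kwargs, context):
        if ('zone_name' in kwargs and
                context.is_admin):
            return True
        return False

    class _Ctx(object):
        """Stand-in for a request context, used only to exercise the sketch."""
        is_admin = True

    print(is_verbose_request({'zone_name': 'verbose'}, _Ctx()))  # prints: True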

View File

@ -256,7 +256,7 @@ class CloudController(object):
def describe_availability_zones(self, context, **kwargs):
if ('zone_name' in kwargs and
'verbose' in kwargs['zone_name'] and
context.is_admin):
context.is_admin):
return self._describe_availability_zones_verbose(context,
**kwargs)
else:
@ -629,7 +629,7 @@ class CloudController(object):
def _validate_security_group_protocol(self, values):
validprotocols = ['tcp', 'udp', 'icmp', '6', '17', '1']
if 'ip_protocol' in values and \
values['ip_protocol'] not in validprotocols:
values['ip_protocol'] not in validprotocols:
err = _('Invalid IP protocol %s.') % values['ip_protocol']
raise exception.EC2APIError(message=err, code="400")
@ -1074,7 +1074,7 @@ class CloudController(object):
continue
if (bdm['device_name'] == root_device_name and
(bdm['snapshot_id'] or bdm['volume_id'])):
(bdm['snapshot_id'] or bdm['volume_id'])):
assert not bdm['virtual_name']
root_device_type = 'ebs'
@ -1464,7 +1464,7 @@ class CloudController(object):
if (block_device.strip_dev(bdm.get('device_name')) ==
block_device.strip_dev(root_device_name) and
('snapshot_id' in bdm or 'volume_id' in bdm) and
not bdm.get('no_device')):
not bdm.get('no_device')):
root_device_type = 'ebs'
i['rootDeviceName'] = (root_device_name or
block_device.DEFAULT_ROOT_DEV_NAME)

View File

@ -260,7 +260,7 @@ class APIRouterV3(base_wsgi.Router):
# Check whitelist is either empty or if not then the extension
# is in the whitelist
if (not CONF.osapi_v3.extensions_whitelist or
ext.obj.alias in CONF.osapi_v3.extensions_whitelist):
ext.obj.alias in CONF.osapi_v3.extensions_whitelist):
# Check the extension is not in the blacklist
if ext.obj.alias not in CONF.osapi_v3.extensions_blacklist:

View File

@ -164,7 +164,7 @@ class FloatingIPDNSDomainController(object):
if (scope not in ('private', 'public') or
project and av_zone or
scope == 'private' and project or
scope == 'public' and av_zone):
scope == 'public' and av_zone):
raise webob.exc.HTTPUnprocessableEntity()
if scope == 'private':
create_dns_domain = self.network_api.create_private_dns_domain

View File

@ -1299,7 +1299,7 @@ class ServersController(wsgi.Controller):
def _action_change_password(self, req, id, body):
context = req.environ['nova.context']
if (not 'changePassword' in body
or 'adminPass' not in body['changePassword']):
or 'adminPass' not in body['changePassword']):
msg = _("No adminPass was specified")
raise exc.HTTPBadRequest(explanation=msg)
password = body['changePassword']['adminPass']

View File

@ -1174,7 +1174,7 @@ class Controller(wsgi.Controller):
def _action_change_password(self, req, id, body):
context = req.environ['nova.context']
if (not 'changePassword' in body
or 'adminPass' not in body['changePassword']):
or 'adminPass' not in body['changePassword']):
msg = _("No adminPass was specified")
raise exc.HTTPBadRequest(explanation=msg)
password = body['changePassword']['adminPass']

View File

@ -71,7 +71,7 @@ class ViewBuilder(object):
# check for existing key
for limit in limits:
if (limit["uri"] == rate_limit["URI"] and
limit["regex"] == rate_limit["regex"]):
limit["regex"] == rate_limit["regex"]):
_rate_limit_key = limit
break

View File

@ -169,7 +169,7 @@ class URLMap(paste.urlmap.URLMap):
if domain and domain != host and domain != host + ':' + port:
continue
if (path_info == app_url
or path_info.startswith(app_url + '/')):
or path_info.startswith(app_url + '/')):
return app, app_url
return None, None

View File

@ -254,7 +254,7 @@ class XMLDeserializer(TextDeserializer):
for node in parent.childNodes:
if (node.localName == name and
node.namespaceURI and
node.namespaceURI == namespace):
node.namespaceURI == namespace):
return node
return None
@ -1020,7 +1020,7 @@ class Resource(wsgi.Application):
meth = getattr(self.controller, action)
except AttributeError:
if (not self.wsgi_actions or
action not in _ROUTES_METHODS + ['action']):
action not in _ROUTES_METHODS + ['action']):
# Propagate the error
raise
else:

View File

@ -246,7 +246,7 @@ def mappings_prepend_dev(mappings):
for m in mappings:
virtual = m['virtual']
if (is_swap_or_ephemeral(virtual) and
(not m['device'].startswith('/'))):
(not m['device'].startswith('/'))):
m['device'] = '/dev/' + m['device']
return mappings

View File

@ -22,7 +22,7 @@ import sys
# implementation which doesn't work for IPv6. What we're checking here is
# that the magic environment variable was set when the import happened.
if ('eventlet' in sys.modules and
os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
raise ImportError('eventlet imported before nova/cmd/__init__ '
'(env var set to %s)'
% os.environ.get('EVENTLET_NO_GREENDNS'))

View File

@ -133,7 +133,7 @@ def check_instance_state(vm_state=None, task_state=(None,),
state=instance['vm_state'],
method=f.__name__)
if (task_state is not None and
instance['task_state'] not in task_state):
instance['task_state'] not in task_state):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
@ -849,7 +849,7 @@ class API(base.Base):
virtual_name = bdm.get('virtual_name')
if (virtual_name is not None and
block_device.is_swap_or_ephemeral(virtual_name)):
block_device.is_swap_or_ephemeral(virtual_name)):
size = self._volume_size(instance_type, virtual_name)
if size == 0:
continue
@ -923,7 +923,7 @@ class API(base.Base):
image_properties = image.get('properties', {})
if (block_device_mapping or
image_properties.get('mappings') or
image_properties.get('block_device_mapping')):
image_properties.get('block_device_mapping')):
instance['shutdown_terminate'] = False
def _populate_instance_names(self, instance, num_instances):

View File

@ -396,7 +396,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_uuid,
**kwargs)
if (instance_ref['host'] == self.host and
instance_ref['node'] in self.driver.get_available_nodes()):
instance_ref['node'] in self.driver.get_available_nodes()):
rt = self._get_resource_tracker(instance_ref.get('node'))
rt.update_usage(context, instance_ref)
@ -759,7 +759,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def _get_instance_nw_info(self, context, instance):
"""Get a list of dictionaries of network data of an instance."""
if (not hasattr(instance, 'system_metadata') or
len(instance['system_metadata']) == 0):
len(instance['system_metadata']) == 0):
# NOTE(danms): Several places in the code look up instances without
# pulling system_metadata for performance, and call this function.
# If we get an instance without it, re-fetch so that the call
@ -827,7 +827,7 @@ class ComputeManager(manager.SchedulerDependentManager):
continue
if ((bdm['snapshot_id'] is not None) and
(bdm['volume_id'] is None)):
(bdm['volume_id'] is None)):
# TODO(yamahata): default name and description
snapshot = self.volume_api.get_snapshot(context,
bdm['snapshot_id'])

View File

@ -395,7 +395,7 @@ class ResourceTracker(object):
# same node resize. record usage for whichever instance type the
# instance is *not* in:
if (instance['instance_type_id'] ==
migration['old_instance_type_id']):
migration['old_instance_type_id']):
itype = self._get_instance_type(context, instance, 'new_',
migration['new_instance_type_id'])
else:

View File

@ -4473,7 +4473,7 @@ def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
if (rd_req < current_usage['curr_reads'] or
rd_bytes < current_usage['curr_read_bytes'] or
wr_req < current_usage['curr_writes'] or
wr_bytes < current_usage['curr_write_bytes']):
wr_bytes < current_usage['curr_write_bytes']):
LOG.info(_("Volume(%s) has lower stats then what is in "
"the database. Instance must have been rebooted "
"or crashed. Updating totals.") % id)

View File

@ -240,7 +240,7 @@ def _downgrade_bdm_v2(meta, bdm_table):
for instance_uuid, in itertools.chain(
instance_q.execute().fetchall(),
instance_shadow_q.execute().fetchall()):
instance_shadow_q.execute().fetchall()):
# Get all the bdms for an instance
bdm_q = select(
[bdm_table.c.id, bdm_table.c.source_type, bdm_table.c.guest_format]

View File

@ -47,7 +47,7 @@ def patched_with_engine(f, *a, **kw):
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
migrate_util.with_engine = patched_with_engine

View File

@ -621,7 +621,7 @@ class FloatingIP(object):
for db_domain in db_domain_list:
if (db_domain in floating_driver_domain_list or
db_domain in instance_driver_domain_list):
db_domain in instance_driver_domain_list):
domain_entry = self._prepare_domain_entry(context,
db_domain)
if domain_entry:

View File

@ -111,7 +111,7 @@ class MiniDNS(dns_driver.DNSDriver):
for line in infile:
entry = self.parse_line(line)
if ((not entry) or
entry['name'] != self.qualify(name, domain)):
entry['name'] != self.qualify(name, domain)):
outfile.write(line)
else:
deleted = True
@ -133,7 +133,7 @@ class MiniDNS(dns_driver.DNSDriver):
for line in infile:
entry = self.parse_line(line)
if (entry and
entry['name'] == self.qualify(name, domain)):
entry['name'] == self.qualify(name, domain)):
outfile.write("%s %s %s\n" %
(address, self.qualify(name, domain), entry['type']))
else:
@ -162,7 +162,7 @@ class MiniDNS(dns_driver.DNSDriver):
for line in infile:
entry = self.parse_line(line)
if (entry and
entry['name'] == self.qualify(name, domain)):
entry['name'] == self.qualify(name, domain)):
entries.append(entry['address'])
infile.close()
return entries
@ -195,7 +195,7 @@ class MiniDNS(dns_driver.DNSDriver):
for line in infile:
entry = self.parse_line(line)
if ((not entry) or
entry['domain'] != fqdomain.lower()):
entry['domain'] != fqdomain.lower()):
outfile.write(line)
else:
LOG.info(_("deleted %s"), entry)

View File

@ -69,7 +69,7 @@ class SecurityGroupBase(object):
# specified, but only if a source group was specified.
ip_proto_upper = ip_protocol.upper() if ip_protocol else ''
if (ip_proto_upper == 'ICMP' and
from_port is None and to_port is None):
from_port is None and to_port is None):
from_port = -1
to_port = -1
elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None
@ -101,14 +101,14 @@ class SecurityGroupBase(object):
# Verify that from_port must always be less than
# or equal to to_port
if (ip_protocol.upper() in ['TCP', 'UDP'] and
(from_port > to_port)):
(from_port > to_port)):
raise exception.InvalidPortRange(from_port=from_port,
to_port=to_port, msg="Former value cannot"
" be greater than the later")
# Verify valid TCP, UDP port ranges
if (ip_protocol.upper() in ['TCP', 'UDP'] and
(from_port < 1 or to_port > 65535)):
(from_port < 1 or to_port > 65535)):
raise exception.InvalidPortRange(from_port=from_port,
to_port=to_port, msg="Valid TCP ports should"
" be between 1-65535")
@ -116,7 +116,7 @@ class SecurityGroupBase(object):
# Verify ICMP type and code
if (ip_protocol.upper() == "ICMP" and
(from_port < -1 or from_port > 255 or
to_port < -1 or to_port > 255)):
to_port < -1 or to_port > 255)):
raise exception.InvalidPortRange(from_port=from_port,
to_port=to_port, msg="For ICMP, the"
" type:code must be valid")

View File

@ -93,7 +93,7 @@ def send_update(context, old_instance, new_instance, service=None, host=None):
update_with_state_change = True
elif CONF.notify_on_state_change:
if (CONF.notify_on_state_change.lower() == "vm_and_task_state" and
old_task_state != new_task_state):
old_task_state != new_task_state):
# yes, the task state is changing:
update_with_state_change = True
@ -141,7 +141,7 @@ def send_update_with_states(context, instance, old_vm_state, new_vm_state,
fire_update = True
elif CONF.notify_on_state_change:
if (CONF.notify_on_state_change.lower() == "vm_and_task_state" and
old_task_state != new_task_state):
old_task_state != new_task_state):
# yes, the task state is changing:
fire_update = True
@ -227,7 +227,7 @@ def bandwidth_usage(instance_ref, audit_start,
admin_context = nova.context.get_admin_context(read_deleted='yes')
if (instance_ref.get('info_cache') and
instance_ref['info_cache'].get('network_info') is not None):
instance_ref['info_cache'].get('network_info') is not None):
cached_info = instance_ref['info_cache']['network_info']
nw_info = network_model.NetworkInfo.hydrate(cached_info)

View File

@ -334,7 +334,7 @@ class NovaObject(object):
"""
for name in self.fields.keys() + self.obj_extra_fields:
if (hasattr(self, get_attrname(name)) or
name in self.obj_extra_fields):
name in self.obj_extra_fields):
yield name, getattr(self, name)
items = lambda self: list(self.iteritems())

View File

@ -265,7 +265,7 @@ class Instance(base.NovaObject):
changes = self.obj_what_changed()
for field in self.fields:
if (hasattr(self, base.get_attrname(field)) and
isinstance(self[field], base.NovaObject)):
isinstance(self[field], base.NovaObject)):
getattr(self, '_save_%s' % field)(context)
elif field in changes:
updates[field] = self[field]
@ -296,7 +296,7 @@ class Instance(base.NovaObject):
expected_attrs=extra)
for field in self.fields:
if (hasattr(self, base.get_attrname(field)) and
self[field] != current[field]):
self[field] != current[field]):
self[field] = current[field]
self.obj_reset_changes()

View File

@ -239,7 +239,7 @@ class BucketHandler(BaseRequestHandler):
bucket_name))
terse = int(self.get_argument("terse", 0))
if (not path.startswith(self.application.directory) or
not os.path.isdir(path)):
not os.path.isdir(path)):
self.set_404()
return
object_names = []
@ -290,7 +290,7 @@ class BucketHandler(BaseRequestHandler):
path = os.path.abspath(os.path.join(
self.application.directory, bucket_name))
if (not path.startswith(self.application.directory) or
os.path.exists(path)):
os.path.exists(path)):
self.set_status(403)
return
fileutils.ensure_tree(path)
@ -300,7 +300,7 @@ class BucketHandler(BaseRequestHandler):
path = os.path.abspath(os.path.join(
self.application.directory, bucket_name))
if (not path.startswith(self.application.directory) or
not os.path.isdir(path)):
not os.path.isdir(path)):
self.set_404()
return
if len(os.listdir(path)) > 0:
@ -316,7 +316,7 @@ class ObjectHandler(BaseRequestHandler):
object_name = urllib.unquote(object_name)
path = self._object_path(bucket, object_name)
if (not path.startswith(self.application.directory) or
not os.path.isfile(path)):
not os.path.isfile(path)):
self.set_404()
return
info = os.stat(path)
@ -334,7 +334,7 @@ class ObjectHandler(BaseRequestHandler):
bucket_dir = os.path.abspath(os.path.join(
self.application.directory, bucket))
if (not bucket_dir.startswith(self.application.directory) or
not os.path.isdir(bucket_dir)):
not os.path.isdir(bucket_dir)):
self.set_404()
return
path = self._object_path(bucket, object_name)
@ -354,7 +354,7 @@ class ObjectHandler(BaseRequestHandler):
object_name = urllib.unquote(object_name)
path = self._object_path(bucket, object_name)
if (not path.startswith(self.application.directory) or
not os.path.isfile(path)):
not os.path.isfile(path)):
self.set_404()
return
os.unlink(path)

View File

@ -142,7 +142,7 @@ class HostState(object):
def update_from_compute_node(self, compute):
"""Update information about a host from its compute_node info."""
if (self.updated and compute['updated_at']
and self.updated > compute['updated_at']):
and self.updated > compute['updated_at']):
return
all_ram_mb = compute['memory_mb']

View File

@ -97,7 +97,7 @@ class SchedulerOptions(object):
last_modified = self._get_file_timestamp(filename)
if (not last_modified or not self.last_modified or
last_modified > self.last_modified):
last_modified > self.last_modified):
self.data = self._load_file(self._get_file_handle(filename))
self.last_modified = last_modified
if not self.data:

View File

@ -33,7 +33,7 @@ import sys
# implementation which doesn't work for IPv6. What we're checking here is
# that the magic environment variable was set when the import happened.
if ('eventlet' in sys.modules and
os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
raise ImportError('eventlet imported before nova/cmd/__init__ '
'(env var set to %s)'
% os.environ.get('EVENTLET_NO_GREENDNS'))

View File

@ -476,7 +476,7 @@ class CinderCloudTestCase(test.TestCase):
self.volume_api.delete(self.context, vol['id'])
for uuid in (inst1['uuid'], inst2['uuid']):
for bdm in db.block_device_mapping_get_all_by_instance(
self.context, uuid):
self.context, uuid):
db.block_device_mapping_destroy(self.context, bdm['id'])
db.instance_destroy(self.context, inst2['uuid'])
db.instance_destroy(self.context, inst1['uuid'])

View File

@ -1821,7 +1821,7 @@ class ServersControllerCreateTest(test.TestCase):
def rpc_call_wrapper(context, topic, msg, timeout=None):
"""Stub out the scheduler creating the instance entry."""
if (topic == CONF.scheduler_topic and
msg['method'] == 'run_instance'):
msg['method'] == 'run_instance'):
request_spec = msg['args']['request_spec']
num_instances = request_spec.get('num_instances', 1)
instances = []
@ -1954,7 +1954,7 @@ class ServersControllerCreateTest(test.TestCase):
req.headers["content-type"] = "application/json"
with testtools.ExpectedException(
webob.exc.HTTPBadRequest,
'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
self.controller.create(req, body)
def test_create_server_image_too_large(self):
@ -1983,7 +1983,7 @@ class ServersControllerCreateTest(test.TestCase):
req.headers["content-type"] = "application/json"
with testtools.ExpectedException(
webob.exc.HTTPBadRequest,
"Instance type's disk is too small for requested image."):
"Instance type's disk is too small for requested image."):
self.controller.create(req, body)
# TODO(cyeoh): bp-v3-api-unittests

View File

@ -1827,7 +1827,7 @@ class ServersControllerCreateTest(test.TestCase):
def rpc_call_wrapper(context, topic, msg, timeout=None):
"""Stub out the scheduler creating the instance entry."""
if (topic == CONF.scheduler_topic and
msg['method'] == 'run_instance'):
msg['method'] == 'run_instance'):
request_spec = msg['args']['request_spec']
num_instances = request_spec.get('num_instances', 1)
instances = []
@ -1958,7 +1958,7 @@ class ServersControllerCreateTest(test.TestCase):
req.headers["content-type"] = "application/json"
with testtools.ExpectedException(
webob.exc.HTTPBadRequest,
'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
self.controller.create(req, body)
def test_create_server_image_too_large(self):
@ -1987,7 +1987,7 @@ class ServersControllerCreateTest(test.TestCase):
req.headers["content-type"] = "application/json"
with testtools.ExpectedException(
webob.exc.HTTPBadRequest,
"Instance type's disk is too small for requested image."):
"Instance type's disk is too small for requested image."):
self.controller.create(req, body)
def test_create_instance_invalid_negative_min(self):

View File

@ -1402,7 +1402,7 @@ class ComputeTestCase(BaseTestCase):
instance['vm_state'] = 'some_random_state'
with testtools.ExpectedException(
exception.InstanceNotRescuable, expected_message):
exception.InstanceNotRescuable, expected_message):
self.compute.rescue_instance(
self.context, instance=instance,
rescue_password='password')
@ -4536,7 +4536,7 @@ class ComputeTestCase(BaseTestCase):
self.assertNotEqual(migration_ref, None)
for migration in migrations:
if (migration['instance_uuid'] ==
migration_ref['instance_uuid']):
migration_ref['instance_uuid']):
migration['status'] = 'confirmed'
self.stubs.Set(db, 'instance_get_by_uuid',
@ -7063,7 +7063,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertThat(bdms, matchers.DictListMatches(expected_result))
for bdm in db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid']):
self.context, instance['uuid']):
db.block_device_mapping_destroy(self.context, bdm['id'])
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.compute.terminate_instance(self.context, instance)
@ -8876,7 +8876,7 @@ class EvacuateHostTestCase(BaseTestCase):
# cleanup
for bdms in db.block_device_mapping_get_all_by_instance(
self.context, self.inst_ref['uuid']):
self.context, self.inst_ref['uuid']):
db.block_device_mapping_destroy(self.context, bdms['id'])
def test_rebuild_on_host_with_shared_storage(self):

View File

@ -291,7 +291,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
template_data = self._read_template(name)
if (self.generate_samples and
not os.path.exists(self._get_sample(name))):
not os.path.exists(self._get_sample(name))):
self._write_sample(name, response_data)
sample_data = response_data
else:
@ -429,7 +429,7 @@ class ApiSamplesTrap(ApiSampleTestBase):
# NOTE(danms): if you add an extension, it must come with
# api_samples tests!
if (extension not in tests and
extension not in do_not_approve_additions):
extension not in do_not_approve_additions):
missing_tests.append(extension)
if missing_tests:

View File

@ -410,7 +410,7 @@ class XMLMatches(object):
if len(expected) == 0 and len(actual) == 0:
# No children, compare text values
if ('DONTCARE' not in (expected.text, actual.text) and
expected.text != actual.text):
expected.text != actual.text):
return XMLTextValueMismatch(state, expected.text,
actual.text)
else:
@ -423,7 +423,7 @@ class XMLMatches(object):
# allow for, say, arbitrary ordering of some
# elements
if (expected[expected_idx].tag in
(etree.Comment, etree.ProcessingInstruction)):
(etree.Comment, etree.ProcessingInstruction)):
expected_idx += 1
continue
@ -447,7 +447,7 @@ class XMLMatches(object):
if expected_idx < len(expected):
for node in expected[expected_idx:]:
if (node.tag in
(etree.Comment, etree.ProcessingInstruction)):
(etree.Comment, etree.ProcessingInstruction)):
continue
return XMLExpectedChild(state, node.tag, actual_idx)

View File

@ -200,7 +200,7 @@ def get_associated(context, network_id, host=None, address=None):
for datum in fixed_ips:
if (datum['network_id'] == network_id and datum['allocated']
and datum['instance_uuid'] is not None
and datum['virtual_interface_id'] is not None):
and datum['virtual_interface_id'] is not None):
instance = instances[datum['instance_uuid']]
if host and host != instance['host']:
continue

View File

@ -87,7 +87,7 @@ def return_non_existing_address(*args, **kwarg):
def fake_InstanceMetadata(stubs, inst_data, address=None,
sgroups=None, content=[], extra_md={}):
sgroups=None, content=[], extra_md={}):
if sgroups is None:
sgroups = [{'name': 'default'}]

View File

@ -62,7 +62,7 @@ class QuotaIntegrationTestCase(test.TestCase):
def rpc_call_wrapper(context, topic, msg, timeout=None):
"""Stub out the scheduler creating the instance entry."""
if (topic == CONF.scheduler_topic and
msg['method'] == 'run_instance'):
msg['method'] == 'run_instance'):
scheduler = scheduler_driver.Scheduler
instance = scheduler().create_instance_db_entry(
context,

View File

@ -357,7 +357,7 @@ class Domain(object):
def attachDeviceFlags(self, xml, flags):
if (flags & VIR_DOMAIN_AFFECT_LIVE and
self._state != VIR_DOMAIN_RUNNING):
self._state != VIR_DOMAIN_RUNNING):
raise libvirtError("AFFECT_LIVE only allowed for running domains!")
self.attachDevice(xml)

View File

@ -353,7 +353,7 @@ def get_my_ipv4_address():
for match in re.finditer(route, out[0]):
subnet = netaddr.IPNetwork(match.group(1) + "/" + match.group(2))
if (match.group(3) == iface and
netaddr.IPAddress(gateway) in subnet):
netaddr.IPAddress(gateway) in subnet):
try:
return _get_ipv4_address_for_interface(iface)
except exception.NovaException:
@ -681,7 +681,7 @@ def is_valid_cidr(address):
ip_segment = address.split('/')
if (len(ip_segment) <= 1 or
ip_segment[1] == ''):
ip_segment[1] == ''):
return False
return True

View File

@ -45,7 +45,7 @@ def patched_with_engine(f, *a, **kw):
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
migrate_util.with_engine = patched_with_engine

View File

@ -334,7 +334,7 @@ class Tilera(base.NodeDriver):
status = row.get('task_state')
if (status == baremetal_states.DEPLOYING and
locals['started'] == False):
locals['started'] == False):
LOG.info(_('Tilera deploy started for instance %s')
% instance['uuid'])
locals['started'] = True

View File

@ -149,7 +149,7 @@ class Pdu(base.PowerManager):
def activate_node(self):
"""Turns the power to node ON."""
if (self._is_power(CONF.baremetal.tile_pdu_on)
and self.state == baremetal_states.ACTIVE):
and self.state == baremetal_states.ACTIVE):
LOG.warning(_("Activate node called, but node %s "
"is already active") % self.address)
self._power_on()

View File

@ -377,7 +377,7 @@ def get_disk_mapping(virt_type, instance,
mapping['disk.local'] = eph_info
for eph in driver.block_device_info_get_ephemerals(
block_device_info):
block_device_info):
disk_dev = block_device.strip_dev(eph['device_name'])
disk_bus = get_disk_bus_for_disk_dev(virt_type, disk_dev)

View File

@ -297,7 +297,7 @@ class LibvirtConfigCPU(LibvirtConfigObject):
if (self.sockets is not None and
self.cores is not None and
self.threads is not None):
self.threads is not None):
top = etree.Element("topology")
top.set("sockets", str(self.sockets))
top.set("cores", str(self.cores))
@ -484,7 +484,7 @@ class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice):
dev.set("device", self.source_device)
if (self.driver_name is not None or
self.driver_format is not None or
self.driver_cache is not None):
self.driver_cache is not None):
drv = etree.Element("driver")
if self.driver_name is not None:
drv.set("name", self.driver_name)

View File

@ -2148,7 +2148,7 @@ class LibvirtDriver(driver.ComputeDriver):
'/dev/' + disklocal.target_dev})
for eph in driver.block_device_info_get_ephemerals(
block_device_info):
block_device_info):
diskeph = self.get_guest_disk_config(
instance,
blockinfo.get_eph_disk(eph),
@ -3809,7 +3809,7 @@ class LibvirtDriver(driver.ComputeDriver):
# then ensure we're in 'raw' format so we can extend file system.
fmt = info['type']
if (size and fmt == 'qcow2' and
disk.can_resize_fs(info['path'], size, use_cow=True)):
disk.can_resize_fs(info['path'], size, use_cow=True)):
path_raw = info['path'] + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', info['path'], path_raw)

View File

@ -300,7 +300,7 @@ class ImageCacheManager(object):
task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH]
if instance['task_state'] in resize_states or \
instance['vm_state'] == vm_states.RESIZED:
instance['vm_state'] == vm_states.RESIZED:
self.instance_names.add(instance['name'] + '_resize')
self.instance_names.add(instance['uuid'] + '_resize')
@ -406,7 +406,7 @@ class ImageCacheManager(object):
# shared storage), then we don't need to checksum again.
if (stored_timestamp and
time.time() - stored_timestamp <
CONF.checksum_interval_seconds):
CONF.checksum_interval_seconds):
return True
# NOTE(mikal): If there is no timestamp, then the checksum was
@ -494,7 +494,7 @@ class ImageCacheManager(object):
self.unexplained_images.remove(base_file)
if (base_file and os.path.exists(base_file)
and os.path.isfile(base_file)):
and os.path.isfile(base_file)):
# _verify_checksum returns True if the checksum is ok, and None if
# there is no checksum file
checksum_result = self._verify_checksum(img_id, base_file)

View File

@ -111,7 +111,7 @@ class LibvirtBaseVIFDriver(object):
# to the global config parameter
if (model is None and
CONF.libvirt_type in ('kvm', 'qemu') and
CONF.libvirt_use_virtio_for_bridges):
CONF.libvirt_use_virtio_for_bridges):
model = "virtio"
# Workaround libvirt bug, where it mistakenly
@ -332,7 +332,7 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
network, mapping = vif
if (not network.get('multi_host') and
mapping.get('should_create_bridge')):
mapping.get('should_create_bridge')):
if mapping.get('should_create_vlan'):
iface = CONF.vlan_interface or network['bridge_interface']
LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'),

View File

@ -153,7 +153,7 @@ class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
conf.source_ports = netdisk_properties.get('ports', [])
auth_enabled = netdisk_properties.get('auth_enabled')
if (conf.source_protocol == 'rbd' and
CONF.rbd_secret_uuid):
CONF.rbd_secret_uuid):
conf.auth_secret_uuid = CONF.rbd_secret_uuid
auth_enabled = True # Force authentication locally
if CONF.rbd_user:

View File

@ -81,7 +81,7 @@ def check_connection(ssh, connection):
# if the ssh client is not set or the transport is dead, re-connect
if (ssh is None or
ssh.get_transport() is None or
not ssh.get_transport().is_active()):
not ssh.get_transport().is_active()):
LOG.debug("Connection to host %s will be established." %
connection.host)
ssh = ssh_connect(connection)

View File

@ -177,7 +177,7 @@ class Vim:
# Socket errors which need special handling for they
# might be caused by ESX API call overload
if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
str(excep).find(CONN_ABORT_ERROR)) != -1:
str(excep).find(CONN_ABORT_ERROR)) != -1:
raise error_util.SessionOverLoadException(_("Socket "
"error in %s: ") % (attr_name), excep)
# Type error that needs special handling for it might be

View File

@ -146,7 +146,7 @@ def create_network_spec(client_factory, vif_info):
mac_address = vif_info['mac_address']
backing = None
if (network_ref and
network_ref['type'] == "DistributedVirtualPortgroup"):
network_ref['type'] == "DistributedVirtualPortgroup"):
backing_name = ''.join(['ns0:VirtualEthernetCardDistributed',
'VirtualPortBackingInfo'])
backing = client_factory.create(backing_name)
@ -280,7 +280,7 @@ def get_rdm_disk(hardware_devices, uuid):
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskRawDiskMappingVer1BackingInfo" and
device.backing.lunUuid == uuid):
device.backing.lunUuid == uuid):
return device

View File

@ -109,7 +109,7 @@ def find_st(session, data, cluster=None):
for target in adapter.target:
if (getattr(target.transport, 'address', None) and
target.transport.address[0] == target_portal and
target.transport.iScsiName == target_iqn):
target.transport.iScsiName == target_iqn):
if not target.lun:
return result
for lun in target.lun:

View File

@ -758,7 +758,7 @@ class XenAPISession(object):
LOG.debug(_("Got exception: %s"), exc)
if (len(exc.details) == 4 and
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
exc.details[2] == 'Failure'):
exc.details[2] == 'Failure'):
params = None
try:
# FIXME(comstud): eval is evil.

View File

@ -828,7 +828,7 @@ class SessionBase(object):
val = params[2]
if (ref in _db_content[cls] and
field in _db_content[cls][ref]):
field in _db_content[cls][ref]):
_db_content[cls][ref][field] = val
return
@ -897,7 +897,7 @@ class SessionBase(object):
def _check_session(self, params):
if (self._session is None or
self._session not in _db_content['session']):
self._session not in _db_content['session']):
raise Failure(['HANDLE_INVALID', 'session', self._session])
if len(params) == 0 or params[0] != self._session:
LOG.debug(_('Raising NotImplemented'))

View File

@ -515,7 +515,7 @@ def get_vdis_for_instance(context, session, instance, name_label, image,
for bdm in block_device_info['block_device_mapping']:
if (block_device.strip_prefix(bdm['mount_device']) ==
block_device.strip_prefix(root_device_name)):
block_device.strip_prefix(root_device_name)):
# If we're a root-device, record that fact so we don't download
# a root image via Glance
type_ = 'root'
@ -1481,7 +1481,7 @@ def set_vm_name_label(session, vm_ref, name_label):
def list_vms(session):
for vm_ref, vm_rec in session.get_all_refs_and_recs('VM'):
if (vm_rec["resident_on"] != session.get_xenapi_host() or
vm_rec["is_a_template"] or vm_rec["is_control_domain"]):
vm_rec["is_a_template"] or vm_rec["is_control_domain"]):
continue
else:
yield vm_ref, vm_rec

View File

@ -675,7 +675,7 @@ class VMOps(object):
instance=instance)
if (version and agent_build and
cmp_version(version, agent_build['version']) < 0):
cmp_version(version, agent_build['version']) < 0):
agent.agent_update(agent_build)
# if the guest agent is not available, configure the

View File

@ -245,7 +245,7 @@ def parse_volume_info(connection_data):
if (volume_id is None or
target_host is None or
target_iqn is None):
target_iqn is None):
raise StorageError(_('Unable to obtain target information'
' %s') % connection_data)
volume_info = {}
@ -254,7 +254,7 @@ def parse_volume_info(connection_data):
volume_info['port'] = target_port
volume_info['targetIQN'] = target_iqn
if ('auth_method' in connection_data and
connection_data['auth_method'] == 'CHAP'):
connection_data['auth_method'] == 'CHAP'):
volume_info['chapuser'] = connection_data['auth_username']
volume_info['chappassword'] = connection_data['auth_password']

View File

@ -74,7 +74,7 @@ class AddressTests(base.UserSmokeTestCase):
for rule in groups[0].rules:
if (rule.ip_protocol == 'tcp' and
int(rule.from_port) <= 22 and
int(rule.to_port) >= 22):
int(rule.to_port) >= 22):
ssh_authorized = True
break
if not ssh_authorized:

View File

@ -39,7 +39,7 @@ commands =
commands = {posargs}
[flake8]
ignore = E121,E122,E123,E124,E125,E126,E127,E128,E711,E712,H302,H303,H404,F403,F811,F841,N309
ignore = E121,E122,E123,E124,E126,E127,E128,E711,E712,H302,H303,H404,F403,F811,F841,N309
builtins = _
exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,plugins,tools