Fix all pep8 E265 errors

Fixed all pep8 E265 errors and changed tox.ini to no longer
ignore them.  Also removed an N536 comment missed from a
previous change.

Change-Id: Ie6db8406c3b884c95b2a54a7598ea83476b8dba1
Brian Haley 2018-04-30 16:35:52 -04:00
parent 0aeccc5500
commit c3b83a9ca6
44 changed files with 113 additions and 114 deletions
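For reference, pep8/pycodestyle rule E265 is "block comment should start with '# '": a block comment needs a space between the leading hash and the text. A minimal before/after sketch of the pattern fixed throughout this commit (the function and comment text below are placeholders, not lines from the Neutron tree):

    def example():
        #NOTE: old style, flagged by E265 (no space after the hash)
        # NOTE: new style, passes E265 (hash followed by a space)
        pass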


@ -285,7 +285,7 @@ class QosAgentExtension(l2_agent_extension.L2AgentExtension):
if old_qos_policy:
if self._policy_rules_modified(old_qos_policy, qos_policy):
for port in self.policy_map.get_ports(qos_policy):
#NOTE(QoS): for now, just reflush the rules on the port.
# NOTE(QoS): for now, just reflush the rules on the port.
# Later, we may want to apply the difference
# between the old and new rule lists.
self.qos_driver.delete(port, old_qos_policy)


@ -124,7 +124,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
fixed_ip = fip['fixed_ip_address']
self._add_floating_ip_rule(floating_ip, fixed_ip)
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
#Add routing rule in fip namespace
# Add routing rule in fip namespace
fip_ns_name = self.fip_ns.get_name()
if self.rtr_fip_subnet is None:
self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate(
@ -156,7 +156,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
table=dvr_fip_ns.FIP_RT_TBL,
priority=rule_pr)
self.fip_ns.deallocate_rule_priority(floating_ip)
#TODO(rajeev): Handle else case - exception/log?
# TODO(rajeev): Handle else case - exception/log?
def floating_ip_removed_dist(self, fip_cidr):
"""Remove floating IP from FIP namespace."""


@ -150,7 +150,7 @@ class RouterInfo(object):
for del_route in removes:
if route['destination'] == del_route['destination']:
removes.remove(del_route)
#replace success even if there is no existing route
# replace success even if there is no existing route
self.update_routing_table('replace', route)
for route in removes:
LOG.debug("Removed route entry is '%s'", route)


@ -491,7 +491,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
'-j RETURN', comment=ic.DHCP_CLIENT)]
def _drop_dhcp_rule(self, ipv4_rules, ipv6_rules):
#Note(nati) Drop dhcp packet from VM
# Note(nati) Drop dhcp packet from VM
ipv4_rules += [comment_rule('-p udp -m udp --sport 67 '
'--dport 68 '
'-j DROP', comment=ic.DHCP_SPOOF)]
@ -593,7 +593,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
ethertype = sg_rule.get('ethertype')
ipset_name = self.ipset.get_name(remote_gid, ethertype)
if not self.ipset.set_name_exists(ipset_name):
#NOTE(mangelajo): ipsets for empty groups are not created
# NOTE(mangelajo): ipsets for empty groups are not created
# thus we can't reference them.
return None
ipset_direction = IPSET_DIRECTION[sg_rule.get('direction')]
@ -703,7 +703,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
return args
def _ip_prefix_arg(self, direction, ip_prefix):
#NOTE (nati) : source_group_id is converted to list of source_
# NOTE (nati) : source_group_id is converted to list of source_
# ip_prefix in server side
if ip_prefix:
if '/' not in ip_prefix:


@ -378,7 +378,7 @@ class KeepalivedManager(object):
if throttle_restart_value is not None:
self._throttle_spawn(throttle_restart_value)
#pylint: disable=method-hidden
# pylint: disable=method-hidden
def _throttle_spawn(self, threshold):
self.spawn = utils.throttler(threshold)(self.spawn)


@ -112,10 +112,10 @@ class TcCommand(ip_lib.IPDevice):
for line in cmd_result.split("\n"):
m = filters_pattern.match(line.strip())
if m:
#NOTE(slaweq): because tc is giving bw limit in SI units
# NOTE(slaweq): because tc is giving bw limit in SI units
# we need to calculate it as 1000bit = 1kbit:
bw_limit = convert_to_kilobits(m.group(1), constants.SI_BASE)
#NOTE(slaweq): because tc is giving burst limit in IEC units
# NOTE(slaweq): because tc is giving burst limit in IEC units
# we need to calculate it as 1024bit = 1kbit:
burst_limit = convert_to_kilobits(
m.group(2), constants.IEC_BASE)
@ -133,10 +133,10 @@ class TcCommand(ip_lib.IPDevice):
qdisc_name = m.group(1)
if qdisc_name != "tbf":
return None, None
#NOTE(slaweq): because tc is giving bw limit in SI units
# NOTE(slaweq): because tc is giving bw limit in SI units
# we need to calculate it as 1000bit = 1kbit:
bw_limit = convert_to_kilobits(m.group(2), constants.SI_BASE)
#NOTE(slaweq): because tc is giving burst limit in IEC units
# NOTE(slaweq): because tc is giving burst limit in IEC units
# we need to calculate it as 1024bit = 1kbit:
burst_limit = convert_to_kilobits(m.group(3), constants.IEC_BASE)
return bw_limit, burst_limit
@ -148,7 +148,7 @@ class TcCommand(ip_lib.IPDevice):
means that it is fine to limit egress traffic from instance point of
view.
"""
#because replace of tc filters is not working properly and it's adding
# because replace of tc filters is not working properly and it's adding
# new filters each time instead of replacing existing one first old
# ingress qdisc should be deleted and then added new one so update will
# be called to do that:
@ -172,7 +172,7 @@ class TcCommand(ip_lib.IPDevice):
return self._replace_tbf_qdisc(bw_limit, burst_limit, latency_value)
def delete_filters_bw_limit(self):
#NOTE(slaweq): For limit traffic egress from instance we need to use
# NOTE(slaweq): For limit traffic egress from instance we need to use
# qdisc "ingress" because it is ingress traffic from interface POV:
self._delete_qdisc("ingress")
@ -220,7 +220,7 @@ class TcCommand(ip_lib.IPDevice):
self.get_ingress_qdisc_burst_value(bw_limit, burst_limit),
BURST_UNIT
)
#NOTE(slaweq): it is made in exactly same way how openvswitch is doing
# NOTE(slaweq): it is made in exactly same way how openvswitch is doing
# it when configuing ingress traffic limit on port. It can be found in
# lib/netdev-linux.c#L4698 in openvswitch sources:
cmd = [


@ -227,7 +227,7 @@ def kill_process(pid, signal, run_as_root=False):
def _get_conf_base(cfg_root, uuid, ensure_conf_dir):
#TODO(mangelajo): separate responsibilities here, ensure_conf_dir
# TODO(mangelajo): separate responsibilities here, ensure_conf_dir
# should be a separate function
conf_dir = os.path.abspath(os.path.normpath(cfg_root))
conf_base = os.path.join(conf_dir, uuid)


@ -13,7 +13,7 @@
from neutron.api.rpc.callbacks import resource_manager
#TODO(ajo): consider adding locking to _get_manager, it's
# TODO(ajo): consider adding locking to _get_manager, it's
# safe for eventlet, but not for normal threading.
def _get_manager():
return resource_manager.ConsumerResourceCallbacksManager()


@ -236,7 +236,7 @@ class CachedResourceConsumerTracker(object):
_cached_version_tracker = None
#NOTE(ajo): add locking if we ever stop using greenthreads
# NOTE(ajo): add locking if we ever stop using greenthreads
def _get_cached_tracker():
global _cached_version_tracker
if not _cached_version_tracker:


@ -640,7 +640,7 @@ def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
eventlet.sleep(sleep)
except eventlet.Timeout:
if exception is not None:
#pylint: disable=raising-bad-type
# pylint: disable=raising-bad-type
raise exception
raise WaitTimeout("Timed out after %d seconds" % timeout)
@ -803,5 +803,5 @@ def bytes_to_bits(value):
def bits_to_kilobits(value, base):
#NOTE(slaweq): round up that even 1 bit will give 1 kbit as a result
# NOTE(slaweq): round up that even 1 bit will give 1 kbit as a result
return int((value + (base - 1)) / base)


@ -18,7 +18,7 @@ from neutron._i18n import _
allowed_address_pair_opts = [
#TODO(limao): use quota framework when it support quota for attributes
# TODO(limao): use quota framework when it support quota for attributes
cfg.IntOpt('max_allowed_address_pair', default=10,
help=_("Maximum number of allowed address pairs")),
]


@ -406,7 +406,7 @@ class AgentExtRpcCallback(object):
def __init__(self, plugin=None):
super(AgentExtRpcCallback, self).__init__()
self.plugin = plugin
#TODO(ajo): fix the resources circular dependency issue by dynamically
# TODO(ajo): fix the resources circular dependency issue by dynamically
# registering object types in the RPC callbacks api
resources_rpc = importutils.import_module(
'neutron.api.rpc.handlers.resources_rpc')


@ -49,7 +49,7 @@ class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
r = router['router']
if 'routes' in r:
with context.session.begin(subtransactions=True):
#check if route exists and have permission to access
# check if route exists and have permission to access
router_db = self._get_router(context, id)
self._update_extra_routes(context, router_db, r['routes'])
# NOTE(yamamoto): expire to ensure the following update_router
@ -59,14 +59,14 @@ class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
context, id, router)
def _validate_routes_nexthop(self, cidrs, ips, routes, nexthop):
#Note(nati): Nexthop should be connected,
# Note(nati): Nexthop should be connected,
# so we need to check
# nexthop belongs to one of cidrs of the router ports
if not netaddr.all_matching_cidrs(nexthop, cidrs):
raise xroute_exc.InvalidRoutes(
routes=routes,
reason=_('the nexthop is not connected with router'))
#Note(nati) nexthop should not be same as fixed_ips
# Note(nati) nexthop should not be same as fixed_ips
if nexthop in ips:
raise xroute_exc.InvalidRoutes(
routes=routes,


@ -558,7 +558,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
def delete_router(self, context, id):
registry.notify(resources.ROUTER, events.BEFORE_DELETE,
self, context=context, router_id=id)
#TODO(nati) Refactor here when we have router insertion model
# TODO(nati) Refactor here when we have router insertion model
router = self._ensure_router_not_in_use(context, id)
original = self._make_router_dict(router)
self._delete_current_gw_port(context, id, router, None)


@ -74,7 +74,7 @@ def generate_records_for_existing():
for row in session.query(model):
# NOTE(kevinbenton): without this disabled, pylint complains
# about a missing 'dml' argument.
#pylint: disable=no-value-for-parameter
# pylint: disable=no-value-for-parameter
res = session.execute(
standardattrs.insert().values(resource_type=table))
session.execute(


@ -51,7 +51,7 @@ def update_existing_records():
for row in session.query(TBL_MODEL):
# NOTE from kevinbenton: without this disabled, pylint complains
# about a missing 'dml' argument.
#pylint: disable=no-value-for-parameter
# pylint: disable=no-value-for-parameter
res = session.execute(
standardattrs.insert().values(resource_type=TBL)
)


@ -71,7 +71,7 @@ def generate_records_for_existing():
for row in session.query(TABLE_MODEL):
# NOTE(kevinbenton): without this disabled, pylint complains
# about a missing 'dml' argument.
#pylint: disable=no-value-for-parameter
# pylint: disable=no-value-for-parameter
res = session.execute(
standardattrs.insert().values(resource_type=TABLE,
description=row[1])


@ -649,7 +649,7 @@ def main():
CONF(project='neutron')
return_val = False
for config in get_alembic_configs():
#TODO(gongysh) enable logging
# TODO(gongysh) enable logging
return_val |= bool(CONF.command.func(config, CONF.command.name))
if CONF.command.name == 'has_offline_migrations' and not return_val:


@ -104,7 +104,7 @@ class DbQuotaDriver(object):
'used': used,
'reserved': res_reserve_info.get(key, 0),
}
#update with specific tenant limits
# update with specific tenant limits
quota_objs = quota_obj.Quota.get_objects(context, project_id=tenant_id)
for item in quota_objs:
tenant_quota_ext[item['resource']]['limit'] = item['limit']


@ -121,7 +121,7 @@ class HasStandardAttributes(object):
# NOTE(kevinbenton): we have to disable the following pylint check because
# it thinks we are overriding this method in the __init__ method.
#pylint: disable=method-hidden
# pylint: disable=method-hidden
@declarative.declared_attr
def standard_attr(cls):
return sa.orm.relationship(StandardAttribute,


@ -38,7 +38,7 @@ COMMAND_V2 = {
'neutron.debug.commands.ExecProbe'),
'ping-all': importutils.import_class(
'neutron.debug.commands.PingAll'),
#TODO(nati) ping, netcat , nmap, bench
# TODO(nati) ping, netcat , nmap, bench
}
COMMANDS = {'2.0': COMMAND_V2}


@ -50,7 +50,7 @@ class QosRule(base.NeutronDbObject):
# 1.2: Added QosMinimumBandwidthRule
# 1.3: Added direction for BandwidthLimitRule
#
#NOTE(mangelajo): versions need to be handled from the top QosRule object
# NOTE(mangelajo): versions need to be handled from the top QosRule object
# because it's the only reference QosPolicy can make
# to them via obj_relationships version map
VERSION = '1.3'


@ -88,7 +88,7 @@ class CommonAgentLoop(service.Service):
configurations = {'extensions': self.ext_manager.names()}
configurations.update(self.mgr.get_agent_configurations())
#TODO(mangelajo): optimize resource_versions (see ovs agent)
# TODO(mangelajo): optimize resource_versions (see ovs agent)
self.agent_state = {
'binary': self.agent_binary,
'host': cfg.CONF.host,


@ -187,10 +187,10 @@ class EmbSwitch(object):
@param rate_kbps: device rate in kbps
"""
vf_index = self._get_vf_index(pci_slot)
#NOTE(ralonsoh): ip link sets rate in Mbps therefore we need to convert
#the rate_kbps value from kbps to Mbps.
#Zero means to disable the rate so the lowest rate available is 1Mbps.
#Floating numbers are not allowed
# NOTE(ralonsoh): ip link sets rate in Mbps therefore we need to
# convert the rate_kbps value from kbps to Mbps.
# Zero means to disable the rate so the lowest rate available is 1Mbps.
# Floating numbers are not allowed
if rate_kbps > 0 and rate_kbps < 1000:
rate_mbps = 1
else:
@ -441,15 +441,15 @@ class ESwitchManager(object):
@param pci_slot: VF PCI slot
@param rate_type: rate to clear ('rate', 'min_tx_rate')
"""
#NOTE(Moshe Levi): we don't use the self._get_emb_eswitch here, because
#when clearing the VF it may be not assigned. This happens when
#libvirt releases the VF back to the hypervisor on delete VM. Therefore
#we should just clear the VF rate according to pci_slot no matter
#if VF is assigned or not.
# NOTE(Moshe Levi): we don't use the self._get_emb_eswitch here,
# because when clearing the VF it may be not assigned. This happens
# when libvirt releases the VF back to the hypervisor on delete VM.
# Therefore we should just clear the VF rate according to pci_slot no
# matter if VF is assigned or not.
embedded_switch = self.pci_slot_map.get(pci_slot)
if embedded_switch:
#NOTE(Moshe Levi): check the pci_slot is not assigned to some
#other port before resetting the rate.
# NOTE(Moshe Levi): check the pci_slot is not assigned to some
# other port before resetting the rate.
if embedded_switch.get_pci_device(pci_slot) is None:
embedded_switch.set_device_rate(pci_slot, rate_type, 0)
else:


@ -134,7 +134,7 @@ class SriovNicSwitchAgent(object):
configurations = {'device_mappings': physical_devices_mappings,
'extensions': self.ext_manager.names()}
#TODO(mangelajo): optimize resource_versions (see ovs agent)
# TODO(mangelajo): optimize resource_versions (see ovs agent)
self.agent_state = {
'binary': 'neutron-sriov-nic-agent',
'host': self.conf.host,


@ -105,9 +105,9 @@ class QosOVSAgentDriver(qos.QosLinuxAgentDriver):
port_name = vif_port.port_name
port = self.br_int.get_port_ofport(port_name)
mark = rule.dscp_mark
#mark needs to be bit shifted 2 left to not overwrite the
#lower 2 bits of type of service packet header.
#source: man ovs-ofctl (/mod_nw_tos)
# mark needs to be bit shifted 2 left to not overwrite the
# lower 2 bits of type of service packet header.
# source: man ovs-ofctl (/mod_nw_tos)
mark = str(mark << 2)
# reg2 is a metadata field that does not alter packets.


@ -255,7 +255,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
self.prevent_arp_spoofing = (
not self.sg_agent.firewall.provides_arp_spoofing_protection)
#TODO(mangelajo): optimize resource_versions to only report
# TODO(mangelajo): optimize resource_versions to only report
# versions about resources which are common,
# or which are used by specific extensions.
self.agent_state = {
@ -1268,7 +1268,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
events['added'] = [e for e in events['added']
if e['name'] != p]
#TODO(rossella_s): scanning the ancillary bridge won't be needed
# TODO(rossella_s): scanning the ancillary bridge won't be needed
# anymore when https://review.openstack.org/#/c/203381 since the bridge
# id stored in external_ids will be used to identify the bridge the
# port belongs to


@ -222,7 +222,7 @@ class DhcpFilter(base_resource_filter.BaseResourceFilter):
network is already hosted by enough number of agents.
"""
agents_per_network = cfg.CONF.dhcp_agents_per_network
#TODO(gongysh) don't schedule the networks with only
# TODO(gongysh) don't schedule the networks with only
# subnets whose enable_dhcp is false
with context.session.begin(subtransactions=True):
network_hosted_agents = plugin.get_dhcp_agents_hosting_networks(


@ -170,8 +170,8 @@ class Host(fixtures.Fixture):
self.ovs_agent.agent_cfg_fixture.get_br_int_name()))
def setup_host_with_linuxbridge_agent(self):
#First we need to provide connectivity for agent to prepare proper
#bridge mappings in agent's config:
# First we need to provide connectivity for agent to prepare proper
# bridge mappings in agent's config:
self.host_namespace = self.useFixture(
net_helpers.NamespaceFixture(prefix="host-")
).name
@ -365,13 +365,13 @@ class Environment(fixtures.Fixture):
def _setUp(self):
self.temp_dir = self.useFixture(fixtures.TempDir()).path
#we need this bridge before rabbit and neutron service will start
# we need this bridge before rabbit and neutron service will start
self.central_data_bridge = self.useFixture(
net_helpers.OVSBridgeFixture('cnt-data')).bridge
self.central_external_bridge = self.useFixture(
net_helpers.OVSBridgeFixture('cnt-ex')).bridge
#Get rabbitmq address (and cnt-data network)
# Get rabbitmq address (and cnt-data network)
rabbitmq_ip_address = self._configure_port_for_rabbitmq()
self.rabbitmq_environment = self.useFixture(
process.RabbitmqEnvironmentFixture(host=rabbitmq_ip_address)
@ -406,7 +406,7 @@ class Environment(fixtures.Fixture):
return rabbitmq_ip
def _get_network_range(self):
#NOTE(slaweq): We need to choose IP address on which rabbitmq will be
# NOTE(slaweq): We need to choose IP address on which rabbitmq will be
# available because LinuxBridge agents are spawned in their own
# namespaces and need to know where the rabbitmq server is listening.
# For ovs agent it is not necessary because agents are spawned in


@ -63,7 +63,7 @@ class TestOVSAgent(base.OVSAgentTestFramework):
ofports = [port.ofport for port in self.agent.int_br.get_vif_ports()
if port.port_name in portnames]
#wait until ports are marked dead, with drop flow
# wait until ports are marked dead, with drop flow
utils.wait_until_true(
lambda: num_ports_with_drop_flows(
ofports,
@ -71,12 +71,12 @@ class TestOVSAgent(base.OVSAgentTestFramework):
constants.LOCAL_SWITCHING
)) == len(ofports))
#delete the ports on bridge
# delete the ports on bridge
for port in self.ports:
self.agent.int_br.delete_port(port['vif_name'])
self.wait_until_ports_state(self.ports, up=False)
#verify no stale drop flows
# verify no stale drop flows
self.assertEqual(0,
num_ports_with_drop_flows(
ofports,


@ -399,7 +399,7 @@ class TestSanityCheck(testlib_api.SqlTestCaseLight):
self.addCleanup(self._drop_table, ha_router_agent_port_bindings)
# NOTE(haleyb): without this disabled, pylint complains
# about a missing 'dml' argument.
#pylint: disable=no-value-for-parameter
# pylint: disable=no-value-for-parameter
conn.execute(ha_router_agent_port_bindings.insert(), [
{'port_id': '1234', 'router_id': '12345',
'l3_agent_id': '123'},
@ -424,7 +424,7 @@ class TestSanityCheck(testlib_api.SqlTestCaseLight):
self.addCleanup(self._drop_table, routerports)
# NOTE(haleyb): without this disabled, pylint complains
# about a missing 'dml' argument.
#pylint: disable=no-value-for-parameter
# pylint: disable=no-value-for-parameter
conn.execute(routerports.insert(), [
{'router_id': '1234', 'port_id': '12345',
'port_type': '123'},
@ -449,7 +449,7 @@ class TestSanityCheck(testlib_api.SqlTestCaseLight):
self.addCleanup(self._drop_table, floatingips)
# NOTE(haleyb): without this disabled, pylint complains
# about a missing 'dml' argument.
#pylint: disable=no-value-for-parameter
# pylint: disable=no-value-for-parameter
conn.execute(floatingips.insert(), [
{'floating_network_id': '12345',
'fixed_port_id': '1234567',
@ -476,7 +476,7 @@ class TestSanityCheck(testlib_api.SqlTestCaseLight):
self.addCleanup(self._drop_table, floatingips)
# NOTE(haleyb): without this disabled, pylint complains
# about a missing 'dml' argument.
#pylint: disable=no-value-for-parameter
# pylint: disable=no-value-for-parameter
conn.execute(floatingips.insert(), [
{'floating_network_id': '12345',
'fixed_port_id': '1234567',


@ -34,12 +34,12 @@ from neutron.tests.unit.api.v2 import test_base
_uuid = test_base._uuid
#TODO(mangelajo): replace all 'IPv4', 'IPv6' to constants
# TODO(mangelajo): replace all 'IPv4', 'IPv6' to constants
FAKE_PREFIX = {'IPv4': '10.0.0.0/24',
'IPv6': 'fe80::/48'}
FAKE_IP = {'IPv4': '10.0.0.1',
'IPv6': 'fe80::1'}
#TODO(mangelajo): replace all '*_sgid' strings for the constants
# TODO(mangelajo): replace all '*_sgid' strings for the constants
FAKE_SGID = 'fake_sgid'
OTHER_SGID = 'other_sgid'
_IPv6 = constants.IPv6


@ -1363,7 +1363,7 @@ class SecurityGroupAgentRpcApiTestCase(base.BaseTestCase):
None, security_groups=[])
self.assertFalse(self.mock_cast.called)
#Note(nati) bn -> binary_name
# Note(nati) bn -> binary_name
# id -> device_id
PHYSDEV_MOD = '-m physdev'
@ -3217,7 +3217,7 @@ class TestSecurityGroupAgentWithOVSIptables(
self._verify_mock_calls()
def _regex(self, value):
#Note(nati): tap is prefixed on the device
# Note(nati): tap is prefixed on the device
# in the OVSHybridIptablesFirewallDriver
value = value.replace('tap_port', 'taptap_port')


@ -1052,7 +1052,7 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
def test_create_ports_bulk_emulated(self):
real_has_attr = hasattr
#ensures the API choose the emulation code path
# ensures the API choose the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
@ -1092,7 +1092,7 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
def test_create_ports_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
#ensures the API choose the emulation code path
# ensures the API choose the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
@ -2827,7 +2827,7 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
def test_create_networks_bulk_emulated(self):
real_has_attr = hasattr
#ensures the API choose the emulation code path
# ensures the API choose the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
@ -2858,7 +2858,7 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
return real_has_attr(item, attr)
orig = directory.get_plugin().create_network
#ensures the API choose the emulation code path
# ensures the API choose the emulation code path
with mock.patch('six.moves.builtins.hasattr',
new=fakehasattr):
method_to_patch = _get_create_db_method('network')
@ -3307,7 +3307,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
def test_create_subnets_bulk_emulated(self):
real_has_attr = hasattr
#ensures the API choose the emulation code path
# ensures the API choose the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
@ -3324,7 +3324,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
def test_create_subnets_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
#ensures the API choose the emulation code path
# ensures the API choose the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
@ -4767,12 +4767,12 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
data['subnet']['gateway_ip'] = '192.168.0.9'
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
#check res code and contents
# check res code and contents
res = req.get_response(self.api)
self.assertEqual(200, res.status_code)
self._verify_updated_subnet_allocation_pools(res,
with_gateway_ip)
#GET subnet to verify DB updated correctly
# GET subnet to verify DB updated correctly
req = self.new_show_request('subnets', subnet['subnet']['id'],
self.fmt)
res = req.get_response(self.api)
@ -4785,7 +4785,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
def test_update_subnet_allocation_pools_and_gateway_ip(self):
self._test_update_subnet_allocation_pools(with_gateway_ip=True)
#updating alloc pool to something outside subnet.cidr
# updating alloc pool to something outside subnet.cidr
def test_update_subnet_allocation_pools_invalid_pool_for_cidr(self):
"""Test update alloc pool to something outside subnet.cidr.
@ -4805,7 +4805,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
self.assertEqual(webob.exc.HTTPClientError.code,
res.status_int)
#updating alloc pool on top of existing subnet.gateway_ip
# updating alloc pool on top of existing subnet.gateway_ip
def test_update_subnet_allocation_pools_over_gateway_ip_returns_409(self):
allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
with self.network() as network:


@ -77,7 +77,7 @@ class Foxinsocks(api_extensions.ExtensionDescriptor):
request_exts = []
def _goose_handler(req, res):
#NOTE: This only handles JSON responses.
# NOTE: This only handles JSON responses.
# You can use content type header to test for XML.
data = jsonutils.loads(res.body)
data['FOXNSOX:googoose'] = req.GET.get('chewing')
@ -89,7 +89,7 @@ class Foxinsocks(api_extensions.ExtensionDescriptor):
request_exts.append(req_ext1)
def _bands_handler(req, res):
#NOTE: This only handles JSON responses.
# NOTE: This only handles JSON responses.
# You can use content type header to test for XML.
data = jsonutils.loads(res.body)
data['FOXNSOX:big_bands'] = 'Pig Bands!'


@ -1929,7 +1929,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
None,
None,
exc.HTTPBadRequest.code)
#remove properly to clean-up
# remove properly to clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
@ -3532,7 +3532,7 @@ class L3AgentDbTestCaseBase(L3NatTestCaseMixin):
self.assertIn(router1_id, device_list)
self.assertIn(router2_id, device_list)
#Verify if no router pass in, return empty list
# Verify if no router pass in, return empty list
ifaces = self.plugin._get_sync_interfaces(admin_ctx, None)
self.assertEqual(0, len(ifaces))


@ -1546,7 +1546,7 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
def test_create_security_group_rule_bulk_emulated(self):
real_has_attr = hasattr
#ensures the API choose the emulation code path
# ensures the API choose the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
@ -1617,7 +1617,7 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
def test_create_security_group_rule_duplicate_rule_in_post_emulated(self):
real_has_attr = hasattr
#ensures the API choose the emulation code path
# ensures the API choose the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
@ -1654,7 +1654,7 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
def test_create_security_group_rule_duplicate_rule_db_emulated(self):
real_has_attr = hasattr
#ensures the API choose the emulation code path
# ensures the API choose the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False


@ -157,7 +157,7 @@ class TimeStampChangedsinceTestCase(test_db_base_plugin_v2.
self.assertIn(new_second[resource_type]['id'],
[n['id'] for n in resources[resource_type + 's']])
#test first < second < changed_since
# test first < second < changed_since
resources = self._return_by_timedelay(new_second, 3)
self.assertEqual({resource_type + 's': []}, resources)


@ -20,7 +20,7 @@ class AllowedAddrPairsIfaceObjTestCase(obj_test_base.BaseObjectIfaceTestCase):
_test_class = allowedaddresspairs.AllowedAddressPair
#TODO(mhickey): Add common base db test class specifically for port extensions
# TODO(mhickey): Add common base db test class specifically for port extensions
class AllowedAddrPairsDbObjTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):


@ -446,7 +446,7 @@ class QosPolicyDbObjectTestCase(test_base.BaseDbObjectTestCase,
policy_obj.obj_to_primitive('1.2')
def test_object_version_degradation_to_1_0(self):
#NOTE(mangelajo): we should not check .VERSION, since that's the
# NOTE(mangelajo): we should not check .VERSION, since that's the
# local version on the class definition
policy_obj, rule_objs = self._create_test_policy_with_rules(
[qos_consts.RULE_TYPE_BANDWIDTH_LIMIT,
@ -461,7 +461,7 @@ class QosPolicyDbObjectTestCase(test_base.BaseDbObjectTestCase,
self.assertNotIn(rule_objs[2], policy_obj_v1_0.rules)
def test_object_version_degradation_1_2_to_1_1(self):
#NOTE(mangelajo): we should not check .VERSION, since that's the
# NOTE(mangelajo): we should not check .VERSION, since that's the
# local version on the class definition
policy_obj, rule_objs = self._create_test_policy_with_rules(
[qos_consts.RULE_TYPE_BANDWIDTH_LIMIT,
@ -476,7 +476,7 @@ class QosPolicyDbObjectTestCase(test_base.BaseDbObjectTestCase,
self.assertNotIn(rule_objs[2], policy_obj_v1_1.rules)
def test_object_version_degradation_1_3_to_1_2(self):
#NOTE(mangelajo): we should not check .VERSION, since that's the
# NOTE(mangelajo): we should not check .VERSION, since that's the
# local version on the class definition
policy_obj, rule_objs = self._create_test_policy_with_rules(
[qos_consts.RULE_TYPE_BANDWIDTH_LIMIT,


@ -554,7 +554,7 @@ class TestCommonAgentLoop(base.BaseTestCase):
self.agent.network_ports[NETWORK_ID].append(
port_2_data
)
#check update port:
# check update port:
self.agent._update_network_ports(
NETWORK_2_ID, port_2_data['port_id'], port_2_data['device']
)
@ -573,13 +573,13 @@ class TestCommonAgentLoop(base.BaseTestCase):
self.agent.network_ports[NETWORK_ID].append(
port_2_data
)
#check removing port from network when other ports are still there:
# check removing port from network when other ports are still there:
cleaned_port_id = self.agent._clean_network_ports(DEVICE_1)
self.assertIn(NETWORK_ID, self.agent.network_ports.keys())
self.assertNotIn(port_1_data, self.agent.network_ports[NETWORK_ID])
self.assertIn(port_2_data, self.agent.network_ports[NETWORK_ID])
self.assertEqual(PORT_1, cleaned_port_id)
#and now remove last port from network:
# and now remove last port from network:
cleaned_port_id = self.agent._clean_network_ports(
port_2_data['device']
)


@ -147,7 +147,7 @@ class TestMeteringOperations(base.BaseTestCase):
if len(fake_notifier.NOTIFICATIONS) > 1:
for n in fake_notifier.NOTIFICATIONS:
if n['event_type'] == 'l3.meter':
#skip the first notification because the time is 0
# skip the first notification because the time is 0
count += 1
if count > 1:
break


@ -242,13 +242,13 @@ class Request(wsgi.Request):
if _format in ['json']:
return 'application/{0}'.format(_format)
#Then look up content header
# Then look up content header
type_from_header = self.get_content_type()
if type_from_header:
return type_from_header
ctypes = ['application/json']
#Finally search in Accept-* headers
# Finally search in Accept-* headers
bm = self.accept.best_match(ctypes)
return bm or 'application/json'
@ -629,7 +629,7 @@ class Resource(Application):
controller_method = getattr(self.controller, action)
try:
#NOTE(salvatore-orlando): the controller method must have
# NOTE(salvatore-orlando): the controller method must have
# an argument whose name is 'request'
return controller_method(request=request, **action_args)
except TypeError:


@ -138,15 +138,14 @@ commands = sphinx-build -W -b linkcheck doc/source doc/build/linkcheck
# E126 continuation line over-indented for hanging indent
# E128 continuation line under-indented for visual indent
# E129 visually indented line with same indent as next logical line
# E265 block comment should start with '# '
# H404 multi line docstring should start with a summary
# H405 multi line docstring summary not separated with an empty line
# N530 direct neutron imports not allowed
# TODO(ihrachys) figure out what to do with N534 and N536
# TODO(ihrachys) figure out what to do with N534
# N534 Untranslated exception message
# TODO(amotoki) check the following new rules should be fixed or ignored
# E731 do not assign a lambda expression, use a def
ignore = E125,E126,E128,E129,E265,E731,H404,H405,N530,N534
ignore = E125,E126,E128,E129,E731,H404,H405,N530,N534
# H106: Don't put vim configuration in source files
# H203: Use assertIs(Not)None to check for None
# H204: Use assert(Not)Equal to check for equality
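With E265 dropped from the ignore list above, the pep8 job will flag any reintroduced violations; in the gate this is normally exercised through the project's pep8 tox environment. A hedged local check, assuming pycodestyle is installed and using a placeholder target path:

    # Sketch only: count remaining E265 violations via the pycodestyle API.
    import pycodestyle

    style = pycodestyle.StyleGuide(select=['E265'])   # restrict the run to the E265 rule
    report = style.check_files(['neutron/'])          # placeholder path to the source tree
    print('E265 violations:', report.total_errors)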