Fix some pylint indentation warnings

Running with a stricter .pylintrc generates a lot of
C0330 warnings (hanging/continued indentation). Fix
the ones in neutron/plugins.

Trivialfix

Change-Id: Id9138652f5f07ef12fa682e182fe210019e8f975
Brian Haley 2022-11-03 22:27:55 -04:00
parent ba795c6692
commit b1714a2b9d
19 changed files with 201 additions and 206 deletions
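
For context, pylint's C0330 ("bad-continuation") fires when a continuation
line is neither aligned with the token that follows the opening bracket nor
indented by a consistent hanging offset. The snippet below is a minimal,
hypothetical illustration of the pattern being cleaned up here; get_network()
is just a stand-in helper, not code from the tree:

    def get_network(context, network_id):
        # Stand-in for a real plugin call; only here so the example runs.
        return {'id': network_id, 'context': context}

    # Flagged by C0330: the continuation line is neither aligned with the
    # first argument nor a clean hanging indent.
    net = get_network('admin-context',
        'net-1')

    # Accepted: continuation aligned with the first argument.
    net = get_network('admin-context',
                      'net-1')

    # Also accepted: no argument after the opening parenthesis, then a
    # four-space hanging indent (the style most of the fixes below adopt).
    net = get_network(
        'admin-context', 'net-1')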

@@ -121,7 +121,8 @@ def delete_distributed_port_binding_if_stale(context, binding):
         LOG.debug("Distributed port: Deleting binding %s", binding)
         context.session.delete(binding)
         for bindlv in (context.session.query(models.PortBindingLevel).
-                       filter_by(port_id=binding.port_id, host=binding.host)):
+                       filter_by(port_id=binding.port_id,
+                                 host=binding.host)):
             context.session.delete(bindlv)
         LOG.debug("For port %(port_id)s, host %(host)s, "
                   "cleared binding levels",

@@ -231,8 +231,8 @@ class OpenFlowSwitchMixin(object):
         (dp, ofp, ofpp) = self._get_dp()
         match = self._match(ofp, ofpp, match, **match_kwargs)
         if isinstance(instructions, str):
-            debtcollector.deprecate("Use of string instruction is "
-                                    "deprecated", removal_version='U')
+            debtcollector.deprecate(
+                "Use of string instruction is deprecated", removal_version='U')
             jsonlist = ofctl_string.ofp_instruction_from_str(
                 ofp, instructions)
             instructions = ofproto_parser.ofp_instruction_from_jsondict(

@@ -660,8 +660,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
             network=network, address=str(mac))
 
     def _add_port_to_updated_smartnic_ports(self, mac, vif_name, iface_id,
-                                            vif_type, vm_uuid='',
-                                            mtu=None):
+                                            vif_type, vm_uuid='', mtu=None):
         if mtu is None:
             mtu = plugin_utils.get_deployment_physnet_mtu()
         self.updated_smartnic_ports.append({
@@ -1324,8 +1323,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
                           if netaddr.IPNetwork(ip).version == 6}
         # Allow neighbor advertisements for LLA address.
         ipv6_addresses |= {str(netutils.get_ipv6_addr_by_EUI64(
-            n_const.IPv6_LLA_PREFIX, mac))
-            for mac in mac_addresses}
+            n_const.IPv6_LLA_PREFIX, mac)) for mac in mac_addresses}
         if not has_zero_prefixlen_address(ipv6_addresses):
             # Install protection only when prefix is not zero because a /0
             # prefix allows any address anyway and the nd_target can only
@@ -1845,10 +1843,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
         for vlan_mappings in self.vlan_manager:
             for lvm in vlan_mappings.values():
                 for port in lvm.vif_ports.values():
-                    if (
-                        port.port_name in port_tags and
-                        port_tags[port.port_name] != lvm.vlan
-                    ):
+                    if (port.port_name in port_tags and
+                            port_tags[port.port_name] != lvm.vlan):
                         LOG.info(
                             "Port '%(port_name)s' has lost "
                             "its vlan tag '%(vlan_tag)d'! "
@@ -1856,8 +1852,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
                             "'%(new_vlan_tag)s'.",
                             {'port_name': port.port_name,
                              'vlan_tag': lvm.vlan,
-                             'new_vlan_tag': port_tags[port.port_name]}
-                        )
+                             'new_vlan_tag': port_tags[port.port_name]})
                         changed_ports.add(port.vif_id)
         if changed_ports:
             # explicitly mark these DOWN on the server since they have been
@@ -2892,8 +2887,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
                 "in both the Agent and Server side."))
 
     def _get_network_mtu(self, network_id):
-        port_network = self.plugin_rpc.get_network_details(self.context,
-                network_id, self.agent_id, self.conf.host)
+        port_network = self.plugin_rpc.get_network_details(
+            self.context, network_id, self.agent_id, self.conf.host)
         return port_network['mtu']
 
     def _validate_rp_pkt_processing_cfg(self):

@@ -642,8 +642,10 @@ class OVNMechanismDriver(api.MechanismDriver):
         ipv6_opts = ', '.join(result.invalid_ipv6)
         LOG.info('The following extra DHCP options for port %(port_id)s '
                  'are not supported by OVN. IPv4: "%(ipv4_opts)s" and '
-                 'IPv6: "%(ipv6_opts)s"', {'port_id': port['id'],
-                 'ipv4_opts': ipv4_opts, 'ipv6_opts': ipv6_opts})
+                 'IPv6: "%(ipv6_opts)s"',
+                 {'port_id': port['id'],
+                  'ipv4_opts': ipv4_opts,
+                  'ipv6_opts': ipv6_opts})
 
     def create_port_precommit(self, context):
         """Allocate resources for a new port.

@@ -361,7 +361,8 @@ class OVNClientQosExtension(object):
                     updated_fip_ids.add(floatingip['id'])
 
             for router in (qos_binding.QosPolicyRouterGatewayIPBinding.
-                    get_routers_by_network_id(admin_context, network['id'])):
+                           get_routers_by_network_id(admin_context,
+                                                     network['id'])):
                 router_dict = self._plugin_l3._make_router_dict(router)
                 self.update_router(txn, router_dict)
                 updated_router_ids.add(router.id)

@@ -295,11 +295,11 @@ class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend):
                                  "delete by lport-name"))
 
     def get_all_stateless_fip_nats(self):
-        cmd = self.db_find('NAT',
+        cmd = self.db_find(
+            'NAT',
             ('external_ids', '!=', {ovn_const.OVN_FIP_EXT_ID_KEY: ''}),
             ('options', '=', {'stateless': 'true'}),
-            ('type', '=', 'dnat_and_snat')
-        )
+            ('type', '=', 'dnat_and_snat'))
         return cmd.execute(check_error=True)
 
     def get_all_logical_switches_with_ports(self):

@@ -432,7 +432,8 @@ class OVNClient(object):
             ha_ch_grp = self._nb_idl.ha_chassis_group_get(
                 ha_ch_grp_name).execute(check_error=True)
             txn.add(self._nb_idl.db_set(
-                'HA_Chassis_Group', ha_ch_grp_name, ('external_ids',
+                'HA_Chassis_Group', ha_ch_grp_name,
+                ('external_ids',
                  {ovn_const.OVN_AZ_HINTS_EXT_ID_KEY: ','.join(az_hints)})))
 
             # Get the chassis belonging to the AZ hints
@@ -1552,8 +1553,8 @@ class OVNClient(object):
             LOG.debug("Router %s not found", port['device_id'])
         else:
             network_ids = {port['network_id'] for port in router_ports}
-            for net in self._plugin.get_networks(admin_context,
-                    filters={'id': network_ids}):
+            for net in self._plugin.get_networks(
+                    admin_context, filters={'id': network_ids}):
                 if net['mtu'] > network['mtu']:
                     options[ovn_const.OVN_ROUTER_PORT_GW_MTU_OPTION] = str(
                         network['mtu'])
@@ -1631,8 +1632,8 @@ class OVNClient(object):
             cidr = subnet['cidr']
 
             if ovn_conf.is_ovn_emit_need_to_frag_enabled():
-                provider_net = self._plugin.get_network(context,
-                    router[l3.EXTERNAL_GW_INFO]['network_id'])
+                provider_net = self._plugin.get_network(
+                    context, router[l3.EXTERNAL_GW_INFO]['network_id'])
                 self.set_gateway_mtu(context, provider_net)
 
             if utils.is_snat_enabled(router) and cidr:
@@ -1760,8 +1761,8 @@ class OVNClient(object):
 
             if (ovn_conf.is_ovn_emit_need_to_frag_enabled() and
                     router.get('gw_port_id')):
-                provider_net = self._plugin.get_network(context,
-                    router[l3.EXTERNAL_GW_INFO]['network_id'])
+                provider_net = self._plugin.get_network(
+                    context, router[l3.EXTERNAL_GW_INFO]['network_id'])
                 self.set_gateway_mtu(context, provider_net, txn=txn)
 
             cidr = None
@@ -2175,8 +2176,7 @@ class OVNClient(object):
 
                 # Set lsp DHCP options
                 txn.add(self._nb_idl.set_lswitch_port(
-                    lport_name=port['id'],
-                    **columns))
+                    lport_name=port['id'], **columns))
 
     def _update_subnet_dhcp_options(self, subnet, network, txn):
         if utils.is_dhcp_options_ignored(subnet):

@@ -355,7 +355,8 @@ class OvnNbSynchronizer(OvnDbSynchronizer):
                 continue
             for ovn_fip in ovn_fips:
                 if (ovn_fip['logical_ip'] == db_fip['fixed_ip_address'] and
-                    ovn_fip['external_ip'] == db_fip['floating_ip_address']):
+                        ovn_fip['external_ip'] ==
+                        db_fip['floating_ip_address']):
                     break
             else:
                 to_add.append(db_fip)
@@ -363,7 +364,8 @@ class OvnNbSynchronizer(OvnDbSynchronizer):
         for ovn_fip in ovn_fips:
             for db_fip in db_fips:
                 if (ovn_fip['logical_ip'] == db_fip['fixed_ip_address'] and
-                    ovn_fip['external_ip'] == db_fip['floating_ip_address']):
+                        ovn_fip['external_ip'] ==
+                        db_fip['floating_ip_address']):
                     break
             else:
                 to_remove.append(ovn_fip)

@@ -560,8 +560,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             port['status'] = const.PORT_STATUS_DOWN
             super(Ml2Plugin, self).update_port(
                 mech_context._plugin_context, port['id'],
-                {port_def.RESOURCE_NAME:
-                 {'status': const.PORT_STATUS_DOWN}})
+                {port_def.RESOURCE_NAME: {'status': const.PORT_STATUS_DOWN}})
 
         if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
             self._clear_port_binding(mech_context, binding, port,
@@ -1505,8 +1504,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
 
         subnet_ids = [f['subnet_id'] for f in port['fixed_ips']]
         if (db.is_dhcp_active_on_any_subnet(context, subnet_ids) and
-                len(self.get_dhcp_agents_hosting_networks(context,
-                    [port['network_id']]))):
+                len(self.get_dhcp_agents_hosting_networks(
+                    context, [port['network_id']]))):
             # the agents will tell us when the dhcp config is ready so we setup
             # a provisioning component to prevent the port from going ACTIVE
             # until a dhcp_ready_on_port notification is received.
@@ -1655,8 +1654,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                 name=pdata.get('name'),
                 network_id=pdata.get('network_id'),
                 admin_state_up=pdata.get('admin_state_up'),
-                status=pdata.get('status',
-                                 const.PORT_STATUS_ACTIVE),
+                status=pdata.get('status', const.PORT_STATUS_ACTIVE),
                 device_id=pdata.get('device_id'),
                 device_owner=pdata.get('device_owner'),
                 description=pdata.get('description'))
@@ -1820,8 +1818,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         filters = {'port_id': [id]}
         security_groups = (
             super(Ml2Plugin, self)._get_port_security_group_bindings(
-                context, filters)
-        )
+                context, filters))
         if security_groups:
             raise psec_exc.PortSecurityPortHasSecurityGroup()
 
@@ -2567,8 +2564,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                 .pop_segments_from_network(network_copy)
 
             if event == events.PRECOMMIT_CREATE:
-                network_segments = [network_segment
-                                    for network_segment in network_segments
+                network_segments = (
+                    [network_segment for network_segment in network_segments
                      # A segment popped from a network could have its
                      # segmentation_id set to None if the segment
                      # being created is partial.
@@ -2578,7 +2575,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                              network_segment[api.NETWORK_TYPE] ==
                              changed_segment[api.NETWORK_TYPE] and
                              network_segment[api.PHYSICAL_NETWORK] ==
-                             changed_segment[api.PHYSICAL_NETWORK])]
+                             changed_segment[api.PHYSICAL_NETWORK])])
             elif event == events.PRECOMMIT_DELETE:
                 network_segments.append(changed_segment)
 
@@ -2615,8 +2612,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
 
     def _make_port_binding_dict(self, binding, fields=None):
         res = {key: binding[key] for key in (
-            pbe_ext.HOST, pbe_ext.VIF_TYPE, pbe_ext.VNIC_TYPE,
-            pbe_ext.STATUS)}
+            pbe_ext.HOST, pbe_ext.VIF_TYPE, pbe_ext.VNIC_TYPE, pbe_ext.STATUS)}
         if isinstance(binding, ports_obj.PortBinding):
             res[pbe_ext.PROFILE] = binding.profile or {}
             res[pbe_ext.VIF_DETAILS] = binding.vif_details or {}
@@ -2639,8 +2635,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             port_dict['status'] = const.PORT_STATUS_DOWN
             super(Ml2Plugin, self).update_port(
                 mech_context._plugin_context, port_dict['id'],
-                {port_def.RESOURCE_NAME:
-                 {'status': const.PORT_STATUS_DOWN}})
+                {port_def.RESOURCE_NAME: {'status': const.PORT_STATUS_DOWN}})
         self._update_port_dict_binding(port_dict,
                                        mech_context._binding)
         mech_context._binding.persist_state_to_session(
@@ -2793,8 +2788,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             port_dict['status'] = const.PORT_STATUS_DOWN
             super(Ml2Plugin, self).update_port(
                 context, port_dict['id'],
-                {port_def.RESOURCE_NAME:
-                 {'status': const.PORT_STATUS_DOWN}})
+                {port_def.RESOURCE_NAME: {'status': const.PORT_STATUS_DOWN}})
         levels = db.get_binding_level_objs(context, port_id,
                                            inactive_binding.host)
         bind_context = driver_context.PortContext(self, context, port_dict,