Fix some pylint indentation warnings

Running with a stricter .pylintrc generates a lot of
C0330 warnings (hanging/continued indentation). Fix
the ones in neutron/plugins.

Trivialfix

Change-Id: Id9138652f5f07ef12fa682e182fe210019e8f975
Brian Haley 2022-11-03 22:27:55 -04:00
parent ba795c6692
commit b1714a2b9d
19 changed files with 201 additions and 206 deletions
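
For context, pylint's C0330 ("bad-continuation") warning fires when a continuation line is neither aligned with its opening bracket nor indented by a consistent hanging indent. A minimal sketch of the pattern fixed throughout this commit (hypothetical code, not taken from the diff below):

    # C0330: continuation neither aligned with the opening
    # parenthesis nor a uniform hanging indent
    port = plugin.get_port(context,
            port_id)

    # Fixed by aligning with the opening parenthesis ...
    port = plugin.get_port(context,
                           port_id)

    # ... or by breaking after the parenthesis and using a hanging indent
    port = plugin.get_port(
        context, port_id)

Most hunks below apply one of these two spellings.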


@@ -121,7 +121,8 @@ def delete_distributed_port_binding_if_stale(context, binding):
         LOG.debug("Distributed port: Deleting binding %s", binding)
         context.session.delete(binding)
         for bindlv in (context.session.query(models.PortBindingLevel).
-                filter_by(port_id=binding.port_id, host=binding.host)):
+                       filter_by(port_id=binding.port_id,
+                                 host=binding.host)):
             context.session.delete(bindlv)
         LOG.debug("For port %(port_id)s, host %(host)s, "
                   "cleared binding levels",
@@ -271,7 +272,7 @@ def get_distributed_port_bindings(context, port_id):
     with db_api.CONTEXT_READER.using(context):
         bindings = (context.session.query(models.DistributedPortBinding).
                     filter(models.DistributedPortBinding.port_id.startswith(
-                port_id)).all())
+                        port_id)).all())
     if not bindings:
         LOG.debug("No bindings for distributed port %s", port_id)
     return bindings
@@ -345,7 +346,7 @@ def _prevent_segment_delete_with_port_bound(resource, event, trigger,
     if auto_delete_port_ids:
         LOG.debug("Auto-deleting dhcp port(s) on segment %s: %s",
-                payload.resource_id, ", ".join(auto_delete_port_ids))
+                  payload.resource_id, ", ".join(auto_delete_port_ids))
     plugin = directory.get_plugin()
     for port_id in auto_delete_port_ids:
         try:


@@ -323,8 +323,8 @@ class PortContext(MechanismDriverContext, api.PortContext):
         network_id = self._network_context.current['id']
         return self._plugin.type_manager.allocate_dynamic_segment(
-                self._plugin_context, network_id, segment)
+            self._plugin_context, network_id, segment)

     def release_dynamic_segment(self, segment_id):
         return self._plugin.type_manager.release_dynamic_segment(
-                self._plugin_context, segment_id)
+            self._plugin_context, segment_id)


@@ -263,8 +263,8 @@ class L2populationMechanismDriver(api.MechanismDriver):
                 port_context, context.bottom_bound_segment, port, agent_host,
                 include_ha_router_ports=True)
             if (fdb_entries and
-                l3plugin.list_router_ids_on_host(
-                    admin_context, agent_host, [port['device_id']])):
+                    l3plugin.list_router_ids_on_host(
+                        admin_context, agent_host, [port['device_id']])):
                 # NOTE(slaweq): in case this is HA router, remove unicast
                 # entries to this port but don't remove flood entry
                 self._remove_flooding(fdb_entries)
@@ -316,8 +316,8 @@ class L2populationMechanismDriver(api.MechanismDriver):
             # Notify other agents to add fdb rule for current port
             if (port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE and
-                not l3_hamode_db.is_ha_router_port(
-                    port_context, port['device_owner'], port['device_id'])):
+                    not l3_hamode_db.is_ha_router_port(
+                        port_context, port['device_owner'], port['device_id'])):
                 other_fdb_ports[agent_ip] += self._get_port_fdb_entries(port)

         self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx,
@@ -352,10 +352,10 @@ class L2populationMechanismDriver(api.MechanismDriver):
                     const.FLOODING_ENTRY)
             # Notify other agents to remove fdb rules for current port
             if (port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE and
-                (include_ha_router_ports or
-                 not l3_hamode_db.is_ha_router_port(context,
-                                                    port['device_owner'],
-                                                    port['device_id']))):
+                    (include_ha_router_ports or
+                     not l3_hamode_db.is_ha_router_port(context,
+                                                        port['device_owner'],
+                                                        port['device_id']))):
                 fdb_entries = self._get_port_fdb_entries(port)
                 other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries


@@ -276,8 +276,8 @@ class AgentMechanismDriverBase(api.MechanismDriver, metaclass=abc.ABCMeta):
                     "host %(host)s reports being responsible for resource "
                     "provider %(rsc_provider)s: %(agents)s",
                     {'host': context.current['binding:host_id'],
-                        'rsc_provider': allocation[group],
-                        'agents': [agent['id'] for agent in agents]})
+                     'rsc_provider': allocation[group],
+                     'agents': [agent['id'] for agent in agents]})
                 return False
             else:
                 # not responsible, must be somebody else


@@ -132,7 +132,7 @@ class PciOsWrapper(object):
             with open(cls.NUMVFS_PATH % dev_name) as f:
                 numvfs = int(f.read())
                 LOG.debug("Number of VFs configured on device %s: %s",
-                    dev_name, numvfs)
+                          dev_name, numvfs)
                 return numvfs
         except IOError:
             LOG.warning("Error reading sriov_numvfs file for device %s, "


@@ -231,8 +231,8 @@ class OpenFlowSwitchMixin(object):
         (dp, ofp, ofpp) = self._get_dp()
         match = self._match(ofp, ofpp, match, **match_kwargs)
         if isinstance(instructions, str):
-            debtcollector.deprecate("Use of string instruction is "
-                                    "deprecated", removal_version='U')
+            debtcollector.deprecate(
+                "Use of string instruction is deprecated", removal_version='U')
             jsonlist = ofctl_string.ofp_instruction_from_str(
                 ofp, instructions)
             instructions = ofproto_parser.ofp_instruction_from_jsondict(


@@ -307,7 +307,7 @@ class OVSDVRNeutronAgent(object):
     def _add_arp_dvr_mac_for_phys_br(self, physical_network, mac):
         self.int_br.add_dvr_gateway_mac_arp_vlan(
-                mac=mac, port=self.int_ofports[physical_network])
+            mac=mac, port=self.int_ofports[physical_network])

     def _remove_dvr_mac_for_phys_br(self, physical_network, mac):
         # REVISIT(yamamoto): match in_port as well?
@@ -322,7 +322,7 @@ class OVSDVRNeutronAgent(object):
     def _add_arp_dvr_mac_for_tun_br(self, mac):
         self.int_br.add_dvr_gateway_mac_arp_tun(
-                mac=mac, port=self.patch_tun_ofport)
+            mac=mac, port=self.patch_tun_ofport)

     def _remove_dvr_mac_for_tun_br(self, mac):
         self.int_br.remove_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport)


@@ -660,8 +660,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
             network=network, address=str(mac))

     def _add_port_to_updated_smartnic_ports(self, mac, vif_name, iface_id,
-                                            vif_type, vm_uuid='',
-                                            mtu=None):
+                                            vif_type, vm_uuid='', mtu=None):
         if mtu is None:
             mtu = plugin_utils.get_deployment_physnet_mtu()
         self.updated_smartnic_ports.append({
@@ -815,10 +814,10 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
         def _process_removed_ports(removed_ports):
             for ovs_port in removed_ports:
                 self._add_port_to_updated_smartnic_ports(
-                        ovs_port['vif_mac'],
-                        ovs_port['vif_name'],
-                        ovs_port['vif_id'],
-                        portbindings.VIF_TYPE_UNBOUND)
+                    ovs_port['vif_mac'],
+                    ovs_port['vif_name'],
+                    ovs_port['vif_id'],
+                    portbindings.VIF_TYPE_UNBOUND)

         _process_removed_ports(removed_ports)
         _process_added_ports(added_ports)
@@ -1324,8 +1323,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
                           if netaddr.IPNetwork(ip).version == 6}
         # Allow neighbor advertisements for LLA address.
         ipv6_addresses |= {str(netutils.get_ipv6_addr_by_EUI64(
-                               n_const.IPv6_LLA_PREFIX, mac))
-                           for mac in mac_addresses}
+            n_const.IPv6_LLA_PREFIX, mac)) for mac in mac_addresses}
         if not has_zero_prefixlen_address(ipv6_addresses):
             # Install protection only when prefix is not zero because a /0
             # prefix allows any address anyway and the nd_target can only
@@ -1845,10 +1843,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
         for vlan_mappings in self.vlan_manager:
             for lvm in vlan_mappings.values():
                 for port in lvm.vif_ports.values():
-                    if (
-                        port.port_name in port_tags and
-                        port_tags[port.port_name] != lvm.vlan
-                    ):
+                    if (port.port_name in port_tags and
+                            port_tags[port.port_name] != lvm.vlan):
                         LOG.info(
                             "Port '%(port_name)s' has lost "
                             "its vlan tag '%(vlan_tag)d'! "
@@ -1856,8 +1852,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
                             "'%(new_vlan_tag)s'.",
                             {'port_name': port.port_name,
                              'vlan_tag': lvm.vlan,
-                             'new_vlan_tag': port_tags[port.port_name]}
-                        )
+                             'new_vlan_tag': port_tags[port.port_name]})
                         changed_ports.add(port.vif_id)
         if changed_ports:
             # explicitly mark these DOWN on the server since they have been
@@ -2184,8 +2179,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
         (skipped_devices, binding_no_activated_devices,
          need_binding_devices, failed_devices['added'],
          devices_not_in_datapath, migrating_devices) = (
-            self.treat_devices_added_or_updated(
-                devices_added_updated, provisioning_needed, re_added))
+             self.treat_devices_added_or_updated(
+                 devices_added_updated, provisioning_needed, re_added))
         LOG.info("process_network_ports - iteration:%(iter_num)d - "
                  "treat_devices_added_or_updated completed. "
                  "Skipped %(num_skipped)d and no activated binding "
@@ -2638,8 +2633,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
                                     failed_devices_retries_map):
         (new_failed_devices_retries_map, devices_not_to_retry,
          ancillary_devices_not_to_retry) = self._get_devices_not_to_retry(
-            failed_devices, failed_ancillary_devices,
-            failed_devices_retries_map)
+             failed_devices, failed_ancillary_devices,
+             failed_devices_retries_map)
         self._remove_devices_not_to_retry(
             failed_devices, failed_ancillary_devices, devices_not_to_retry,
             ancillary_devices_not_to_retry)
@@ -2761,10 +2756,10 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
                 self.activated_bindings = set()
                 (port_info, ancillary_port_info, consecutive_resyncs,
                  ports_not_ready_yet) = (self.process_port_info(
-                    start, polling_manager, sync, ovs_restarted,
-                    ports, ancillary_ports, updated_ports_copy,
-                    consecutive_resyncs, ports_not_ready_yet,
-                    failed_devices, failed_ancillary_devices))
+                     start, polling_manager, sync, ovs_restarted,
+                     ports, ancillary_ports, updated_ports_copy,
+                     consecutive_resyncs, ports_not_ready_yet,
+                     failed_devices, failed_ancillary_devices))
                 sync = False
                 self.process_deleted_ports(port_info)
                 self.process_deactivated_bindings(port_info)
@@ -2787,7 +2782,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
                     LOG.debug("Starting to process devices in:%s",
                               port_info)
                     provisioning_needed = (
-                            ovs_restarted or bridges_recreated)
+                        ovs_restarted or bridges_recreated)
                     failed_devices = self.process_network_ports(
                         port_info, provisioning_needed)
                     LOG.info("Agent rpc_loop - iteration:%(iter_num)d - "
@@ -2892,16 +2887,16 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
                      "in both the Agent and Server side."))

     def _get_network_mtu(self, network_id):
-        port_network = self.plugin_rpc.get_network_details(self.context,
-                network_id, self.agent_id, self.conf.host)
+        port_network = self.plugin_rpc.get_network_details(
+            self.context, network_id, self.agent_id, self.conf.host)
         return port_network['mtu']

     def _validate_rp_pkt_processing_cfg(self):
         if self.rp_pp_with_direction and self.rp_pp_without_direction:
             raise ValueError(_(
                 '%s and %s configuration options are mutually exclusive.') %
-                    (n_const.RP_PP_WITHOUT_DIRECTION,
-                     n_const.RP_PP_WITH_DIRECTION))
+                (n_const.RP_PP_WITHOUT_DIRECTION,
+                 n_const.RP_PP_WITH_DIRECTION))


 def validate_local_ip(local_ip):


@@ -132,8 +132,8 @@ class OVNMechanismDriver(api.MechanismDriver):
                 OVN_MIN_GENEVE_MAX_HEADER_SIZE):
             LOG.critical('Geneve max_header_size set too low for OVN '
                          '(%d vs %d)',
-                cfg.CONF.ml2_type_geneve.max_header_size,
-                OVN_MIN_GENEVE_MAX_HEADER_SIZE)
+                         cfg.CONF.ml2_type_geneve.max_header_size,
+                         OVN_MIN_GENEVE_MAX_HEADER_SIZE)
             raise SystemExit(1)
         self._setup_vif_port_bindings()
         if impl_idl_ovn.OvsdbSbOvnIdl.schema_has_table('Chassis_Private'):
@@ -642,8 +642,10 @@ class OVNMechanismDriver(api.MechanismDriver):
             ipv6_opts = ', '.join(result.invalid_ipv6)
             LOG.info('The following extra DHCP options for port %(port_id)s '
                      'are not supported by OVN. IPv4: "%(ipv4_opts)s" and '
-                     'IPv6: "%(ipv6_opts)s"', {'port_id': port['id'],
-                     'ipv4_opts': ipv4_opts, 'ipv6_opts': ipv6_opts})
+                     'IPv6: "%(ipv6_opts)s"',
+                     {'port_id': port['id'],
+                      'ipv4_opts': ipv4_opts,
+                      'ipv6_opts': ipv6_opts})

     def create_port_precommit(self, context):
         """Allocate resources for a new port.
@@ -961,7 +963,7 @@ class OVNMechanismDriver(api.MechanismDriver):
         if not agents:
             LOG.warning('Refusing to bind port %(port_id)s due to '
                         'no OVN chassis for host: %(host)s',
-                    {'port_id': port['id'], 'host': bind_host})
+                        {'port_id': port['id'], 'host': bind_host})
             return
         agent = agents[0]
         if not agent.alive:
@@ -1065,8 +1067,8 @@ class OVNMechanismDriver(api.MechanismDriver):
             if not nat['external_ids'].get(ovn_const.OVN_FIP_EXT_MAC_KEY):
                 self.nb_ovn.db_set('NAT', nat['_uuid'],
                                    ('external_ids',
-                                   {ovn_const.OVN_FIP_EXT_MAC_KEY:
-                                   nat['external_mac']})).execute()
+                                    {ovn_const.OVN_FIP_EXT_MAC_KEY:
+                                     nat['external_mac']})).execute()

             if up and ovn_conf.is_ovn_distributed_floating_ip():
                 mac = nat['external_ids'][ovn_const.OVN_FIP_EXT_MAC_KEY]
@@ -1075,13 +1077,13 @@ class OVNMechanismDriver(api.MechanismDriver):
                           port_id, mac)
                 self.nb_ovn.db_set(
                     'NAT', nat['_uuid'], ('external_mac', mac)).execute(
-                        check_error=True)
+                    check_error=True)
             else:
                 if nat['external_mac']:
                     LOG.debug("Clearing up external_mac of port %s", port_id)
                     self.nb_ovn.db_clear(
                         'NAT', nat['_uuid'], 'external_mac').execute(
-                            check_error=True)
+                        check_error=True)

     def _should_notify_nova(self, db_port):
         # NOTE(twilson) It is possible for a test to override a config option


@@ -361,7 +361,8 @@ class OVNClientQosExtension(object):
                 updated_fip_ids.add(floatingip['id'])

         for router in (qos_binding.QosPolicyRouterGatewayIPBinding.
-                get_routers_by_network_id(admin_context, network['id'])):
+                       get_routers_by_network_id(admin_context,
+                                                 network['id'])):
             router_dict = self._plugin_l3._make_router_dict(router)
             self.update_router(txn, router_dict)
             updated_router_ids.add(router.id)


@@ -295,11 +295,11 @@ class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend):
                                "delete by lport-name"))

     def get_all_stateless_fip_nats(self):
-        cmd = self.db_find('NAT',
+        cmd = self.db_find(
+            'NAT',
             ('external_ids', '!=', {ovn_const.OVN_FIP_EXT_ID_KEY: ''}),
             ('options', '=', {'stateless': 'true'}),
-            ('type', '=', 'dnat_and_snat')
-        )
+            ('type', '=', 'dnat_and_snat'))
         return cmd.execute(check_error=True)

     def get_all_logical_switches_with_ports(self):
@@ -730,7 +730,7 @@ class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend):
         rc = self.db_find_rows('Load_Balancer', (
             'external_ids', '=',
             {ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY:
-                pf_const.PORT_FORWARDING_PLUGIN,
+             pf_const.PORT_FORWARDING_PLUGIN,
              ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: lrouter_name}))
         return [ovn_obj for ovn_obj in rc.execute(check_error=True)
                 if ovn_const.OVN_FIP_EXT_ID_KEY in ovn_obj.external_ids]
@@ -742,7 +742,7 @@ class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend):
         result = self.db_find('Load_Balancer', (
             'external_ids', '=',
             {ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY:
-                pf_const.PORT_FORWARDING_PLUGIN,
+             pf_const.PORT_FORWARDING_PLUGIN,
              ovn_const.OVN_FIP_EXT_ID_KEY: fip_id})).execute(check_error=True)
         return result[0] if result else None
@@ -758,8 +758,8 @@ class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend):
         for nat in self.get_lrouter_nat_rules(utils.ovn_name(router_id)):
             if (nat['type'] == 'dnat_and_snat' and
-                nat['logical_ip'] == logical_ip and
-                nat['external_ip'] == external_ip):
+                    nat['logical_ip'] == logical_ip and
+                    nat['external_ip'] == external_ip):
                 return nat

     def check_revision_number(self, name, resource, resource_type,
@@ -803,7 +803,7 @@ class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend):
         for row in self._tables['Port_Group'].rows.values():
             name = getattr(row, 'name')
             if not (ovn_const.OVN_SG_EXT_ID_KEY in row.external_ids or
-                name == ovn_const.OVN_DROP_PORT_GROUP_NAME):
+                    name == ovn_const.OVN_DROP_PORT_GROUP_NAME):
                 continue
             data = {}
             for row_key in getattr(row, "_data", {}):
@@ -890,8 +890,8 @@ class OvsdbSbOvnIdl(sb_impl_idl.OvnSbApiIdlImpl, Backend):
                                                     card_serial_number):
         for ch in self.chassis_list().execute(check_error=True):
             if ('{}={}'
-                    .format(ovn_const.CMS_OPT_CARD_SERIAL_NUMBER,
-                            card_serial_number)
+                .format(ovn_const.CMS_OPT_CARD_SERIAL_NUMBER,
+                        card_serial_number)
                     in utils.get_ovn_chassis_other_config(ch).get(
                         ovn_const.OVN_CMS_OPTIONS, '').split(',')):
                 return ch


@@ -778,10 +778,10 @@ class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase):
                     self._ovn_client.update_port_dhcp_options(
                         port_info, txn))
                 txn.add(self._nb_idl.set_lswitch_port(
-                        lport_name=port['id'],
-                        dhcpv4_options=dhcpv4_options,
-                        dhcpv6_options=dhcpv6_options,
-                        if_exists=False))
+                    lport_name=port['id'],
+                    dhcpv4_options=dhcpv4_options,
+                    dhcpv6_options=dhcpv6_options,
+                    if_exists=False))

         raise periodics.NeverAgain()


@@ -288,7 +288,7 @@ class OVNClient(object):
                 if 'subnet_id' in ip
             ]
             subnets = self._plugin.get_subnets(
-                    context, filters={'id': subnet_ids})
+                context, filters={'id': subnet_ids})
             if subnets:
                 for ip in ip_subnets:
                     ip_addr = ip['ip_address']
@@ -380,7 +380,7 @@ class OVNClient(object):
                     'additional_chassis'):
                 mdst = port.get(
                     portbindings.PROFILE, {}).get(
-                        ovn_const.MIGRATING_ATTR)
+                    ovn_const.MIGRATING_ATTR)
                 if mdst:
                     # Let OVN know that the port should be configured on
                     # destination too
@@ -432,8 +432,9 @@ class OVNClient(object):
             ha_ch_grp = self._nb_idl.ha_chassis_group_get(
                 ha_ch_grp_name).execute(check_error=True)
             txn.add(self._nb_idl.db_set(
-                'HA_Chassis_Group', ha_ch_grp_name, ('external_ids',
-                {ovn_const.OVN_AZ_HINTS_EXT_ID_KEY: ','.join(az_hints)})))
+                'HA_Chassis_Group', ha_ch_grp_name,
+                ('external_ids',
+                 {ovn_const.OVN_AZ_HINTS_EXT_ID_KEY: ','.join(az_hints)})))

         # Get the chassis belonging to the AZ hints
         ch_list = self._sb_idl.get_gateway_chassis_from_cms_options(
@@ -448,7 +449,7 @@ class OVNClient(object):
         ch_to_del = all_ch - az_chassis
         for ch in ch_to_del:
             txn.add(self._nb_idl.ha_chassis_group_del_chassis(
-                    ha_ch_grp_name, ch, if_exists=True))
+                ha_ch_grp_name, ch, if_exists=True))

         # Find the highest priority chassis in the HA Chassis Group. If
         # it exists and still belongs to the same AZ, keep it as the highest
@@ -459,8 +460,8 @@ class OVNClient(object):
         priority = ovn_const.HA_CHASSIS_GROUP_HIGHEST_PRIORITY
         if high_prio_ch and high_prio_ch.chassis_name in az_chassis:
             txn.add(self._nb_idl.ha_chassis_group_add_chassis(
-                    ha_ch_grp_name, high_prio_ch.chassis_name,
-                    priority=priority))
+                ha_ch_grp_name, high_prio_ch.chassis_name,
+                priority=priority))
             az_chassis.remove(high_prio_ch.chassis_name)
             priority -= 1
@@ -469,7 +470,7 @@ class OVNClient(object):
         # Chassis as the highest priority one.
         for ch in random.sample(list(az_chassis), len(az_chassis)):
             txn.add(self._nb_idl.ha_chassis_group_add_chassis(
-                    ha_ch_grp_name, ch, priority=priority))
+                ha_ch_grp_name, ch, priority=priority))
             priority -= 1

         return ha_ch_grp.uuid
@@ -553,8 +554,8 @@ class OVNClient(object):
         # bound, has no device_owner and it is OVN LB VIP port.
         # For more details check related bug #1789686.
         if (port.get('name').startswith(ovn_const.LB_VIP_PORT_PREFIX) and
-            not port.get('device_owner') and
-            port.get(portbindings.VIF_TYPE) ==
+                not port.get('device_owner') and
+                port.get(portbindings.VIF_TYPE) ==
                 portbindings.VIF_TYPE_UNBOUND):
             kwargs['addresses'] = []
@@ -672,8 +673,8 @@ class OVNClient(object):
         # bound, has no device_owner and it is OVN LB VIP port.
         # For more details check related bug #1789686.
         if (port.get('name').startswith(ovn_const.LB_VIP_PORT_PREFIX) and
-            not port.get('device_owner') and
-            port.get(portbindings.VIF_TYPE) ==
+                not port.get('device_owner') and
+                port.get(portbindings.VIF_TYPE) ==
                 portbindings.VIF_TYPE_UNBOUND):
             columns_dict['addresses'] = []
@@ -707,17 +708,17 @@ class OVNClient(object):
             # without extra dhcp options and security group, while updating
             # includes the new attributes setting to port.
             txn.add(self._nb_idl.set_lswitch_port(
-                    lport_name=port['id'],
-                    external_ids=external_ids,
-                    parent_name=port_info.parent_name,
-                    tag=port_info.tag,
-                    options=port_info.options,
-                    enabled=port['admin_state_up'],
-                    port_security=port_info.port_security,
-                    dhcpv4_options=dhcpv4_options,
-                    dhcpv6_options=dhcpv6_options,
-                    if_exists=False,
-                    **columns_dict))
+                lport_name=port['id'],
+                external_ids=external_ids,
+                parent_name=port_info.parent_name,
+                tag=port_info.tag,
+                options=port_info.options,
+                enabled=port['admin_state_up'],
+                port_security=port_info.port_security,
+                dhcpv4_options=dhcpv4_options,
+                dhcpv6_options=dhcpv6_options,
+                if_exists=False,
+                **columns_dict))

             # Determine if security groups or fixed IPs are updated.
             old_sg_ids = set(utils.get_ovn_port_security_groups(ovn_port))
@@ -738,7 +739,7 @@ class OVNClient(object):
             # port_security is disabled, or it's a trusted port, then
             # allow all traffic.
             elif ((not new_sg_ids and
-                  not utils.is_port_security_enabled(port)) or
+                   not utils.is_port_security_enabled(port)) or
                   utils.is_lsp_trusted(port)):
                 self._del_port_from_drop_port_group(port['id'], txn)
@@ -1034,9 +1035,9 @@ class OVNClient(object):

     def _delete_floatingip(self, fip, lrouter, txn=None):
         commands = [self._nb_idl.delete_nat_rule_in_lrouter(
-            lrouter, type='dnat_and_snat',
-            logical_ip=fip['logical_ip'],
-            external_ip=fip['external_ip'])]
+                    lrouter, type='dnat_and_snat',
+                    logical_ip=fip['logical_ip'],
+                    external_ip=fip['external_ip'])]
         try:
             port_id = (
                 fip['external_ids'].get(ovn_const.OVN_FIP_PORT_EXT_ID_KEY))
@@ -1552,11 +1553,11 @@ class OVNClient(object):
             LOG.debug("Router %s not found", port['device_id'])
         else:
             network_ids = {port['network_id'] for port in router_ports}
-            for net in self._plugin.get_networks(admin_context,
-                    filters={'id': network_ids}):
+            for net in self._plugin.get_networks(
+                    admin_context, filters={'id': network_ids}):
                 if net['mtu'] > network['mtu']:
                     options[ovn_const.OVN_ROUTER_PORT_GW_MTU_OPTION] = str(
-                            network['mtu'])
+                        network['mtu'])
                     break
         return options
@@ -1631,8 +1632,8 @@ class OVNClient(object):
         cidr = subnet['cidr']
         if ovn_conf.is_ovn_emit_need_to_frag_enabled():
-            provider_net = self._plugin.get_network(context,
-                router[l3.EXTERNAL_GW_INFO]['network_id'])
+            provider_net = self._plugin.get_network(
+                context, router[l3.EXTERNAL_GW_INFO]['network_id'])
             self.set_gateway_mtu(context, provider_net)

         if utils.is_snat_enabled(router) and cidr:
@@ -1760,8 +1761,8 @@ class OVNClient(object):
             if (ovn_conf.is_ovn_emit_need_to_frag_enabled() and
                     router.get('gw_port_id')):
-                provider_net = self._plugin.get_network(context,
-                    router[l3.EXTERNAL_GW_INFO]['network_id'])
+                provider_net = self._plugin.get_network(
+                    context, router[l3.EXTERNAL_GW_INFO]['network_id'])
                 self.set_gateway_mtu(context, provider_net, txn=txn)

             cidr = None
@@ -1839,8 +1840,8 @@ class OVNClient(object):
                 break

         cmd = self._nb_idl.delete_lswitch_port(
-                lport_name=port_to_del,
-                lswitch_name=utils.ovn_name(network_id))
+            lport_name=port_to_del,
+            lswitch_name=utils.ovn_name(network_id))
         self._transaction([cmd])

     def _gen_network_parameters(self, network):
@@ -2090,7 +2091,7 @@ class OVNClient(object):
         # Add subnet host_routes to 'classless_static_route' dhcp option
         routes.extend(['%s,%s' % (route['destination'], route['nexthop'])
-                      for route in subnet['host_routes']])
+                       for route in subnet['host_routes']])

         if routes:
             # if there are static routes, then we need to add the
@@ -2171,12 +2172,11 @@ class OVNClient(object):
                 **port_dhcp_options))
         columns = ({'dhcpv6_options': lsp_dhcp_options} if
                    subnet['ip_version'] == const.IP_VERSION_6 else {
-                   'dhcpv4_options': lsp_dhcp_options})
+                       'dhcpv4_options': lsp_dhcp_options})

         # Set lsp DHCP options
         txn.add(self._nb_idl.set_lswitch_port(
-            lport_name=port['id'],
-            **columns))
+            lport_name=port['id'], **columns))
@@ -2285,7 +2285,7 @@ class OVNClient(object):

     def _add_port_to_drop_port_group(self, port, txn):
         txn.add(self._nb_idl.pg_add_ports(ovn_const.OVN_DROP_PORT_GROUP_NAME,
-                port))
+                                          port))

     def _del_port_from_drop_port_group(self, port, txn):
         pg_name = ovn_const.OVN_DROP_PORT_GROUP_NAME
@@ -2424,7 +2424,7 @@ class OVNClient(object):
     def is_dns_required_for_port(self, port):
         try:
             if not all([port['dns_name'], port['dns_assignment'],
-                    port['device_id']]):
+                        port['device_id']]):
                 return False
         except KeyError:
             # Possible that dns extension is not enabled.
@@ -2499,7 +2499,7 @@ class OVNClient(object):
         for hostname, ips in records_to_add.items():
             if ls_dns_record.records.get(hostname) != ips:
                 txn.add(self._nb_idl.dns_add_record(
-                        ls_dns_record.uuid, hostname, ips))
+                    ls_dns_record.uuid, hostname, ips))

     def add_txns_to_remove_port_dns_records(self, txn, port):
         lswitch_name = utils.ovn_name(port['network_id'])
@@ -2531,9 +2531,9 @@ class OVNClient(object):
         for hostname in hostnames:
             if ls_dns_record.records.get(hostname):
                 txn.add(self._nb_idl.dns_remove_record(
                     ls_dns_record.uuid, hostname, if_exists=True))
         for ip in ips:
             ptr_record = netaddr.IPAddress(ip).reverse_dns.rstrip(".")
             if ls_dns_record.records.get(ptr_record):
                 txn.add(self._nb_idl.dns_remove_record(
-                        ls_dns_record.uuid, ptr_record, if_exists=True))
+                    ls_dns_record.uuid, ptr_record, if_exists=True))


@@ -207,7 +207,7 @@ class OvnNbSynchronizer(OvnDbSynchronizer):
                 # ports sync operation later.
                 for n_port in db_ports:
                     if ((utils.is_security_groups_enabled(n_port) or
-                        utils.is_port_security_enabled(n_port)) and
+                         utils.is_port_security_enabled(n_port)) and
                             n_port['id'] in ovn_ports):
                         txn.add(self.ovn_api.pg_add_ports(
                             pg, n_port['id']))
@@ -330,7 +330,7 @@ class OvnNbSynchronizer(OvnDbSynchronizer):
         for db_route in db_routes:
             for ovn_route in ovn_routes:
                 if (ovn_route['destination'] == db_route['destination'] and
-                    ovn_route['nexthop'] == db_route['nexthop']):
+                        ovn_route['nexthop'] == db_route['nexthop']):
                     break
             else:
                 to_add.append(db_route)
@@ -338,7 +338,7 @@ class OvnNbSynchronizer(OvnDbSynchronizer):
         for ovn_route in ovn_routes:
             for db_route in db_routes:
                 if (ovn_route['destination'] == db_route['destination'] and
-                    ovn_route['nexthop'] == db_route['nexthop']):
+                        ovn_route['nexthop'] == db_route['nexthop']):
                     break
             else:
                 to_remove.append(ovn_route)
@@ -355,7 +355,8 @@ class OvnNbSynchronizer(OvnDbSynchronizer):
                 continue
             for ovn_fip in ovn_fips:
                 if (ovn_fip['logical_ip'] == db_fip['fixed_ip_address'] and
-                    ovn_fip['external_ip'] == db_fip['floating_ip_address']):
+                        ovn_fip['external_ip'] ==
+                        db_fip['floating_ip_address']):
                     break
             else:
                 to_add.append(db_fip)
@@ -363,7 +364,8 @@ class OvnNbSynchronizer(OvnDbSynchronizer):
         for ovn_fip in ovn_fips:
             for db_fip in db_fips:
                 if (ovn_fip['logical_ip'] == db_fip['fixed_ip_address'] and
-                    ovn_fip['external_ip'] == db_fip['floating_ip_address']):
+                        ovn_fip['external_ip'] ==
+                        db_fip['floating_ip_address']):
                     break
             else:
                 to_remove.append(ovn_fip)
@@ -651,7 +653,7 @@ class OvnNbSynchronizer(OvnDbSynchronizer):
             LOG.warning("Deleting the router %s from OVN NB DB",
                         lrouter['name'])
             txn.add(self.ovn_api.delete_lrouter(
-                    utils.ovn_name(lrouter['name'])))
+                utils.ovn_name(lrouter['name'])))

         for lrport_info in del_lrouter_ports_list:
             LOG.warning("Router Port found in OVN but not in "
@@ -660,9 +662,9 @@ class OvnNbSynchronizer(OvnDbSynchronizer):
             LOG.warning("Deleting the port %s from OVN NB DB",
                         lrport_info['port'])
             txn.add(self.ovn_api.delete_lrouter_port(
-                    utils.ovn_lrouter_port_name(lrport_info['port']),
-                    utils.ovn_name(lrport_info['lrouter']),
-                    if_exists=False))
+                utils.ovn_lrouter_port_name(lrport_info['port']),
+                utils.ovn_name(lrport_info['lrouter']),
+                if_exists=False))
         for sroute in update_sroutes_list:
             if sroute['add']:
                 LOG.warning("Router %(id)s static routes %(route)s "
@@ -913,9 +915,9 @@ class OvnNbSynchronizer(OvnDbSynchronizer):
         LOG.debug('OVN sync metadata ports started')
         for net in self.core_plugin.get_networks(ctx):
             metadata_ports = self.core_plugin.get_ports(
-                    ctx, filters=dict(
-                        network_id=[net['id']],
-                        device_owner=[constants.DEVICE_OWNER_DISTRIBUTED]))
+                ctx, filters=dict(
+                    network_id=[net['id']],
+                    device_owner=[constants.DEVICE_OWNER_DISTRIBUTED]))

             if not metadata_ports:
                 LOG.warning('Missing metadata port found in Neutron for '
@@ -1115,14 +1117,14 @@ class OvnNbSynchronizer(OvnDbSynchronizer):
                     LOG.debug('Deleting port DHCPv4 options for (port %s)',
                               lport_info['port'])
                     txn.add(self.ovn_api.delete_dhcp_options(
-                        ovn_all_dhcp_options['ports_v4'].pop(
-                            lport_info['port'])['uuid']))
+                            ovn_all_dhcp_options['ports_v4'].pop(
+                                lport_info['port'])['uuid']))
                 if lport_info['port'] in ovn_all_dhcp_options['ports_v6']:
                     LOG.debug('Deleting port DHCPv6 options for (port %s)',
                               lport_info['port'])
                     txn.add(self.ovn_api.delete_dhcp_options(
-                        ovn_all_dhcp_options['ports_v6'].pop(
-                            lport_info['port'])['uuid']))
+                            ovn_all_dhcp_options['ports_v6'].pop(
+                                lport_info['port'])['uuid']))

         self._sync_port_dhcp_options(ctx, ports_need_sync_dhcp_opts,
                                      ovn_all_dhcp_options['ports_v4'],


@@ -482,10 +482,10 @@ class TunnelRpcCallbackMixin(object):
                 return entry
         else:
             msg = (_("Network type value %(type)s not supported, "
-                "host: %(host)s with tunnel IP: %(ip)s") %
-                {'type': tunnel_type,
-                 'host': host or 'legacy mode (no host provided by agent)',
-                 'ip': tunnel_ip})
+                     "host: %(host)s with tunnel IP: %(ip)s") %
+                   {'type': tunnel_type,
+                    'host': host or 'legacy mode (no host provided by agent)',
+                    'ip': tunnel_ip})
             raise exc.InvalidInput(error_message=msg)


@@ -55,9 +55,9 @@ class SubnetDNSPublishFixedIPExtensionDriver(
         if flag:
             subnet_obj.SubnetDNSPublishFixedIP(
-                    plugin_context,
-                    subnet_id=db_data['id'],
-                    dns_publish_fixed_ip=flag).create()
+                plugin_context,
+                subnet_id=db_data['id'],
+                dns_publish_fixed_ip=flag).create()
             db_data[sn_dns.DNS_PUBLISH_FIXED_IP] = flag

     def process_update_subnet(self, plugin_context, request_data, db_data):
@@ -72,12 +72,12 @@ class SubnetDNSPublishFixedIPExtensionDriver(
         subnet_id = db_data['id']
         if new_value:
             subnet_obj.SubnetDNSPublishFixedIP(
-                    plugin_context,
-                    subnet_id=subnet_id,
-                    dns_publish_fixed_ip=new_value).create()
+                plugin_context,
+                subnet_id=subnet_id,
+                dns_publish_fixed_ip=new_value).create()
         else:
             sn_obj = subnet_obj.SubnetDNSPublishFixedIP.get_object(
-                    plugin_context,
-                    subnet_id=subnet_id)
+                plugin_context,
+                subnet_id=subnet_id)
             sn_obj.delete()
         db_data[sn_dns.DNS_PUBLISH_FIXED_IP] = new_value


@@ -191,12 +191,12 @@ class TypeManager(stevedore.named.NamedExtensionManager):
         else:
             network_segments = [
                 {provider_key: network.pop(provider_key)
-                    for provider_key in provider.ATTRIBUTES}]
+                 for provider_key in provider.ATTRIBUTES}]

         return (
             [{api.NETWORK_TYPE: network_segment[provider.NETWORK_TYPE],
-             api.PHYSICAL_NETWORK: network_segment[provider.PHYSICAL_NETWORK],
-             api.SEGMENTATION_ID: network_segment[provider.SEGMENTATION_ID]}
+              api.PHYSICAL_NETWORK: network_segment[provider.PHYSICAL_NETWORK],
+              api.SEGMENTATION_ID: network_segment[provider.SEGMENTATION_ID]}
              for network_segment in network_segments])

     def initialize(self):


@@ -560,8 +560,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             port['status'] = const.PORT_STATUS_DOWN
             super(Ml2Plugin, self).update_port(
                 mech_context._plugin_context, port['id'],
-                {port_def.RESOURCE_NAME:
-                 {'status': const.PORT_STATUS_DOWN}})
+                {port_def.RESOURCE_NAME: {'status': const.PORT_STATUS_DOWN}})

         if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
             self._clear_port_binding(mech_context, binding, port,
@@ -973,7 +972,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             if (provider_net.SEGMENTATION_ID in mech_driver.obj.
                     provider_network_attribute_updates_supported()):
                 if isinstance(mech_driver.obj,
-                        mech_agent.AgentMechanismDriverBase):
+                              mech_agent.AgentMechanismDriverBase):
                     agent_type = mech_driver.obj.agent_type
                     agents = self.get_agents(
                         context, filters={'agent_type': [agent_type]})
@@ -1390,7 +1389,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
     def _after_create_subnet(self, context, result, mech_context):
         # db base plugin post commit ops
         self._create_subnet_postcommit(context, result,
-                network=mech_context.network.current)
+                                       network=mech_context.network.current)

         # add network to subnet dict to save a DB call on dhcp notification
         result['network'] = mech_context.network.current
@@ -1505,8 +1504,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         subnet_ids = [f['subnet_id'] for f in port['fixed_ips']]
         if (db.is_dhcp_active_on_any_subnet(context, subnet_ids) and
-                len(self.get_dhcp_agents_hosting_networks(context,
-                    [port['network_id']]))):
+                len(self.get_dhcp_agents_hosting_networks(
+                    context, [port['network_id']]))):
             # the agents will tell us when the dhcp config is ready so we setup
             # a provisioning component to prevent the port from going ACTIVE
             # until a dhcp_ready_on_port notification is received.
@@ -1655,8 +1654,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                 name=pdata.get('name'),
                 network_id=pdata.get('network_id'),
                 admin_state_up=pdata.get('admin_state_up'),
-                status=pdata.get('status',
-                                 const.PORT_STATUS_ACTIVE),
+                status=pdata.get('status', const.PORT_STATUS_ACTIVE),
                 device_id=pdata.get('device_id'),
                 device_owner=pdata.get('device_owner'),
                 description=pdata.get('description'))
@@ -1671,7 +1669,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             # Determine the MAC address
             raw_mac_address = pdata.get('mac_address',
-                    const.ATTR_NOT_SPECIFIED)
+                                        const.ATTR_NOT_SPECIFIED)
             if raw_mac_address is const.ATTR_NOT_SPECIFIED:
                 raw_mac_address = macs.pop()
             elif self._is_mac_in_use(context, network_id, raw_mac_address):
@@ -1683,9 +1681,9 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             # Create the Port object
             db_port_obj = ports_obj.Port(context,
-                    mac_address=eui_mac_address,
-                    id=port['port']['id'],
-                    **bulk_port_data)
+                                         mac_address=eui_mac_address,
+                                         id=port['port']['id'],
+                                         **bulk_port_data)
             db_port_obj.create()

             # Call IPAM to store allocated IP addresses
@@ -1695,10 +1693,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             ipam_fixed_ips = []
             for ip in ipams:
                 fixed_ip = ports_obj.IPAllocation(
-                        port_id=db_port_obj['id'],
-                        subnet_id=ip['subnet_id'],
-                        network_id=network_id,
-                        ip_address=ip['ip_address'])
+                    port_id=db_port_obj['id'],
+                    subnet_id=ip['subnet_id'],
+                    network_id=network_id,
+                    ip_address=ip['ip_address'])
                 ipam_fixed_ips.append(fixed_ip)

             db_port_obj['fixed_ips'] = ipam_fixed_ips
@@ -1765,12 +1763,12 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                                                  port_dict)
             port_data.append(
-                    {
-                        'id': db_port_obj['id'],
-                        'port_obj': db_port_obj,
-                        'mech_context': mech_context,
-                        'port_dict': port_dict
-                    })
+                {
+                    'id': db_port_obj['id'],
+                    'port_obj': db_port_obj,
+                    'mech_context': mech_context,
+                    'port_dict': port_dict
+                })

             # Perform actions after the transaction is committed
             completed_ports = []
@@ -1779,9 +1777,9 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                                      port['port_dict'],
                                      port['port_obj'].db_obj)
             completed_ports.append(
-                    self._after_create_port(context,
-                                            port['port_dict'],
-                                            port['mech_context']))
+                self._after_create_port(context,
+                                        port['port_dict'],
+                                        port['mech_context']))
         return completed_ports

     # TODO(yalei) - will be simplified after security group and address pair be
@@ -1801,7 +1799,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             if not self._check_update_deletes_allowed_address_pairs(port):
                 # not a request for deleting the address-pairs
                 updated_port[addr_apidef.ADDRESS_PAIRS] = (
-                        self.get_allowed_address_pairs(context, id))
+                    self.get_allowed_address_pairs(context, id))

                 # check if address pairs has been in db, if address pairs could
                 # be put in extension driver, we can refine here.
@@ -1820,8 +1818,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                 filters = {'port_id': [id]}
                 security_groups = (
                     super(Ml2Plugin, self)._get_port_security_group_bindings(
-                        context, filters)
-                )
+                        context, filters))
                 if security_groups:
                     raise psec_exc.PortSecurityPortHasSecurityGroup()
@@ -1858,8 +1855,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                                                        port, id)
             if (psec.PORTSECURITY in attrs) and (
-                original_port[psec.PORTSECURITY] !=
-                updated_port[psec.PORTSECURITY]):
+                    original_port[psec.PORTSECURITY] !=
+                    updated_port[psec.PORTSECURITY]):
                 need_port_update_notify = True
             # TODO(QoS): Move out to the extension framework somehow.
             # Follow https://review.opendev.org/#/c/169223 for a solution.
@@ -1995,7 +1992,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
     def delete_distributed_port_bindings_by_router_id(self, context,
                                                       router_id):
         for binding in (context.session.query(models.DistributedPortBinding).
-                filter_by(router_id=router_id)):
+                        filter_by(router_id=router_id)):
             db.clear_binding_levels(context, binding.port_id, binding.host)
             context.session.delete(binding)
@@ -2333,7 +2330,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         LOG.debug("Current status of the port %s is: %s; "
                   "New status is: %s", port_id, port.status, status)
         if ((port.status != status and
-                port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE) or
+             port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE) or
                 port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE):
             attr = {
                 'id': port.id,
@@ -2442,7 +2439,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         for port in ports:
             # map back to original requested id
             port_id = next((port_id for port_id in port_ids
-                           if port['id'].startswith(port_id)), None)
+                            if port['id'].startswith(port_id)), None)
             port['device'] = port_ids_to_devices.get(port_id)

         return ports
@@ -2485,7 +2482,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         if ip_addresses_s:
             substr_filter = or_(*[models_v2.Port.fixed_ips.any(
                 models_v2.IPAllocation.ip_address.like('%%%s%%' % ip))
-                for ip in ip_addresses_s])
+                                  for ip in ip_addresses_s])
             query = query.filter(substr_filter)
         if limit:
             query = query.limit(limit)
@@ -2567,18 +2564,18 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                 .pop_segments_from_network(network_copy)
             if event == events.PRECOMMIT_CREATE:
-                network_segments = [network_segment
-                                    for network_segment in network_segments
-                                    # A segment popped from a network could have its
-                                    # segmentation_id set to None if the segment
-                                    # being created is partial.
-                                    if not ((network_segment[api.SEGMENTATION_ID] ==
-                                            changed_segment[api.SEGMENTATION_ID] or
-                                            network_segment[api.SEGMENTATION_ID] is None) and
-                                            network_segment[api.NETWORK_TYPE] ==
-                                            changed_segment[api.NETWORK_TYPE] and
-                                            network_segment[api.PHYSICAL_NETWORK] ==
-                                            changed_segment[api.PHYSICAL_NETWORK])]
+                network_segments = (
+                    [network_segment for network_segment in network_segments
+                     # A segment popped from a network could have its
+                     # segmentation_id set to None if the segment
+                     # being created is partial.
+                     if not ((network_segment[api.SEGMENTATION_ID] ==
+                              changed_segment[api.SEGMENTATION_ID] or
+                              network_segment[api.SEGMENTATION_ID] is None) and
+                             network_segment[api.NETWORK_TYPE] ==
+                             changed_segment[api.NETWORK_TYPE] and
+                             network_segment[api.PHYSICAL_NETWORK] ==
+                             changed_segment[api.PHYSICAL_NETWORK])])
             elif event == events.PRECOMMIT_DELETE:
                 network_segments.append(changed_segment)
@@ -2615,8 +2612,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,

     def _make_port_binding_dict(self, binding, fields=None):
         res = {key: binding[key] for key in (
-            pbe_ext.HOST, pbe_ext.VIF_TYPE, pbe_ext.VNIC_TYPE,
-            pbe_ext.STATUS)}
+            pbe_ext.HOST, pbe_ext.VIF_TYPE, pbe_ext.VNIC_TYPE, pbe_ext.STATUS)}
         if isinstance(binding, ports_obj.PortBinding):
            res[pbe_ext.PROFILE] = binding.profile or {}
            res[pbe_ext.VIF_DETAILS] = binding.vif_details or {}
@@ -2639,8 +2635,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             port_dict['status'] = const.PORT_STATUS_DOWN
             super(Ml2Plugin, self).update_port(
                 mech_context._plugin_context, port_dict['id'],
-                {port_def.RESOURCE_NAME:
-                 {'status': const.PORT_STATUS_DOWN}})
+                {port_def.RESOURCE_NAME: {'status': const.PORT_STATUS_DOWN}})
         self._update_port_dict_binding(port_dict,
                                        mech_context._binding)
         mech_context._binding.persist_state_to_session(
@@ -2793,8 +2788,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             port_dict['status'] = const.PORT_STATUS_DOWN
             super(Ml2Plugin, self).update_port(
                 context, port_dict['id'],
-                {port_def.RESOURCE_NAME:
-                 {'status': const.PORT_STATUS_DOWN}})
+                {port_def.RESOURCE_NAME: {'status': const.PORT_STATUS_DOWN}})
         levels = db.get_binding_level_objs(context, port_id,
                                            inactive_binding.host)
         bind_context = driver_context.PortContext(self, context, port_dict,


@@ -219,11 +219,11 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin):
                 continue
             try:
                 devices.append(self._get_device_details(
-                        rpc_context,
-                        agent_id=kwargs.get('agent_id'),
-                        host=host,
-                        device=device,
-                        port_context=bound_contexts[device]))
+                    rpc_context,
+                    agent_id=kwargs.get('agent_id'),
+                    host=host,
+                    device=device,
+                    port_context=bound_contexts[device]))
             except Exception:
                 LOG.exception("Failed to get details for device %s",
                               device)
@@ -359,14 +359,14 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin):
         """
         plugin = directory.get_plugin()
         l2pop_driver = plugin.mechanism_manager.mech_drivers.get(
-                'l2population')
+            'l2population')
         if not l2pop_driver:
             return
         port = ml2_db.get_port(rpc_context, port_id)
         if not port:
             return
         port_context = plugin.get_bound_port_context(
-                rpc_context, port_id, host)
+            rpc_context, port_id, host)
         if not port_context:
             # port deleted
             return