diff --git a/.pylintrc b/.pylintrc index 83dc8431b61..1cae81416bb 100644 --- a/.pylintrc +++ b/.pylintrc @@ -64,7 +64,6 @@ disable= # "R" Refactor recommendations duplicate-code, inconsistent-return-statements, - no-else-return, no-self-use, redefined-argument-from-local, too-few-public-methods, diff --git a/neutron/agent/common/ovs_lib.py b/neutron/agent/common/ovs_lib.py index 579a4c7db52..d97581ba447 100644 --- a/neutron/agent/common/ovs_lib.py +++ b/neutron/agent/common/ovs_lib.py @@ -816,9 +816,8 @@ class OVSBridge(BaseOVS): address = ip_lib.IPDevice(self.br_name).link.address if address: return address - else: - msg = _('Unable to determine mac address for %s') % self.br_name - raise Exception(msg) + msg = _('Unable to determine mac address for %s') % self.br_name + raise Exception(msg) def set_controllers_inactivity_probe(self, interval): """Set bridge controllers inactivity probe interval. @@ -1264,7 +1263,7 @@ class OVSBridge(BaseOVS): if_exists=True) or [] if port_type is None: return ports - elif not isinstance(port_type, list): + if not isinstance(port_type, list): port_type = [port_type] return [port['name'] for port in ports if port['type'] in port_type] @@ -1438,9 +1437,8 @@ def generate_random_cookie(): def check_cookie_mask(cookie): cookie = str(cookie) if '/' not in cookie: - return cookie + '/-1' - else: - return cookie + cookie += '/-1' + return cookie def is_a_flow_line(line): diff --git a/neutron/agent/common/placement_report.py b/neutron/agent/common/placement_report.py index eaccc320b4b..b837062620b 100644 --- a/neutron/agent/common/placement_report.py +++ b/neutron/agent/common/placement_report.py @@ -212,14 +212,13 @@ class PlacementState: # That means the RP for tunnelled networks is not associated # to a physical bridge interface. return [n_const.TRAIT_NETWORK_TUNNEL] - elif device == self._rp_tun_name and device in physical_bridges: + if device == self._rp_tun_name and device in physical_bridges: # The physical network and the tunnelled networks share the # same physical interface. return [n_const.TRAIT_NETWORK_TUNNEL, physnet_trait_mappings[device]] - else: - # Just the physical interface. - return [physnet_trait_mappings.get(device)] + # Just the physical interface. + return [physnet_trait_mappings.get(device)] rp_traits = [] physical_bridges = {br for brs in self._device_mappings.values() for diff --git a/neutron/agent/dhcp/agent.py b/neutron/agent/dhcp/agent.py index ed96b14878b..c133404a4fd 100644 --- a/neutron/agent/dhcp/agent.py +++ b/neutron/agent/dhcp/agent.py @@ -223,10 +223,8 @@ class DhcpAgent(manager.Manager): action, network, segment=sid_segment.get(seg_id), **action_kwargs)) return all(ret) - else: - # In case subnets are not attached to segments. default behavior. - return self._call_driver( - action, network, **action_kwargs) + # In case subnets are not attached to segments. default behavior. + return self._call_driver(action, network, **action_kwargs) def _call_driver(self, action, network, segment=None, **action_kwargs): """Invoke an action on a DHCP driver instance.""" @@ -248,11 +246,12 @@ class DhcpAgent(manager.Manager): self.dhcp_version, self.plugin_rpc, segment) + # NOTE(ihrachys) It's important that we always call the action + # before deciding what to return! 
rv = getattr(driver, action)(**action_kwargs) if action == 'get_metadata_bind_interface': return rv - else: - return True + return True except exceptions.Conflict: # No need to resync here, the agent will receive the event related # to a status update for the network @@ -635,7 +634,7 @@ class DhcpAgent(manager.Manager): self.schedule_resync("Agent port was modified", port.network_id) return - elif old_ips != new_ips: + if old_ips != new_ips: LOG.debug("Agent IPs on network %s changed from %s to %s", network.id, old_ips, new_ips) driver_action = 'restart' diff --git a/neutron/agent/l2/extensions/dhcp/ipv6.py b/neutron/agent/l2/extensions/dhcp/ipv6.py index 8078db226af..cd756df1322 100644 --- a/neutron/agent/l2/extensions/dhcp/ipv6.py +++ b/neutron/agent/l2/extensions/dhcp/ipv6.py @@ -110,7 +110,7 @@ class DHCPIPv6Responder(dhcp_base.DHCPResponderBase): # Get request Valid Lifetime for IA_NA. # Get request IAID for IA_NA. return opt.data[start:end] - elif iaid: + if iaid: # default IAID return struct.pack('!I', 1) # default time or interval @@ -243,7 +243,7 @@ class DHCPIPv6Responder(dhcp_base.DHCPResponderBase): def get_ret_type(self, req_type): if req_type == 'SOLICIT': return dhcp6.DHCPV6_ADVERTISE - elif req_type in REQ_TYPES_FOR_REPLY: + if req_type in REQ_TYPES_FOR_REPLY: return dhcp6.DHCPV6_REPLY return REQ_TYPE_UNKNOWN diff --git a/neutron/agent/l3/agent.py b/neutron/agent/l3/agent.py index 116df1aca93..33b96a8de7a 100644 --- a/neutron/agent/l3/agent.py +++ b/neutron/agent/l3/agent.py @@ -794,15 +794,14 @@ class L3NATAgent(ha.AgentMixin, routers = [r for r in routers if r['id'] == update.id] if not routers: - removed = self._safe_router_removed(update.id) - if not removed: - self._resync_router(update) - else: + if self._safe_router_removed(update.id): # need to update timestamp of removed router in case # there are older events for the same router in the # processing queue (like events from fullsync) in order to # prevent deleted router re-creation rp.fetched_and_processed(update.timestamp) + else: + self._resync_router(update) LOG.info("Finished a router delete for %s, update_id %s. 
" "Time elapsed: %.3f", update.id, update.update_id, diff --git a/neutron/agent/l3/dvr_edge_ha_router.py b/neutron/agent/l3/dvr_edge_ha_router.py index ade4e4cf4ca..c206a1588d6 100644 --- a/neutron/agent/l3/dvr_edge_ha_router.py +++ b/neutron/agent/l3/dvr_edge_ha_router.py @@ -31,8 +31,7 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter, """ def __init__(self, host, *args, **kwargs): - super().__init__(host, - *args, **kwargs) + super().__init__(host, *args, **kwargs) self.enable_snat = None @property @@ -85,16 +84,13 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter, self.set_ha_port() if (self.is_router_primary() and self.ha_port and self.ha_port['status'] == constants.PORT_STATUS_ACTIVE): - return super().add_centralized_floatingip( - fip, fip_cidr) - else: - return constants.FLOATINGIP_STATUS_ACTIVE + return super().add_centralized_floatingip(fip, fip_cidr) + return constants.FLOATINGIP_STATUS_ACTIVE def remove_centralized_floatingip(self, fip_cidr): self._remove_vip(fip_cidr) if self.is_router_primary(): - super().remove_centralized_floatingip( - fip_cidr) + super().remove_centralized_floatingip(fip_cidr) def get_centralized_fip_cidr_set(self): ex_gw_port = self.get_ex_gw_port() @@ -105,8 +101,7 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter, return set(self._get_cidrs_from_keepalived(interface_name)) def external_gateway_added(self, ex_gw_port, interface_name): - super().external_gateway_added( - ex_gw_port, interface_name) + super().external_gateway_added(ex_gw_port, interface_name) for port in self.get_snat_interfaces(): snat_interface_name = self._get_snat_int_device_name(port['id']) self._disable_ipv6_addressing_on_interface(snat_interface_name) @@ -124,8 +119,7 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter, namespace=self.ha_namespace, prefix=constants.SNAT_INT_DEV_PREFIX) self._clear_vips(snat_interface) - super()._external_gateway_removed( - ex_gw_port, interface_name) + super()._external_gateway_removed(ex_gw_port, interface_name) self._clear_vips(interface_name) def external_gateway_updated(self, ex_gw_port, interface_name): diff --git a/neutron/agent/l3/dvr_edge_router.py b/neutron/agent/l3/dvr_edge_router.py index e8fbe968dae..193de8bb848 100644 --- a/neutron/agent/l3/dvr_edge_router.py +++ b/neutron/agent/l3/dvr_edge_router.py @@ -81,12 +81,9 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): # SNAT might be rescheduled to this agent; need to process like # newly created gateway return self.external_gateway_added(ex_gw_port, interface_name) - else: - preserve_ips = self._list_centralized_floating_ip_cidrs() - self._external_gateway_added(ex_gw_port, - interface_name, - self.snat_namespace.name, - preserve_ips) + preserve_ips = self._list_centralized_floating_ip_cidrs() + self._external_gateway_added( + ex_gw_port, interface_name, self.snat_namespace.name, preserve_ips) def _external_gateway_removed(self, ex_gw_port, interface_name): super().external_gateway_removed(ex_gw_port, @@ -250,9 +247,8 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): # namespace and Router Namespace, to reduce the complexity. 
if self.snat_namespace.exists(): return True - else: - LOG.error("The SNAT namespace %s does not exist for " - "the router.", self.snat_namespace.name) + LOG.error("The SNAT namespace %s does not exist for the router.", + self.snat_namespace.name) return False def update_routing_table(self, operation, route): diff --git a/neutron/agent/l3/dvr_local_router.py b/neutron/agent/l3/dvr_local_router.py index 47df0587687..80dffc5b09a 100644 --- a/neutron/agent/l3/dvr_local_router.py +++ b/neutron/agent/l3/dvr_local_router.py @@ -326,15 +326,14 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): elif operation == 'delete': device.neigh.delete(ip, mac) return True - else: - if operation == 'add': - LOG.warning("Device %s does not exist so ARP entry " - "cannot be updated, will cache " - "information to be applied later " - "when the device exists", - device) - self._cache_arp_entry(ip, mac, subnet_id, operation) - return False + if operation == 'add': + LOG.warning("Device %s does not exist so ARP entry " + "cannot be updated, will cache " + "information to be applied later " + "when the device exists", + device) + self._cache_arp_entry(ip, mac, subnet_id, operation) + return False except Exception: with excutils.save_and_reraise_exception(): LOG.exception("DVR: Failed updating arp entry") @@ -493,8 +492,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): if is_add: exc = 'DVR: error adding redirection logic' else: - exc = ('DVR: snat remove failed to clear the rule ' - 'and device') + exc = 'DVR: snat remove failed to clear the rule and device' LOG.exception(exc) def _snat_redirect_add(self, gateway, sn_port, sn_int): diff --git a/neutron/agent/l3/fip_rule_priority_allocator.py b/neutron/agent/l3/fip_rule_priority_allocator.py index 4ce540bd827..6ac07e2ccef 100644 --- a/neutron/agent/l3/fip_rule_priority_allocator.py +++ b/neutron/agent/l3/fip_rule_priority_allocator.py @@ -28,8 +28,7 @@ class FipPriority: def __eq__(self, other): if isinstance(other, FipPriority): return (self.index == other.index) - else: - return False + return False def __int__(self): return int(self.index) diff --git a/neutron/agent/l3/ha_router.py b/neutron/agent/l3/ha_router.py index 4dfd1833d7a..06b9a213d9f 100644 --- a/neutron/agent/l3/ha_router.py +++ b/neutron/agent/l3/ha_router.py @@ -326,12 +326,11 @@ class HaRouter(router.RouterInfo): """ manager = self.keepalived_manager if manager.get_process().active: - if self.ha_state != 'primary': - conf = manager.get_conf_on_disk() - managed_by_keepalived = conf and ipv6_lladdr in conf - if managed_by_keepalived: - return False - else: + if self.ha_state == 'primary': + return False + conf = manager.get_conf_on_disk() + managed_by_keepalived = conf and ipv6_lladdr in conf + if managed_by_keepalived: return False return True @@ -510,8 +509,7 @@ class HaRouter(router.RouterInfo): self._clear_vips(interface_name) if self.ha_state == 'primary': - super().external_gateway_removed(ex_gw_port, - interface_name) + super().external_gateway_removed(ex_gw_port, interface_name) else: # We are not the primary node, so no need to delete ip addresses. 
self.driver.unplug(interface_name, diff --git a/neutron/agent/l3/l3_agent_extension_api.py b/neutron/agent/l3/l3_agent_extension_api.py index f519e7be191..3063aa54ad2 100644 --- a/neutron/agent/l3/l3_agent_extension_api.py +++ b/neutron/agent/l3/l3_agent_extension_api.py @@ -55,8 +55,7 @@ class L3AgentExtensionAPI: if project_id: return [ri for ri in self._router_info.values() if ri.router['project_id'] == project_id] - else: - return [] + return [] def is_router_in_namespace(self, router_id): """Given a router_id, make sure that the router is in a local diff --git a/neutron/agent/linux/bridge_lib.py b/neutron/agent/linux/bridge_lib.py index ed56f871d75..60e08676e87 100644 --- a/neutron/agent/linux/bridge_lib.py +++ b/neutron/agent/linux/bridge_lib.py @@ -54,8 +54,7 @@ def catch_exceptions(function): def is_bridged_interface(interface): if not interface: return False - else: - return os.path.exists(BRIDGE_PORT_FS_FOR_DEVICE % interface) + return os.path.exists(BRIDGE_PORT_FS_FOR_DEVICE % interface) def get_interface_ifindex(interface): @@ -87,9 +86,8 @@ class BridgeDevice(ip_lib.IPDevice): path = os.readlink(BRIDGE_PATH_FOR_DEVICE % interface) except OSError: return None - else: - name = path.rpartition('/')[-1] - return cls(name) + name = path.rpartition('/')[-1] + return cls(name) def delbr(self): return self.link.delete() diff --git a/neutron/agent/linux/dhcp.py b/neutron/agent/linux/dhcp.py index 53d45b4aebe..a54e4ffdd36 100644 --- a/neutron/agent/linux/dhcp.py +++ b/neutron/agent/linux/dhcp.py @@ -97,8 +97,7 @@ class DictModel(collections.abc.MutableMapping): """Upgrade item if it needs to be upgraded.""" if needs_upgrade(item): return DictModel(item) - else: - return item + return item for key, value in itertools.chain(temp_dict.items(), kwargs.items()): if isinstance(value, (list, tuple)): diff --git a/neutron/agent/linux/external_process.py b/neutron/agent/linux/external_process.py index 47f50cd543b..0ef1e8e446e 100644 --- a/neutron/agent/linux/external_process.py +++ b/neutron/agent/linux/external_process.py @@ -157,10 +157,8 @@ class ProcessManager(MonitoredProcess): """Returns the file name for a given kind of config file.""" if self.pid_file: return self.pid_file - else: - return utils.get_conf_file_name(self.pids_path, - self.uuid, - self.service_pid_fname) + return utils.get_conf_file_name( + self.pids_path, self.uuid, self.service_pid_fname) @property def pid(self): diff --git a/neutron/agent/linux/ip_lib.py b/neutron/agent/linux/ip_lib.py index 6d9c14765fc..0e3d04bcb57 100644 --- a/neutron/agent/linux/ip_lib.py +++ b/neutron/agent/linux/ip_lib.py @@ -125,12 +125,11 @@ class SubProcessBase: def _run(self, options, command, args): if self.namespace: return self._as_root(options, command, args) - elif self.force_root: + if self.force_root: # Force use of the root helper to ensure that commands # will execute in dom0 when running under XenServer/XCP. 
return self._execute(options, command, args, run_as_root=True) - else: - return self._execute(options, command, args) + return self._execute(options, command, args) def _as_root(self, options, command, args, use_root_namespace=False): namespace = self.namespace if not use_root_namespace else None @@ -580,7 +579,7 @@ class IpAddrCommand(IpDeviceCommandBase): for filter in filters: if filter == 'permanent' and device['dynamic']: return False - elif not device[filter]: + if not device[filter]: return False return True @@ -803,7 +802,8 @@ def device_exists(device_name, namespace=None): return IPDevice(device_name, namespace=namespace).exists() -def device_exists_with_ips_and_mac(device_name, ip_cidrs, mac, namespace=None): +def device_exists_with_ips_and_mac(device_name, ip_cidrs, + mac, namespace=None) -> bool: """Return True if the device with the given IP addresses and MAC address exists in the namespace. """ @@ -811,14 +811,10 @@ def device_exists_with_ips_and_mac(device_name, ip_cidrs, mac, namespace=None): device = IPDevice(device_name, namespace=namespace) if mac and mac != device.link.address: return False - device_ip_cidrs = [ip['cidr'] for ip in device.addr.list()] - for ip_cidr in ip_cidrs: - if ip_cidr not in device_ip_cidrs: - return False + device_ip_cidrs = {ip['cidr'] for ip in device.addr.list()} except RuntimeError: return False - else: - return True + return not bool(set(ip_cidrs) - device_ip_cidrs) def get_device_mac(device_name, namespace=None): @@ -986,8 +982,7 @@ def list_network_namespaces(**kwargs): """ if cfg.CONF.AGENT.use_helper_for_ns_read: return privileged.list_netns(**kwargs) - else: - return netns.listnetns(**kwargs) + return netns.listnetns(**kwargs) def network_namespace_exists(namespace, try_is_ready=False, **kwargs): @@ -1006,8 +1001,7 @@ def network_namespace_exists(namespace, try_is_ready=False, **kwargs): nspath += '/' + namespace if cfg.CONF.AGENT.use_helper_for_ns_read: return priv_utils.path_exists(nspath) - else: - return path.exists(nspath) + return path.exists(nspath) try: privileged.open_namespace(namespace) @@ -1601,9 +1595,9 @@ def list_ip_routes(namespace, ip_version, scope=None, via=None, table=None, def get_proto(proto_number): if isinstance(proto_number, int) and proto_number in rtnl.rt_proto: return rtnl.rt_proto[proto_number] - elif isinstance(proto_number, str) and proto_number.isnumeric(): + if isinstance(proto_number, str) and proto_number.isnumeric(): return rtnl.rt_proto[int(proto_number)] - elif str(proto_number) in constants.IP_PROTOCOL_NUM_TO_NAME_MAP: + if str(proto_number) in constants.IP_PROTOCOL_NUM_TO_NAME_MAP: return constants.IP_PROTOCOL_NUM_TO_NAME_MAP[str(proto_number)] table = table if table else 'main' diff --git a/neutron/agent/linux/iptables_firewall.py b/neutron/agent/linux/iptables_firewall.py index 2fd329df171..a4cfc9a54cd 100644 --- a/neutron/agent/linux/iptables_firewall.py +++ b/neutron/agent/linux/iptables_firewall.py @@ -735,8 +735,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver): remote_gid = self._get_any_remote_group_id_in_rule(sg_rule) if self.enable_ipset and remote_gid: return self._generate_ipset_rule_args(sg_rule, remote_gid) - else: - return self._generate_plain_rule_args(sg_rule) + return self._generate_plain_rule_args(sg_rule) def _convert_sgr_to_iptables_rules(self, security_group_rules): iptables_rules = [] diff --git a/neutron/agent/linux/iptables_manager.py b/neutron/agent/linux/iptables_manager.py index 59059a58ac9..f7f4eb347f0 100644 --- a/neutron/agent/linux/iptables_manager.py +++ 
b/neutron/agent/linux/iptables_manager.py @@ -88,8 +88,7 @@ def comment_rule(rule, comment): def get_chain_name(chain_name, wrap=True): if wrap: return chain_name[:constants.MAX_IPTABLES_CHAIN_LEN_WRAP] - else: - return chain_name[:constants.MAX_IPTABLES_CHAIN_LEN_NOWRAP] + return chain_name[:constants.MAX_IPTABLES_CHAIN_LEN_NOWRAP] class IptablesRule: @@ -162,8 +161,7 @@ class IptablesTable: def _select_chain_set(self, wrap): if wrap: return self.chains - else: - return self.unwrapped_chains + return self.unwrapped_chains def remove_chain(self, name, wrap=True): """Remove named chain. diff --git a/neutron/agent/linux/openvswitch_firewall/firewall.py b/neutron/agent/linux/openvswitch_firewall/firewall.py index 99f41b50d99..b0b10dfa153 100644 --- a/neutron/agent/linux/openvswitch_firewall/firewall.py +++ b/neutron/agent/linux/openvswitch_firewall/firewall.py @@ -785,7 +785,7 @@ class OVSFirewallDriver(firewall.FirewallDriver): self.remove_port_filter(port) self._initialize_egress_no_port_security(port['device']) return - elif not self.is_port_managed(port): + if not self.is_port_managed(port): try: self._remove_egress_no_port_security(port['device']) except exceptions.OVSFWPortNotHandled as e: diff --git a/neutron/agent/linux/openvswitch_firewall/rules.py b/neutron/agent/linux/openvswitch_firewall/rules.py index 99b3dc45830..66e4e700c20 100644 --- a/neutron/agent/linux/openvswitch_firewall/rules.py +++ b/neutron/agent/linux/openvswitch_firewall/rules.py @@ -171,7 +171,7 @@ def flow_priority_offset(rule, conjunction=False): if protocol in [n_consts.PROTO_NUM_ICMP, n_consts.PROTO_NUM_IPV6_ICMP]: if 'port_range_min' not in rule: return conj_offset + 1 - elif 'port_range_max' not in rule: + if 'port_range_max' not in rule: return conj_offset + 2 return conj_offset + 3 diff --git a/neutron/agent/linux/tc_lib.py b/neutron/agent/linux/tc_lib.py index b6ece9ee5a5..5ae2e364ec5 100644 --- a/neutron/agent/linux/tc_lib.py +++ b/neutron/agent/linux/tc_lib.py @@ -96,9 +96,8 @@ def convert_to_kilobits(value, base): value = int(value) if input_in_bits: return utils.bits_to_kilobits(value, base) - else: - bits_value = utils.bytes_to_bits(value) - return utils.bits_to_kilobits(bits_value, base) + bits_value = utils.bytes_to_bits(value) + return utils.bits_to_kilobits(bits_value, base) unit = value[-1:] if unit not in UNITS.keys(): raise InvalidUnit(unit=unit) diff --git a/neutron/agent/linux/utils.py b/neutron/agent/linux/utils.py index a128fd74ed6..952924264b3 100644 --- a/neutron/agent/linux/utils.py +++ b/neutron/agent/linux/utils.py @@ -409,8 +409,7 @@ def read_if_exists(path: str, run_as_root=False) -> str: """ if run_as_root: return priv_utils.read_file(path) - else: - return utils.read_file(path) + return utils.read_file(path) class UnixDomainHTTPConnection(httplib.HTTPConnection): diff --git a/neutron/agent/metadata/agent.py b/neutron/agent/metadata/agent.py index bb1c6f780d0..b9a35d31293 100644 --- a/neutron/agent/metadata/agent.py +++ b/neutron/agent/metadata/agent.py @@ -139,7 +139,7 @@ class MetadataProxyHandler(proxy_base.MetadataProxyHandlerBase): num_ports = len(ports) if num_ports == 1: return ports[0]['device_id'], ports[0]['tenant_id'] - elif num_ports == 0: + if num_ports == 0: LOG.error("No port found in network %s with IP address %s", network_id, remote_address) return None, None diff --git a/neutron/agent/metadata/proxy_base.py b/neutron/agent/metadata/proxy_base.py index 40846df9fc9..2b933bd3bea 100644 --- a/neutron/agent/metadata/proxy_base.py +++ 
b/neutron/agent/metadata/proxy_base.py @@ -82,8 +82,7 @@ class MetadataProxyHandlerBase(metaclass=abc.ABCMeta): if instance_id: res = self._proxy_request(instance_id, project_id, req) return res - else: - return webob.exc.HTTPNotFound() + return webob.exc.HTTPNotFound() except Exception: LOG.exception("Unexpected error.") @@ -175,25 +174,23 @@ class MetadataProxyHandlerBase(metaclass=abc.ABCMeta): req.response.body = resp.content LOG.debug(str(resp)) return req.response - elif resp.status_code == 403: + if resp.status_code == 403: LOG.warning( 'The remote metadata server responded with Forbidden. This ' 'response usually occurs when shared secrets do not match.' ) return webob.exc.HTTPForbidden() - elif resp.status_code == 500: + if resp.status_code == 500: msg = _( 'Remote metadata server experienced an internal server error.' ) LOG.warning(msg) explanation = str(msg) return webob.exc.HTTPInternalServerError(explanation=explanation) - elif resp.status_code in (400, 404, 409, 502, 503, 504): + if resp.status_code in (400, 404, 409, 502, 503, 504): webob_exc_cls = webob.exc.status_map.get(resp.status_code) return webob_exc_cls() - else: - raise Exception(_('Unexpected response code: %s') % - resp.status_code) + raise Exception(_('Unexpected response code: %s') % resp.status_code) class UnixDomainMetadataProxyBase(metaclass=abc.ABCMeta): diff --git a/neutron/agent/ovn/extensions/qos_hwol.py b/neutron/agent/ovn/extensions/qos_hwol.py index ac1c6c73bb7..c492bc9b66d 100644 --- a/neutron/agent/ovn/extensions/qos_hwol.py +++ b/neutron/agent/ovn/extensions/qos_hwol.py @@ -251,10 +251,9 @@ class QoSHardwareOffloadExtension(extension_manager.OVNAgentExtension): def _kbps_2_mbps(rate_kbps): if rate_kbps == 0: # Delete the BW setting. return 0 - elif 0 < rate_kbps < 1000: # Any value under 1000kbps --> 1Mbps + if 0 < rate_kbps < 1000: # Any value under 1000kbps --> 1Mbps return 1 - else: - return int(rate_kbps / 1000.0) + return int(rate_kbps / 1000.0) def _get_port_representor(self, port_id): port_name = self.get_port(port_id) diff --git a/neutron/agent/ovn/metadata/server.py b/neutron/agent/ovn/metadata/server.py index f13c694374f..7aa22ea7f22 100644 --- a/neutron/agent/ovn/metadata/server.py +++ b/neutron/agent/ovn/metadata/server.py @@ -76,7 +76,7 @@ class MetadataProxyHandler(proxy_base.MetadataProxyHandlerBase): external_ids = ports[0].external_ids return (external_ids[ovn_const.OVN_DEVID_EXT_ID_KEY], external_ids[ovn_const.OVN_PROJID_EXT_ID_KEY]) - elif num_ports == 0: + if num_ports == 0: LOG.error("No port found in network %s with IP address %s", network_id, remote_address) elif num_ports > 1: diff --git a/neutron/agent/ovsdb/api.py b/neutron/agent/ovsdb/api.py index bafc569cce8..36a41979576 100644 --- a/neutron/agent/ovsdb/api.py +++ b/neutron/agent/ovsdb/api.py @@ -21,8 +21,8 @@ def val_to_py(val): if isinstance(val, abc.Sequence) and len(val) == 2: if val[0] == "uuid": return uuid.UUID(val[1]) - elif val[0] == "set": + if val[0] == "set": return [val_to_py(x) for x in val[1]] - elif val[0] == "map": + if val[0] == "map": return {val_to_py(x): val_to_py(y) for x, y in val[1]} return val diff --git a/neutron/api/api_common.py b/neutron/api/api_common.py index caae2736783..08a037f1f21 100644 --- a/neutron/api/api_common.py +++ b/neutron/api/api_common.py @@ -165,8 +165,7 @@ def get_path_url(request): parsed.path, parsed.params, parsed.query, parsed.fragment) return urllib.parse.urlunparse(new_parsed) - else: - return request.path_url + return request.path_url def 
get_limit_and_marker(request): @@ -347,11 +346,10 @@ class PaginationEmulatedHelper(PaginationHelper): if self.page_reverse: # don't wrap return items[max(i - self.limit, 0):i] - else: - if self.marker: - # skip the matched marker - i += 1 - return items[i:i + self.limit] + if self.marker: + # skip the matched marker + i += 1 + return items[i:i + self.limit] def get_links(self, items): return get_pagination_links( @@ -456,20 +454,19 @@ def convert_exception_to_http_exc(e, faults, language): new_body['NeutronError']['message'] = joined_msg converted_exceptions[0].body = serializer.serialize(new_body) return converted_exceptions[0] - else: - # multiple error types so we turn it into a Conflict with the - # inner codes and bodies packed in - new_exception = exceptions.Conflict() - inner_error_strings = [] - for c in converted_exceptions: - c_body = jsonutils.loads(c.body) - err = ('HTTP {} {}: {}'.format( - c.code, c_body['NeutronError']['type'], - c_body['NeutronError']['message'])) - inner_error_strings.append(err) - new_exception.msg = "\n".join(inner_error_strings) - return convert_exception_to_http_exc( - new_exception, faults, language) + # multiple error types so we turn it into a Conflict with the + # inner codes and bodies packed in + new_exception = exceptions.Conflict() + inner_error_strings = [] + for c in converted_exceptions: + c_body = jsonutils.loads(c.body) + err = 'HTTP {} {}: {}'.format( + c.code, c_body['NeutronError']['type'], + c_body['NeutronError']['message']) + inner_error_strings.append(err) + new_exception.msg = "\n".join(inner_error_strings) + return convert_exception_to_http_exc(new_exception, faults, + language) e = translate(e, language) body = serializer.serialize( diff --git a/neutron/api/rpc/handlers/dhcp_rpc.py b/neutron/api/rpc/handlers/dhcp_rpc.py index fa2c768e869..6841f08f0cd 100644 --- a/neutron/api/rpc/handlers/dhcp_rpc.py +++ b/neutron/api/rpc/handlers/dhcp_rpc.py @@ -116,11 +116,9 @@ class DhcpRpcCallback: try: if action == 'create_port': return p_utils.create_port(plugin, context, port) - elif action == 'update_port': + if action == 'update_port': return plugin.update_port(context, port['id'], port) - else: - msg = _('Unrecognized action') - raise exceptions.Invalid(message=msg) + raise exceptions.Invalid(message=_('Unrecognized action')) except (db_exc.DBReferenceError, exceptions.NetworkNotFound, exceptions.SubnetNotFound, @@ -134,10 +132,9 @@ class DhcpRpcCallback: try: subnet_id = port['port']['fixed_ips'][0]['subnet_id'] plugin.get_subnet(context, subnet_id) + ctxt.reraise = True except exceptions.SubnetNotFound: pass - else: - ctxt.reraise = True if ctxt.reraise: net_id = port['port']['network_id'] LOG.warning("Action %(action)s for network %(net_id)s " diff --git a/neutron/api/rpc/handlers/l3_rpc.py b/neutron/api/rpc/handlers/l3_rpc.py index a7801d8420e..82c0eed75e8 100644 --- a/neutron/api/rpc/handlers/l3_rpc.py +++ b/neutron/api/rpc/handlers/l3_rpc.py @@ -143,12 +143,9 @@ class L3RpcCallback: def _routers_to_sync(self, context, router_ids, host=None): if extensions.is_extension_supported( self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): - routers = ( - self.l3plugin.list_active_sync_routers_on_active_l3_agent( - context, host, router_ids)) - else: - routers = self.l3plugin.get_sync_data(context, router_ids) - return routers + return self.l3plugin.list_active_sync_routers_on_active_l3_agent( + context, host, router_ids) + return self.l3plugin.get_sync_data(context, router_ids) def _ensure_host_set_on_ports(self, context, host, 
routers): for router in routers: diff --git a/neutron/api/v2/base.py b/neutron/api/v2/base.py index 6cba14cc70a..8490b6855a3 100644 --- a/neutron/api/v2/base.py +++ b/neutron/api/v2/base.py @@ -262,14 +262,13 @@ class Controller: return ret_value return _handle_action - else: - raise AttributeError() + raise AttributeError() def _get_pagination_helper(self, request): if self._allow_pagination and self._native_pagination: return api_common.PaginationNativeHelper(request, self._primary_key) - elif self._allow_pagination: + if self._allow_pagination: return api_common.PaginationEmulatedHelper(request, self._primary_key) return api_common.NoPaginationHelper(request, self._primary_key) @@ -277,7 +276,7 @@ class Controller: def _get_sorting_helper(self, request): if self._allow_sorting and self._native_sorting: return api_common.SortingNativeHelper(request, self._attr_info) - elif self._allow_sorting: + if self._allow_sorting: return api_common.SortingEmulatedHelper(request, self._attr_info) return api_common.NoSortingHelper(request, self._attr_info) @@ -530,14 +529,13 @@ class Controller: if emulated: return self._emulate_bulk_create(obj_creator, request, body, parent_id) + if self._collection in body: + # This is weird but fixing it requires changes to the + # plugin interface + kwargs.update({self._collection: body}) else: - if self._collection in body: - # This is weird but fixing it requires changes to the - # plugin interface - kwargs.update({self._collection: body}) - else: - kwargs.update({self._resource: body}) - return obj_creator(request.context, **kwargs) + kwargs.update({self._resource: body}) + return obj_creator(request.context, **kwargs) except Exception: # In case of failure the plugin will always raise an # exception. Cancel the reservation @@ -557,15 +555,12 @@ class Controller: [self._filter_attributes( obj, fields_to_strip=fields_to_strip) for obj in objs]}) - else: - if self._collection in body: - # Emulate atomic bulk behavior - objs = do_create(body, bulk=True, emulated=True) - return notify({self._collection: objs}) - else: - obj = do_create(body) - return notify({self._resource: self._view(request.context, - obj)}) + if self._collection in body: + # Emulate atomic bulk behavior + objs = do_create(body, bulk=True, emulated=True) + return notify({self._collection: objs}) + obj = do_create(body) + return notify({self._resource: self._view(request.context, obj)}) def delete(self, request, id, **kwargs): """Deletes the specified entity.""" diff --git a/neutron/api/wsgi.py b/neutron/api/wsgi.py index 5b0a59dd327..e9df70b236b 100644 --- a/neutron/api/wsgi.py +++ b/neutron/api/wsgi.py @@ -730,8 +730,7 @@ class Controller: LOG.debug("%(url)s returned with HTTP %(status)d", dict(url=req.url, status=response.status_int)) return response - else: - return result + return result def _serialize(self, data, content_type): """Serialize the given dict to the provided content_type. diff --git a/neutron/cmd/sanity/checks.py b/neutron/cmd/sanity/checks.py index 8231aeee6e0..18c8014eb03 100644 --- a/neutron/cmd/sanity/checks.py +++ b/neutron/cmd/sanity/checks.py @@ -182,8 +182,7 @@ def ofctl_arg_supported(cmd, **kwargs): LOG.exception("Unexpected exception while checking supported" " feature via command: %s", full_args) return False - else: - return True + return True def arp_responder_supported(): @@ -258,7 +257,7 @@ def dnsmasq_local_service_supported(): LOG.debug("Exception while checking dnsmasq version. 
" "dnsmasq: No such file or directory") return False - elif returncode == 1: + if returncode == 1: return False return True diff --git a/neutron/cmd/upgrade_checks/checks.py b/neutron/cmd/upgrade_checks/checks.py index 77428152804..925f5825cd9 100644 --- a/neutron/cmd/upgrade_checks/checks.py +++ b/neutron/cmd/upgrade_checks/checks.py @@ -235,13 +235,12 @@ class CoreChecks(base.BaseChecks): return upgradecheck.Result( upgradecheck.Code.SUCCESS, _("Number of workers already " "defined in config")) - else: - return upgradecheck.Result( - upgradecheck.Code.WARNING, - _("The default number of workers " - "has changed. Please see release notes for the new values, " - "but it is strongly encouraged for deployers to manually " - "set the values for api_workers and rpc_workers.")) + return upgradecheck.Result( + upgradecheck.Code.WARNING, + _("The default number of workers " + "has changed. Please see release notes for the new values, " + "but it is strongly encouraged for deployers to manually " + "set the values for api_workers and rpc_workers.")) @staticmethod def network_mtu_check(checker): @@ -263,10 +262,9 @@ class CoreChecks(base.BaseChecks): upgradecheck.Code.WARNING, _("The 'mtu' attribute of networks %s are not set " "This attribute can't be null now.") % networks_list) - else: - return upgradecheck.Result( - upgradecheck.Code.SUCCESS, - _("The 'mtu' attribute of all networks are set.")) + return upgradecheck.Result( + upgradecheck.Code.SUCCESS, + _("The 'mtu' attribute of all networks are set.")) @staticmethod def ovn_db_revision_check(checker): @@ -313,10 +311,8 @@ class CoreChecks(base.BaseChecks): upgradecheck.Code.WARNING, _("NIC Switch agents detected on hosts %s, please ensure the " "hosts run with a kernel version 3.13 or newer.") % hosts) - else: - return upgradecheck.Result( - upgradecheck.Code.SUCCESS, - _("No NIC Switch agents detected.")) + return upgradecheck.Result( + upgradecheck.Code.SUCCESS, _("No NIC Switch agents detected.")) @staticmethod def vlan_allocations_segid_check(checker): @@ -558,19 +554,18 @@ class CoreChecks(base.BaseChecks): return upgradecheck.Result( upgradecheck.Code.SUCCESS, _('Version of OVN supports iPXE over IPv6.')) - else: - return upgradecheck.Result( - upgradecheck.Code.WARNING, - _('Version of OVN does not support iPXE over IPv6 but ' - '``disable_ovn_dhcp_for_baremetal_ports`` is set to ' - '``False``. In case if provisioning of baremetal nodes ' - 'is required, please make sure that either ' - '``disable_ovn_dhcp_for_baremetal_ports`` option is set to ' - '``True`` and Neutron DHCP agent is available or use ' - 'OVN with patch https://github.com/ovn-org/ovn/commit/' - 'c5fd51bd154147a567097eaf61fbebc0b5b39e28 which added ' - 'support for iPXE over IPv6. It is available in ' - 'OVN >= 23.06.0.')) + return upgradecheck.Result( + upgradecheck.Code.WARNING, + _('Version of OVN does not support iPXE over IPv6 but ' + '``disable_ovn_dhcp_for_baremetal_ports`` is set to ' + '``False``. In case if provisioning of baremetal nodes ' + 'is required, please make sure that either ' + '``disable_ovn_dhcp_for_baremetal_ports`` option is set to ' + '``True`` and Neutron DHCP agent is available or use ' + 'OVN with patch https://github.com/ovn-org/ovn/commit/' + 'c5fd51bd154147a567097eaf61fbebc0b5b39e28 which added ' + 'support for iPXE over IPv6. 
It is available in ' + 'OVN >= 23.06.0.')) @staticmethod def ml2_ovs_igmp_flood_check(checker): diff --git a/neutron/common/cache_utils.py b/neutron/common/cache_utils.py index c03c10d3d1e..040dd4df829 100644 --- a/neutron/common/cache_utils.py +++ b/neutron/common/cache_utils.py @@ -33,8 +33,7 @@ def get_cache(conf): """Used to get cache client""" if conf.cache.enabled: return _get_cache_region(conf) - else: - return False + return False def _get_cache_region(conf): diff --git a/neutron/common/ovn/acl.py b/neutron/common/ovn/acl.py index f5ed8edab66..75ea27c5f8e 100644 --- a/neutron/common/ovn/acl.py +++ b/neutron/common/ovn/acl.py @@ -194,33 +194,29 @@ def add_sg_rule_acl_for_port_group(port_group, r, stateful, match): def _get_subnet_from_cache(plugin, admin_context, subnet_cache, subnet_id): if subnet_id in subnet_cache: return subnet_cache[subnet_id] - else: - subnet = plugin.get_subnet(admin_context, subnet_id) - if subnet: - subnet_cache[subnet_id] = subnet - return subnet + subnet = plugin.get_subnet(admin_context, subnet_id) + if subnet: + subnet_cache[subnet_id] = subnet + return subnet def _get_sg_ports_from_cache(plugin, admin_context, sg_ports_cache, sg_id): if sg_id in sg_ports_cache: return sg_ports_cache[sg_id] - else: - filters = {'security_group_id': [sg_id]} - sg_ports = plugin._get_port_security_group_bindings( - admin_context, filters) - if sg_ports: - sg_ports_cache[sg_id] = sg_ports - return sg_ports + filters = {'security_group_id': [sg_id]} + sg_ports = plugin._get_port_security_group_bindings(admin_context, filters) + if sg_ports: + sg_ports_cache[sg_id] = sg_ports + return sg_ports def _get_sg_from_cache(plugin, admin_context, sg_cache, sg_id): if sg_id in sg_cache: return sg_cache[sg_id] - else: - sg = plugin.get_security_group(admin_context, sg_id) - if sg: - sg_cache[sg_id] = sg - return sg + sg = plugin.get_security_group(admin_context, sg_id) + if sg: + sg_cache[sg_id] = sg + return sg def acl_remote_group_id(r, ip_version): diff --git a/neutron/common/ovn/utils.py b/neutron/common/ovn/utils.py index d0b23afa8be..6dd91c419f6 100644 --- a/neutron/common/ovn/utils.py +++ b/neutron/common/ovn/utils.py @@ -190,8 +190,7 @@ def ovn_context(txn_var_name='txn', idl_var_name='idl'): else: kwargs[txn_var_name] = new_txn return f(*args, **kwargs) - else: - return f(*args, **kwargs) + return f(*args, **kwargs) return wrapped return decorator @@ -561,8 +560,7 @@ def get_revision_number(resource, resource_type): constants.TYPE_ADDRESS_GROUPS, constants.TYPE_FLOATINGIPS, constants.TYPE_SUBNETS): return resource['revision_number'] - else: - raise ovn_exc.UnknownResourceType(resource_type=resource_type) + raise ovn_exc.UnknownResourceType(resource_type=resource_type) def remove_macs_from_lsp_addresses(addresses): @@ -635,7 +633,7 @@ def sort_ips_by_version(addresses): def is_lsp_router_port(neutron_port=None, lsp=None): if neutron_port: return neutron_port.get('device_owner') in const.ROUTER_PORT_OWNERS - elif lsp: + if lsp: return (lsp.external_ids.get(constants.OVN_DEVICE_OWNER_EXT_ID_KEY) in const.ROUTER_PORT_OWNERS) return False @@ -760,11 +758,11 @@ def is_gateway_chassis_invalid(chassis_name, gw_chassis, """ if chassis_name not in chassis_physnets: return True - elif physnet and physnet not in chassis_physnets.get(chassis_name): + if physnet and physnet not in chassis_physnets.get(chassis_name): return True - elif gw_chassis and chassis_name not in gw_chassis: + if gw_chassis and chassis_name not in gw_chassis: return True - elif az_hints and not set(az_hints) & 
set(chassis_with_azs.get( + if az_hints and not set(az_hints) & set(chassis_with_azs.get( chassis_name, [])): return True return False diff --git a/neutron/common/utils.py b/neutron/common/utils.py index bae4a44aaa3..3c7a498c82e 100644 --- a/neutron/common/utils.py +++ b/neutron/common/utils.py @@ -1021,7 +1021,7 @@ def get_sql_random_method(sql_dialect_name): if sql_dialect_name == sqlite_dialect.name: return sql_func.random # https://dev.mysql.com/doc/refman/8.0/en/mathematical-functions.html - elif sql_dialect_name == mysql_dialect.name: + if sql_dialect_name == mysql_dialect.name: return sql_func.rand diff --git a/neutron/db/_utils.py b/neutron/db/_utils.py index f067ebaabef..b782c6433a7 100644 --- a/neutron/db/_utils.py +++ b/neutron/db/_utils.py @@ -35,8 +35,7 @@ def context_if_transaction(context, transaction, writer=True): if transaction: return (db_api.CONTEXT_WRITER.using(context) if writer else db_api.CONTEXT_READER.using(context)) - else: - return _noop_context_manager() + return _noop_context_manager() def safe_creation(context, create_fn, delete_fn, create_bindings, diff --git a/neutron/db/agents_db.py b/neutron/db/agents_db.py index 02f8bb61350..5491128be8f 100644 --- a/neutron/db/agents_db.py +++ b/neutron/db/agents_db.py @@ -149,14 +149,13 @@ class AgentAvailabilityZoneMixin(az_ext.AvailabilityZonePluginBase): for k, v in self._list_availability_zones( context, filters).items() if not filter_states or v in filter_states] - else: - # NOTE(hichihara): 'tenant_id' is dummy for policy check. - # it is not visible via API. - return [{'state': v, - 'name': k[0], 'resource': k[1], - 'tenant_id': context.tenant_id} - for k, v in self._list_availability_zones( - context, filters).items()] + # NOTE(hichihara): 'tenant_id' is dummy for policy check. it is not + # visible via API. + return [{'state': v, + 'name': k[0], 'resource': k[1], + 'tenant_id': context.tenant_id} + for k, v in self._list_availability_zones( + context, filters).items()] @db_api.retry_if_session_inactive() def validate_availability_zones(self, context, resource_type, diff --git a/neutron/db/agentschedulers_db.py b/neutron/db/agentschedulers_db.py index 320eadbc75f..2de9b7bf40d 100644 --- a/neutron/db/agentschedulers_db.py +++ b/neutron/db/agentschedulers_db.py @@ -66,19 +66,18 @@ class AgentSchedulerDbMixin(agents_db.AgentDbMixin): if active is None: # filtering by activeness is disabled, all agents are eligible return True - else: - # note(rpodolyaka): original behaviour is saved here: if active - # filter is set, only agents which are 'up' - # (i.e. have a recent heartbeat timestamp) - # are eligible, even if active is False - if agent_utils.is_agent_down(agent['heartbeat_timestamp']): - LOG.warning('Agent %(agent)s is down. Type: %(type)s, host: ' - '%(host)s, heartbeat: %(heartbeat)s', - {'agent': agent['id'], 'type': agent['agent_type'], - 'host': agent['host'], - 'heartbeat': agent['heartbeat_timestamp']}) - return False - return True + # note(rpodolyaka): original behaviour is saved here: if active + # filter is set, only agents which are 'up' + # (i.e. have a recent heartbeat timestamp) + # are eligible, even if active is False + if agent_utils.is_agent_down(agent['heartbeat_timestamp']): + LOG.warning('Agent %(agent)s is down. 
Type: %(type)s, host: ' + '%(host)s, heartbeat: %(heartbeat)s', + {'agent': agent['id'], 'type': agent['agent_type'], + 'host': agent['host'], + 'heartbeat': agent['heartbeat_timestamp']}) + return False + return True def update_agent(self, context, id, agent): original_agent = self.get_agent(context, id) @@ -442,10 +441,9 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler if net_ids: return {'networks': self.get_networks(context, filters={'id': net_ids})} - else: - # Exception will be thrown if the requested agent does not exist. - self._get_agent(context, id) - return {'networks': []} + # Exception will be thrown if the requested agent does not exist. + self._get_agent(context, id) + return {'networks': []} def list_active_networks_on_active_dhcp_agent(self, context, host): try: @@ -465,8 +463,7 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler if net_ids: return network.Network.get_objects(context, id=net_ids, admin_state_up=[True]) - else: - return [] + return [] def list_dhcp_agents_hosting_network(self, context, network_id): dhcp_agents = self.get_dhcp_agents_hosting_networks( @@ -475,8 +472,7 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler if agent_ids: return { 'agents': self.get_agents(context, filters={'id': agent_ids})} - else: - return {'agents': []} + return {'agents': []} def schedule_network(self, context, created_network): if self.network_scheduler and cfg.CONF.network_auto_schedule: diff --git a/neutron/db/db_base_plugin_common.py b/neutron/db/db_base_plugin_common.py index 887271b28f8..d0c88fbe8f4 100644 --- a/neutron/db/db_base_plugin_common.py +++ b/neutron/db/db_base_plugin_common.py @@ -50,10 +50,9 @@ def convert_result_to_dict(f): if result is None: return None - elif isinstance(result, list): + if isinstance(result, list): return [r.to_dict() for r in result] - else: - return result.to_dict() + return result.to_dict() return inner @@ -74,8 +73,7 @@ def filter_fields(f): if isinstance(result, list): return [_do_filter(obj) for obj in result] - else: - return _do_filter(result) + return _do_filter(result) return inner_filter @@ -86,10 +84,9 @@ def make_result_with_fields(f): result = f(*args, **kwargs) if fields is None: return result - elif isinstance(result, list): + if isinstance(result, list): return [db_utils.resource_fields(r, fields) for r in result] - else: - return db_utils.resource_fields(result, fields) + return db_utils.resource_fields(result, fields) return inner diff --git a/neutron/db/dns_db.py b/neutron/db/dns_db.py index 7118289dd61..2dd98bbcae8 100644 --- a/neutron/db/dns_db.py +++ b/neutron/db/dns_db.py @@ -134,21 +134,20 @@ class DNSDbMixin: self._get_requested_state_for_external_dns_service_update( context, floatingip_data)) if dns_data_db: - if (dns_data_db['published_dns_name'] != current_dns_name or - dns_data_db['published_dns_domain'] != current_dns_domain): - dns_actions_data = DNSActionsData( - previous_dns_name=dns_data_db['published_dns_name'], - previous_dns_domain=dns_data_db['published_dns_domain']) - if current_dns_name and current_dns_domain: - dns_data_db['published_dns_name'] = current_dns_name - dns_data_db['published_dns_domain'] = current_dns_domain - dns_actions_data.current_dns_name = current_dns_name - dns_actions_data.current_dns_domain = current_dns_domain - else: - dns_data_db.delete() - return dns_actions_data - else: + if (dns_data_db['published_dns_name'] == current_dns_name and + dns_data_db['published_dns_domain'] == current_dns_domain): return + dns_actions_data = DNSActionsData( + 
previous_dns_name=dns_data_db['published_dns_name'], + previous_dns_domain=dns_data_db['published_dns_domain']) + if current_dns_name and current_dns_domain: + dns_data_db['published_dns_name'] = current_dns_name + dns_data_db['published_dns_domain'] = current_dns_domain + dns_actions_data.current_dns_name = current_dns_name + dns_actions_data.current_dns_domain = current_dns_domain + else: + dns_data_db.delete() + return dns_actions_data if current_dns_name and current_dns_domain: fip_obj.FloatingIPDNS( context, diff --git a/neutron/db/dvr_mac_db.py b/neutron/db/dvr_mac_db.py index 4f69ffd995d..4e2094a610f 100644 --- a/neutron/db/dvr_mac_db.py +++ b/neutron/db/dvr_mac_db.py @@ -204,25 +204,26 @@ class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase): subnet_data = fixed_ips[0]['subnet_id'] else: subnet_data = subnet + try: subnet_info = self.plugin.get_subnet( context, subnet_data) except n_exc.SubnetNotFound: return {} + + # retrieve the gateway port on this subnet + if fixed_ips: + ip_address = fixed_ips[0]['ip_address'] else: - # retrieve the gateway port on this subnet - if fixed_ips: - ip_address = fixed_ips[0]['ip_address'] - else: - ip_address = subnet_info['gateway_ip'] + ip_address = subnet_info['gateway_ip'] - internal_gateway_ports = get_ports_query_by_subnet_and_ip( - context, subnet, [ip_address]) + internal_gateway_ports = get_ports_query_by_subnet_and_ip( + context, subnet, [ip_address]) - if not internal_gateway_ports: - LOG.error("Could not retrieve gateway port " - "for subnet %s", subnet_info) - return {} - internal_port = internal_gateway_ports[0] - subnet_info['gateway_mac'] = internal_port['mac_address'] - return subnet_info + if not internal_gateway_ports: + LOG.error("Could not retrieve gateway port for subnet %s", + subnet_info) + return {} + internal_port = internal_gateway_ports[0] + subnet_info['gateway_mac'] = internal_port['mac_address'] + return subnet_info diff --git a/neutron/db/l3_agentschedulers_db.py b/neutron/db/l3_agentschedulers_db.py index c09b90d1df1..6353e878108 100644 --- a/neutron/db/l3_agentschedulers_db.py +++ b/neutron/db/l3_agentschedulers_db.py @@ -290,10 +290,9 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, if router_ids: return {'routers': self.get_routers(context, filters={'id': router_ids})} - else: - # Exception will be thrown if the requested agent does not exist. - self._get_agent(context, agent_id) - return {'routers': []} + # Exception will be thrown if the requested agent does not exist. 
+ self._get_agent(context, agent_id) + return {'routers': []} def _get_active_l3_agent_routers_sync_data(self, context, host, agent, router_ids): diff --git a/neutron/db/l3_db.py b/neutron/db/l3_db.py index 1c0a2ebd171..42efeff9fa8 100644 --- a/neutron/db/l3_db.py +++ b/neutron/db/l3_db.py @@ -1179,7 +1179,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase, self._core_plugin.update_port( context, p['id'], {'port': {'fixed_ips': fixed_ips}}) return (p, [subnet]) - elif subnet_id in port_subnets: + if subnet_id in port_subnets: # only one subnet on port - delete the port self._core_plugin.delete_port(context, p['id'], l3_port_check=False) @@ -1874,11 +1874,9 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase, def _get_floatingips_by_port_id(self, context, port_id): """Helper function to retrieve the fips associated with a port_id.""" - if l3_obj.FloatingIP.objects_exist(context, fixed_port_id=port_id): - return l3_obj.FloatingIP.get_objects( - context, fixed_port_id=port_id) - else: + if not l3_obj.FloatingIP.objects_exist(context, fixed_port_id=port_id): return [] + return l3_obj.FloatingIP.get_objects(context, fixed_port_id=port_id) def _build_routers_list(self, context, routers, gw_ports): """Subclasses can override this to add extra gateway info""" diff --git a/neutron/db/l3_dvrscheduler_db.py b/neutron/db/l3_dvrscheduler_db.py index c85a2f1d0c0..8c1d05252fa 100644 --- a/neutron/db/l3_dvrscheduler_db.py +++ b/neutron/db/l3_dvrscheduler_db.py @@ -669,7 +669,7 @@ def _notify_l3_agent_port_update(resource, event, trigger, payload): _dvr_handle_unbound_allowed_addr_pair_add( l3plugin, context, new_port, address_pair) return - elif original_port_state: + if original_port_state: # Case were we deactivate the port from active state. for address_pair in allowed_address_pairs_list: _dvr_handle_unbound_allowed_addr_pair_del( diff --git a/neutron/db/l3_hamode_db.py b/neutron/db/l3_hamode_db.py index ae9ef5eae0d..439ae9b1a44 100644 --- a/neutron/db/l3_hamode_db.py +++ b/neutron/db/l3_hamode_db.py @@ -826,8 +826,7 @@ def is_ha_router(router): def is_ha_router_port(context, device_owner, router_id): if device_owner == constants.DEVICE_OWNER_HA_REPLICATED_INT: return True - elif device_owner == constants.DEVICE_OWNER_ROUTER_SNAT: + if device_owner == constants.DEVICE_OWNER_ROUTER_SNAT: return l3_obj.RouterExtraAttributes.objects_exist( context, router_id=router_id, ha=True) - else: - return False + return False diff --git a/neutron/db/local_ip_db.py b/neutron/db/local_ip_db.py index 3fc3d08b7dd..061c53a6b13 100644 --- a/neutron/db/local_ip_db.py +++ b/neutron/db/local_ip_db.py @@ -99,14 +99,13 @@ class LocalIPDbMixin(lip_ext.LocalIPPluginBase): raise lip_exc.LocalIPRequestedIPNotFound( port_id=port.id, ip=requested_ip) return fixed_ip - elif validators.is_attr_set(requested_ip): + if validators.is_attr_set(requested_ip): for fixed_ip in fixed_ips: if str(fixed_ip.ip_address) == requested_ip: return requested_ip raise lip_exc.LocalIPRequestedIPNotFound( port_id=port.id, ip=requested_ip) - else: - raise lip_exc.LocalIPNoRequestedIP(port_id=port.id) + raise lip_exc.LocalIPNoRequestedIP(port_id=port.id) @db_api.retry_if_session_inactive() def create_local_ip(self, context, local_ip): diff --git a/neutron/db/metering/metering_rpc.py b/neutron/db/metering/metering_rpc.py index 8523a8fc8a9..af9b783a93d 100644 --- a/neutron/db/metering/metering_rpc.py +++ b/neutron/db/metering/metering_rpc.py @@ -40,20 +40,18 @@ class MeteringRpcCallbacks: if not extensions.is_extension_supported( l3_plugin, 
consts.L3_AGENT_SCHEDULER_EXT_ALIAS) or not host: return metering_data - else: - agents = l3_plugin.get_l3_agents(context, filters={'host': [host]}) - if not agents: - LOG.error('Unable to find agent on host %s.', host) - return + agents = l3_plugin.get_l3_agents(context, filters={'host': [host]}) + if not agents: + LOG.error('Unable to find agent on host %s.', host) + return - router_ids = [] - for agent in agents: - routers = l3_plugin.list_routers_on_l3_agent(context, agent.id) - router_ids += [router['id'] for router in routers['routers']] - if not router_ids: - return - else: - return [ - router for router in metering_data - if router['id'] in router_ids - ] + router_ids = [] + for agent in agents: + routers = l3_plugin.list_routers_on_l3_agent(context, agent.id) + router_ids += [router['id'] for router in routers['routers']] + if not router_ids: + return + return [ + router for router in metering_data + if router['id'] in router_ids + ] diff --git a/neutron/db/migration/alembic_migrations/env.py b/neutron/db/migration/alembic_migrations/env.py index 29a04a385a5..16cb95329ac 100644 --- a/neutron/db/migration/alembic_migrations/env.py +++ b/neutron/db/migration/alembic_migrations/env.py @@ -56,12 +56,11 @@ def set_mysql_engine(): def include_object(object_, name, type_, reflected, compare_to): if type_ == 'table' and name in external.TABLES: return False - elif type_ == 'index' and reflected and name.startswith("idx_autoinc_"): + if type_ == 'index' and reflected and name.startswith("idx_autoinc_"): # skip indexes created by SQLAlchemy autoincrement=True # on composite PK integer columns return False - else: - return True + return True def run_migrations_offline(): diff --git a/neutron/db/migration/alembic_migrations/versions/newton/contract/7d9d8eeec6ad_rename_tenant_to_project.py b/neutron/db/migration/alembic_migrations/versions/newton/contract/7d9d8eeec6ad_rename_tenant_to_project.py index a2020e6883b..609d2ebaf0a 100644 --- a/neutron/db/migration/alembic_migrations/versions/newton/contract/7d9d8eeec6ad_rename_tenant_to_project.py +++ b/neutron/db/migration/alembic_migrations/versions/newton/contract/7d9d8eeec6ad_rename_tenant_to_project.py @@ -40,8 +40,7 @@ def get_inspector(): if _INSPECTOR: return _INSPECTOR - else: - _INSPECTOR = sa.inspect(op.get_bind()) + _INSPECTOR = sa.inspect(op.get_bind()) return _INSPECTOR diff --git a/neutron/db/migration/alembic_migrations/versions/yoga/expand/1ffef8d6f371_rbac_target_tenant_to_target_project.py b/neutron/db/migration/alembic_migrations/versions/yoga/expand/1ffef8d6f371_rbac_target_tenant_to_target_project.py index 37e9a8d955c..c0c58a0aa7f 100644 --- a/neutron/db/migration/alembic_migrations/versions/yoga/expand/1ffef8d6f371_rbac_target_tenant_to_target_project.py +++ b/neutron/db/migration/alembic_migrations/versions/yoga/expand/1ffef8d6f371_rbac_target_tenant_to_target_project.py @@ -45,8 +45,8 @@ def get_inspector(): global _INSPECTOR if _INSPECTOR: return _INSPECTOR - else: - _INSPECTOR = sa.inspect(op.get_bind()) + + _INSPECTOR = sa.inspect(op.get_bind()) return _INSPECTOR diff --git a/neutron/db/migration/alembic_migrations/versions/yoga/expand/ba859d649675_add_indexes_to_rbacs.py b/neutron/db/migration/alembic_migrations/versions/yoga/expand/ba859d649675_add_indexes_to_rbacs.py index cb38491c805..1572b552a17 100644 --- a/neutron/db/migration/alembic_migrations/versions/yoga/expand/ba859d649675_add_indexes_to_rbacs.py +++ b/neutron/db/migration/alembic_migrations/versions/yoga/expand/ba859d649675_add_indexes_to_rbacs.py @@ -39,8 
+39,8 @@ def get_inspector(): global _INSPECTOR if _INSPECTOR: return _INSPECTOR - else: - _INSPECTOR = sa.inspect(op.get_bind()) + + _INSPECTOR = sa.inspect(op.get_bind()) return _INSPECTOR diff --git a/neutron/db/migration/autogen.py b/neutron/db/migration/autogen.py index f0e91ea3648..62acb955519 100644 --- a/neutron/db/migration/autogen.py +++ b/neutron/db/migration/autogen.py @@ -76,8 +76,6 @@ def _migration_script_ops(context, directive, phase): def _expands(context, directive, phase): if phase == 'expand': return directive - else: - return None @_ec_dispatcher.dispatch_for(ops.DropConstraintOp) @@ -87,8 +85,6 @@ def _expands(context, directive, phase): def _contracts(context, directive, phase): if phase == 'contract': return directive - else: - return None @_ec_dispatcher.dispatch_for(ops.AlterColumnOp) @@ -97,12 +93,11 @@ def _alter_column(context, directive, phase): if is_expand and directive.modify_nullable is True: return directive - elif not is_expand and directive.modify_nullable is False: + if not is_expand and directive.modify_nullable is False: return directive - else: - raise NotImplementedError( - _("Don't know if operation is an expand or " - "contract at the moment: %s") % directive) + raise NotImplementedError( + _("Don't know if operation is an expand or " + "contract at the moment: %s") % directive) @_ec_dispatcher.dispatch_for(ops.ModifyTableOps) diff --git a/neutron/db/port_numa_affinity_policy_db.py b/neutron/db/port_numa_affinity_policy_db.py index 5c0b1f04701..8ec5e1d67bc 100644 --- a/neutron/db/port_numa_affinity_policy_db.py +++ b/neutron/db/port_numa_affinity_policy_db.py @@ -53,9 +53,8 @@ class PortNumaAffinityPolicyDbMixin: obj.update_fields( {pnap.NUMA_AFFINITY_POLICY: data[pnap.NUMA_AFFINITY_POLICY]}) obj.update() - else: - if obj: - obj.delete() + elif obj: + obj.delete() result[pnap.NUMA_AFFINITY_POLICY] = data[pnap.NUMA_AFFINITY_POLICY] diff --git a/neutron/db/securitygroups_db.py b/neutron/db/securitygroups_db.py index 665c2542827..e9ccf73af4d 100644 --- a/neutron/db/securitygroups_db.py +++ b/neutron/db/securitygroups_db.py @@ -101,13 +101,13 @@ class SecurityGroupDbMixin( tenant_id = s['tenant_id'] stateful = s.get('stateful', True) - if not default_sg: - self._ensure_default_security_group(context, tenant_id) - else: + if default_sg: existing_def_sg_id = self._get_default_sg_id(context, tenant_id) if existing_def_sg_id is not None: # default already exists, return it return self.get_security_group(context, existing_def_sg_id) + else: + self._ensure_default_security_group(context, tenant_id) with db_api.CONTEXT_WRITER.using(context): if default_sg: @@ -727,7 +727,7 @@ class SecurityGroupDbMixin( protocol = str(constants.PROTO_NUM_IPV6_ICMP) if protocol in constants.IP_PROTOCOL_MAP: return [protocol, str(constants.IP_PROTOCOL_MAP.get(protocol))] - elif protocol in constants.IP_PROTOCOL_NUM_TO_NAME_MAP: + if protocol in constants.IP_PROTOCOL_NUM_TO_NAME_MAP: return [constants.IP_PROTOCOL_NUM_TO_NAME_MAP.get(protocol), protocol] return [protocol, protocol] @@ -754,13 +754,11 @@ class SecurityGroupDbMixin( ip_proto in const.SG_PORT_PROTO_NAMES): if rule['port_range_min'] == 0 or rule['port_range_max'] == 0: raise ext_sg.SecurityGroupInvalidPortValue(port=0) - if (rule['port_range_min'] is not None and + if not (rule['port_range_min'] is not None and rule['port_range_max'] is not None and rule['port_range_min'] <= rule['port_range_max']): - # When min/max are the same it is just a single port - pass - else: raise ext_sg.SecurityGroupInvalidPortRange() + 
# When min/max are the same it is just a single port elif ip_proto in [constants.PROTO_NUM_ICMP, constants.PROTO_NUM_IPV6_ICMP]: for attr, field in [('port_range_min', 'type'), diff --git a/neutron/db/segments_db.py b/neutron/db/segments_db.py index 55ca52d6d8f..427518db8be 100644 --- a/neutron/db/segments_db.py +++ b/neutron/db/segments_db.py @@ -124,14 +124,13 @@ def get_dynamic_segment(context, network_id, physical_network=None, if objs: return _make_segment_dict(objs[0]) - else: - LOG.debug("No dynamic segment found for " - "Network:%(network_id)s, " - "Physical network:%(physnet)s, " - "segmentation_id:%(segmentation_id)s", - {'network_id': network_id, - 'physnet': physical_network, - 'segmentation_id': segmentation_id}) + LOG.debug("No dynamic segment found for " + "Network:%(network_id)s, " + "Physical network:%(physnet)s, " + "segmentation_id:%(segmentation_id)s", + {'network_id': network_id, + 'physnet': physical_network, + 'segmentation_id': segmentation_id}) def delete_network_segment(context, segment_id): @@ -185,10 +184,9 @@ def min_max_actual_segments_in_range(context, network_type, physical_network, if segment_objs: return (segment_objs[0].segmentation_id, segment_objs[-1].segmentation_id) - else: - LOG.debug("No existing segment found for " - "Network type:%(network_type)s, " - "Physical network:%(physical_network)s", - {'network_type': network_type, - 'physical_network': physical_network}) - return None, None + LOG.debug( + "No existing segment found for Network type:%(network_type)s, " + "Physical network:%(physical_network)s", + {'network_type': network_type, + 'physical_network': physical_network}) + return None, None diff --git a/neutron/extensions/qos_bw_limit_direction.py b/neutron/extensions/qos_bw_limit_direction.py index 99931f7c6be..d4d6449696b 100644 --- a/neutron/extensions/qos_bw_limit_direction.py +++ b/neutron/extensions/qos_bw_limit_direction.py @@ -76,5 +76,4 @@ class Qos_bw_limit_direction(api_extensions.ExtensionDescriptor): def get_extended_resources(self, version): if version == "2.0": return SUB_RESOURCE_ATTRIBUTE_MAP - else: - return {} + return {} diff --git a/neutron/extensions/quota_check_limit_default.py b/neutron/extensions/quota_check_limit_default.py index b603c60568b..1d95b4bf850 100644 --- a/neutron/extensions/quota_check_limit_default.py +++ b/neutron/extensions/quota_check_limit_default.py @@ -62,5 +62,4 @@ class Quota_check_limit_default(extensions.APIExtensionDescriptor): def get_extended_resources(cls, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP - else: - return {} + return {} diff --git a/neutron/extensions/quotasv2.py b/neutron/extensions/quotasv2.py index 4777cffec02..ab9865f2542 100644 --- a/neutron/extensions/quotasv2.py +++ b/neutron/extensions/quotasv2.py @@ -213,5 +213,4 @@ class Quotasv2(api_extensions.ExtensionDescriptor): def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 - else: - return {} + return {} diff --git a/neutron/extensions/securitygroup.py b/neutron/extensions/securitygroup.py index 45b09180629..5d2bd5a1300 100644 --- a/neutron/extensions/securitygroup.py +++ b/neutron/extensions/securitygroup.py @@ -183,8 +183,7 @@ def convert_validate_port_value(port): if netutils.is_valid_port(port): return int(port) - else: - raise SecurityGroupInvalidPortValue(port=port) + raise SecurityGroupInvalidPortValue(port=port) def convert_ip_prefix_to_cidr(ip_prefix): @@ -349,8 +348,7 @@ class Securitygroup(api_extensions.ExtensionDescriptor): if version == "2.0": return 
dict(list(EXTENDED_ATTRIBUTES_2_0.items()) + list(RESOURCE_ATTRIBUTE_MAP.items())) - else: - return {} + return {} def get_required_extensions(self): return [stdattr_ext.Standardattrdescription.get_alias()] diff --git a/neutron/ipam/requests.py b/neutron/ipam/requests.py index e4d6923dd4a..d98231ff2c3 100644 --- a/neutron/ipam/requests.py +++ b/neutron/ipam/requests.py @@ -304,15 +304,14 @@ class AddressRequestFactory: """ if ip_dict.get('ip_address'): return SpecificAddressRequest(ip_dict['ip_address']) - elif ip_dict.get('eui64_address'): + if ip_dict.get('eui64_address'): return AutomaticAddressRequest(prefix=ip_dict['subnet_cidr'], mac=ip_dict['mac']) - elif (port['device_owner'] == constants.DEVICE_OWNER_DHCP or - port['device_owner'] == constants.DEVICE_OWNER_DISTRIBUTED): + if (port['device_owner'] == constants.DEVICE_OWNER_DHCP or + port['device_owner'] == constants.DEVICE_OWNER_DISTRIBUTED): # preserve previous behavior of DHCP ports choosing start of pool return PreferNextAddressRequest() - else: - return AnyAddressRequest() + return AnyAddressRequest() class SubnetRequestFactory: @@ -337,21 +336,18 @@ class SubnetRequestFactory: subnet_id, common_utils.ip_version_from_int(subnetpool['ip_version']), prefixlen) - else: - alloc_pools = subnet.get('allocation_pools') - alloc_pools = (alloc_pools if validators.is_attr_set(alloc_pools) - else None) - if not cidr and gateway_ip: - prefixlen = subnet['prefixlen'] - if not validators.is_attr_set(prefixlen): - prefixlen = int(subnetpool['default_prefixlen']) - gw_ip_net = netaddr.IPNetwork('%s/%s' % - (gateway_ip, prefixlen)) - cidr = gw_ip_net.cidr + alloc_pools = subnet.get('allocation_pools') + alloc_pools = ( + alloc_pools if validators.is_attr_set(alloc_pools) else None) + if not cidr and gateway_ip: + prefixlen = subnet['prefixlen'] + if not validators.is_attr_set(prefixlen): + prefixlen = int(subnetpool['default_prefixlen']) + gw_ip_net = netaddr.IPNetwork('%s/%s' % (gateway_ip, prefixlen)) + cidr = gw_ip_net.cidr - return SpecificSubnetRequest( - subnet['tenant_id'], - subnet_id, - cidr, - gateway_ip=gateway_ip, - allocation_pools=alloc_pools) + return SpecificSubnetRequest(subnet['tenant_id'], + subnet_id, + cidr, + gateway_ip=gateway_ip, + allocation_pools=alloc_pools) diff --git a/neutron/ipam/subnet_alloc.py b/neutron/ipam/subnet_alloc.py index 61a0a8ab38b..faeaa5345fe 100644 --- a/neutron/ipam/subnet_alloc.py +++ b/neutron/ipam/subnet_alloc.py @@ -175,11 +175,10 @@ class SubnetAllocator(driver.Pool): if isinstance(request, ipam_req.AnySubnetRequest): return self._allocate_any_subnet(request) - elif isinstance(request, ipam_req.SpecificSubnetRequest): + if isinstance(request, ipam_req.SpecificSubnetRequest): return self._allocate_specific_subnet(request) - else: - msg = _("Unsupported request type") - raise exceptions.SubnetAllocationError(reason=msg) + msg = _("Unsupported request type") + raise exceptions.SubnetAllocationError(reason=msg) def get_subnet(self, subnet_id): raise NotImplementedError() diff --git a/neutron/ipam/utils.py b/neutron/ipam/utils.py index f4b4a883c03..fdde89b4dc6 100644 --- a/neutron/ipam/utils.py +++ b/neutron/ipam/utils.py @@ -23,15 +23,14 @@ def check_subnet_ip(cidr, ip_address, port_owner=''): net = netaddr.IPNetwork(cidr) # Check that the IP is valid on subnet. In IPv4 this cannot be the # network or the broadcast address - if net.version == constants.IP_VERSION_6: - # NOTE(njohnston): In some cases the code cannot know the owner of the - # port. 
In these cases port_owner should an empty string, and we pass - # it through here. - return ((port_owner in (constants.ROUTER_PORT_OWNERS + ('', )) or - ip != net.network) and - ip in net) - else: + if net.version == constants.IP_VERSION_4: return ip != net.network and ip != net.broadcast and ip in net + # NOTE(njohnston): In some cases the code cannot know the owner of the + # port. In these cases port_owner should an empty string, and we pass it + # through here. + return ( + (port_owner in (constants.ROUTER_PORT_OWNERS + ('', )) or + ip != net.network) and ip in net) def check_gateway_invalid_in_subnet(cidr, gateway): diff --git a/neutron/manager.py b/neutron/manager.py index 7d6d052276a..aea913c5972 100644 --- a/neutron/manager.py +++ b/neutron/manager.py @@ -181,8 +181,7 @@ class NeutronManager(metaclass=profiler.TracedMeta): core_plugin = directory.get_plugin() if core_plugin.has_native_datastore(): return constants.DEFAULT_SERVICE_PLUGINS.keys() - else: - return [] + return [] def _load_service_plugins(self): """Loads service plugins. diff --git a/neutron/notifiers/nova.py b/neutron/notifiers/nova.py index 5a8d85fc586..27dedc2d34d 100644 --- a/neutron/notifiers/nova.py +++ b/neutron/notifiers/nova.py @@ -180,8 +180,7 @@ class Notifier: if port and self._is_compute_port(port): if action == 'delete_port': return self._get_port_delete_event(port) - else: - return self._get_network_changed_event(port) + return self._get_network_changed_event(port) def _can_notify(self, port): if getattr(_notifier_store, 'enable', None) is None: diff --git a/neutron/objects/base.py b/neutron/objects/base.py index 7d6b2d3038c..1ea19d416fc 100644 --- a/neutron/objects/base.py +++ b/neutron/objects/base.py @@ -733,13 +733,11 @@ class NeutronDbObject(NeutronObject, metaclass=DeclarativeObject): if cls.has_standard_attributes(): return super().update_object( context, values, validate_filters=False, **kwargs) - else: - with cls.db_context_writer(context): - db_obj = obj_db_api.update_object( - cls, context, - cls.modify_fields_to_db(values), - **cls.modify_fields_to_db(kwargs)) - return cls._load_object(context, db_obj) + with cls.db_context_writer(context): + db_obj = obj_db_api.update_object( + cls, context, cls.modify_fields_to_db(values), + **cls.modify_fields_to_db(kwargs)) + return cls._load_object(context, db_obj) @classmethod def update_objects(cls, context, values, validate_filters=True, **kwargs): diff --git a/neutron/objects/ports.py b/neutron/objects/ports.py index 34f6d66a222..9274749c6ba 100644 --- a/neutron/objects/ports.py +++ b/neutron/objects/ports.py @@ -281,8 +281,7 @@ class IPAllocation(base.NeutronDbObject): if first: return alloc_qry.first() - else: - return alloc_qry.all() + return alloc_qry.all() @base.NeutronObjectRegistry.register @@ -785,13 +784,12 @@ class Port(base.NeutronDbObject): if not ports: return - elif not pci_slot: + if not pci_slot: return ports.pop() - else: - for port in ports: - for _binding in port.bindings: - if _binding.get('profile', {}).get('pci_slot') == pci_slot: - return port + for port in ports: + for _binding in port.bindings: + if _binding.get('profile', {}).get('pci_slot') == pci_slot: + return port @classmethod @db_api.CONTEXT_READER diff --git a/neutron/objects/qos/policy.py b/neutron/objects/qos/policy.py index f62b2efdc59..30d79e756f4 100644 --- a/neutron/objects/qos/policy.py +++ b/neutron/objects/qos/policy.py @@ -97,7 +97,7 @@ class QosPolicy(rbac_db.NeutronRbacObject): def obj_load_attr(self, attrname): if attrname == 'rules': return 
self._reload_rules() - elif attrname == 'is_default': + if attrname == 'is_default': return self._reload_is_default() return super().obj_load_attr(attrname) diff --git a/neutron/pecan_wsgi/controllers/utils.py b/neutron/pecan_wsgi/controllers/utils.py index 0ecf5db9912..28cf16a2bf6 100644 --- a/neutron/pecan_wsgi/controllers/utils.py +++ b/neutron/pecan_wsgi/controllers/utils.py @@ -307,12 +307,11 @@ class ShimItemController(NeutronPecanController): result = method(shim_request, **kwargs) self._set_response_code(result, self.item) return result - elif not self.controller_show: - pecan.abort(405) - else: + if self.controller_show: result = self.controller_show(shim_request, self.item, **kwargs) self._set_response_code(result, 'show') return result + pecan.abort(405) @when_delete(index) def delete(self): diff --git a/neutron/pecan_wsgi/hooks/policy_enforcement.py b/neutron/pecan_wsgi/hooks/policy_enforcement.py index 46db98c36d9..c8477b7fc62 100644 --- a/neutron/pecan_wsgi/hooks/policy_enforcement.py +++ b/neutron/pecan_wsgi/hooks/policy_enforcement.py @@ -62,11 +62,9 @@ def fetch_resource(method, neutron_context, controller, if parent_id: getter_args.append(parent_id) return getter(*getter_args, fields=field_list) - else: - # Some legit resources, like quota, do not have a plugin yet. - # Retrieving the original object is nevertheless important - # for policy checks. - return _custom_getter(resource, resource_id) + # Some legit resources, like quota, do not have a plugin yet. Retrieving + # the original object is nevertheless important for policy checks. + return _custom_getter(resource, resource_id) class PolicyHook(hooks.PecanHook): diff --git a/neutron/plugins/ml2/db.py b/neutron/plugins/ml2/db.py index 0a16ed0cc81..2a867a2f1d8 100644 --- a/neutron/plugins/ml2/db.py +++ b/neutron/plugins/ml2/db.py @@ -240,7 +240,7 @@ def generate_distributed_port_status(context, port_id): for bind in query.filter(models.DistributedPortBinding.port_id == port_id): if bind.status == n_const.PORT_STATUS_ACTIVE: return bind.status - elif bind.status == n_const.PORT_STATUS_DOWN: + if bind.status == n_const.PORT_STATUS_DOWN: final_status = bind.status return final_status diff --git a/neutron/plugins/ml2/driver_context.py b/neutron/plugins/ml2/driver_context.py index d5115596531..b249071d907 100644 --- a/neutron/plugins/ml2/driver_context.py +++ b/neutron/plugins/ml2/driver_context.py @@ -271,9 +271,8 @@ class PortContext(MechanismDriverContext, api.PortContext): # resolving bug 1367391? if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE: return self._original_port and self._binding.host - else: - return (self._original_port and - self._original_port.get(portbindings.HOST_ID)) + return (self._original_port and + self._original_port.get(portbindings.HOST_ID)) @property def vif_type(self): diff --git a/neutron/plugins/ml2/drivers/helpers.py b/neutron/plugins/ml2/drivers/helpers.py index cb789ac2d1a..276665f8e1b 100644 --- a/neutron/plugins/ml2/drivers/helpers.py +++ b/neutron/plugins/ml2/drivers/helpers.py @@ -81,28 +81,25 @@ class SegmentTypeDriver(BaseTypeDriver): if alloc.allocated: # Segment already allocated return - else: - # Segment not allocated - LOG.debug("%(type)s segment %(segment)s allocate " - "started ", - {"type": network_type, - "segment": raw_segment}) - count = (context.session.query(self.model). - filter_by(allocated=False, **raw_segment). 
- update({"allocated": True})) - if count: - LOG.debug("%(type)s segment %(segment)s allocate " - "done ", - {"type": network_type, - "segment": raw_segment}) - return alloc + # Segment not allocated + LOG.debug("%(type)s segment %(segment)s allocate started ", + {"type": network_type, + "segment": raw_segment}) + count = (context.session.query(self.model). + filter_by(allocated=False, **raw_segment). + update({"allocated": True})) + if count: + LOG.debug( + "%(type)s segment %(segment)s allocate done ", + {"type": network_type, "segment": raw_segment}) + return alloc - # Segment allocated or deleted since select - LOG.debug("%(type)s segment %(segment)s allocate " - "failed: segment has been allocated or " - "deleted", - {"type": network_type, - "segment": raw_segment}) + # Segment allocated or deleted since select + LOG.debug("%(type)s segment %(segment)s allocate " + "failed: segment has been allocated or " + "deleted", + {"type": network_type, + "segment": raw_segment}) # Segment to create or already allocated LOG.debug("%(type)s segment %(segment)s create started", diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py index 65fd74143cb..5bb565ce4b9 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py @@ -210,9 +210,8 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): def get_vxlan_device_name(segmentation_id): if 0 <= int(segmentation_id) <= constants.MAX_VXLAN_VNI: return VXLAN_INTERFACE_PREFIX + str(segmentation_id) - else: - LOG.warning("Invalid Segmentation ID: %s, will lead to " - "incorrect vxlan device name", segmentation_id) + LOG.warning("Invalid Segmentation ID: %s, will lead to " + "incorrect vxlan device name", segmentation_id) @staticmethod def _match_multicast_range(segmentation_id): @@ -249,10 +248,9 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): interface = self.ensure_vlan(physical_interface, vlan_id) if phy_bridge_name: return self.ensure_bridge(phy_bridge_name) - else: - bridge_name = self.get_bridge_name(network_id) - if self.ensure_bridge(bridge_name, interface): - return interface + bridge_name = self.get_bridge_name(network_id) + if self.ensure_bridge(bridge_name, interface): + return interface def ensure_vxlan_bridge(self, network_id, segmentation_id, mtu): """Create a vxlan and bridge unless they already exist.""" @@ -281,10 +279,9 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): """Create a non-vlan bridge unless it already exists.""" if phy_bridge_name: return self.ensure_bridge(phy_bridge_name) - else: - bridge_name = self.get_bridge_name(network_id) - if self.ensure_bridge(bridge_name, physical_interface): - return physical_interface + bridge_name = self.get_bridge_name(network_id) + if self.ensure_bridge(bridge_name, physical_interface): + return physical_interface def ensure_local_bridge(self, network_id, phy_bridge_name): """Create a local bridge unless it already exists.""" @@ -494,14 +491,13 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): if network_type == constants.TYPE_FLAT: return self.ensure_flat_bridge(network_id, physical_bridge, physical_interface) - elif network_type == constants.TYPE_VLAN: + if network_type == constants.TYPE_VLAN: return self.ensure_vlan_bridge(network_id, physical_bridge, physical_interface, segmentation_id) - else: - LOG.error("Unknown network_type %(network_type)s 
for network " - "%(network_id)s.", {network_type: network_type, - network_id: network_id}) + LOG.error("Unknown network_type %(network_type)s for network " + "%(network_id)s.", {network_type: network_type, + network_id: network_id}) def add_tap_interface(self, network_id, network_type, physical_network, segmentation_id, tap_device_name, device_owner, mtu): @@ -642,23 +638,21 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): {'interface_name': interface_name, 'bridge_name': bridge_name}) return True - else: - if not bridge_device.owns_interface(interface_name): - LOG.debug("Cannot remove %(interface_name)s from " - "%(bridge_name)s. It is not on the bridge.", - {'interface_name': interface_name, - 'bridge_name': bridge_name}) - return False - msg = _("Error deleting %(interface_name)s from bridge " - "%(bridge_name)s") % {'interface_name': interface_name, - 'bridge_name': bridge_name} - raise RuntimeError(msg) - else: - LOG.debug("Cannot remove device %(interface_name)s bridge " - "%(bridge_name)s does not exist", - {'interface_name': interface_name, - 'bridge_name': bridge_name}) - return False + if not bridge_device.owns_interface(interface_name): + LOG.debug("Cannot remove %(interface_name)s from " + "%(bridge_name)s. It is not on the bridge.", + {'interface_name': interface_name, + 'bridge_name': bridge_name}) + return False + msg = _("Error deleting %(interface_name)s from bridge " + "%(bridge_name)s") % {'interface_name': interface_name, + 'bridge_name': bridge_name} + raise RuntimeError(msg) + LOG.debug("Cannot remove device %(interface_name)s bridge " + "%(bridge_name)s does not exist", + {'interface_name': interface_name, + 'bridge_name': bridge_name}) + return False def delete_interface(self, interface): device = self.ip.device(interface) diff --git a/neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py b/neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py index 591cc5f24cd..88c160f9ff5 100644 --- a/neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py +++ b/neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py @@ -76,8 +76,7 @@ class MacvtapMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): if port_profile and port_profile.get('migrating_to', None): LOG.debug("Live migration with profile %s detected.", port_profile) return True - else: - return False + return False def try_to_bind_segment_for_agent(self, context, segment, agent): if self.check_segment_for_agent(segment, agent): diff --git a/neutron/plugins/ml2/drivers/mech_agent.py b/neutron/plugins/ml2/drivers/mech_agent.py index faeec8e284f..7c950c15ffb 100644 --- a/neutron/plugins/ml2/drivers/mech_agent.py +++ b/neutron/plugins/ml2/drivers/mech_agent.py @@ -331,8 +331,7 @@ class SimpleAgentMechanismDriverBase(AgentMechanismDriverBase, self.get_vif_type(context, agent, segment), self.get_vif_details(context, agent, segment)) return True - else: - return False + return False def get_vif_details(self, context, agent, segment): return self.vif_details diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py index 3542899b6a8..d4a86724e33 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py @@ -471,10 +471,9 @@ class ESwitchManager: LOG.info("Device %s has 0 VFs configured. 
Skipping " "for now to let the device initialize", dev_name) return - else: - # looks like device indeed has 0 VFs configured - # it is probably used just as direct-physical - LOG.info("Device %s has 0 VFs configured", dev_name) + # looks like device indeed has 0 VFs configured it is probably used + # just as direct-physical + LOG.info("Device %s has 0 VFs configured", dev_name) numvfs_cur = len(embedded_switch.scanned_pci_list) if numvfs >= 0 and numvfs > numvfs_cur: diff --git a/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py b/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py index 399b1375055..ae6bd2db8e3 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py @@ -110,9 +110,9 @@ class SriovNicSwitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): """ if 'device_mappings' in agent['configurations']: return agent['configurations']['device_mappings'] - else: - raise ValueError(_('Cannot standardize device mappings of agent ' - 'type: %s'), agent['agent_type']) + raise ValueError( + _('Cannot standardize device mappings of agent type: %s'), + agent['agent_type']) def bind_port(self, context): LOG.debug("Attempting to bind port %(port)s on " diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py b/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py index bc5ebca1776..c22a952b1f0 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py @@ -433,7 +433,7 @@ class QosOVSAgentDriver(qos.QosLinuxAgentDriver, 'vif_port was not found. It seems that port is already ' 'deleted', port.get('port_id')) return - elif not port.get('physical_network'): + if not port.get('physical_network'): LOG.debug('update_minimum_bandwidth was received for port %s but ' 'has no physical network associated', port.get('port_id')) diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py index 550c5a1c05e..12cbe1f7ca4 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py @@ -225,8 +225,7 @@ class OVSIntegrationBridge(ovs_bridge.OVSAgentBridge, def _dvr_dst_mac_table_id(network_type): if network_type in constants.DVR_PHYSICAL_NETWORK_TYPES: return constants.ARP_DVR_MAC_TO_DST_MAC_PHYSICAL - else: - return constants.ARP_DVR_MAC_TO_DST_MAC + return constants.ARP_DVR_MAC_TO_DST_MAC def install_dvr_dst_mac_for_arp(self, network_type, vlan_tag, gateway_mac, dvr_mac, rtr_port): @@ -262,8 +261,7 @@ class OVSIntegrationBridge(ovs_bridge.OVSAgentBridge, def _dvr_to_src_mac_table_id(network_type): if network_type in constants.DVR_PHYSICAL_NETWORK_TYPES: return constants.DVR_TO_SRC_MAC_PHYSICAL - else: - return constants.DVR_TO_SRC_MAC + return constants.DVR_TO_SRC_MAC def install_dvr_to_src_mac(self, network_type, vlan_tag, gateway_mac, dst_mac, dst_port): diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py index ddfb304d17b..a6b6dc3bdba 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py @@ -1412,14 +1412,13 @@ class 
OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, except vlanmanager.VifIdNotFound: LOG.info('net_uuid %s not managed by VLAN manager', net_uuid) - if net_uuid: - # TODO(sahid); This needs to be fixed. It supposes a segment - # per network per host. Basically this code is to avoid - # changing logic which is not the aim of this commit. - segs = self.vlan_manager.get_segments(net_uuid) - lvm = self.vlan_manager.get(net_uuid, list(segs.keys())[0]) - else: + if not net_uuid: return None, None, None + # TODO(sahid); This needs to be fixed. It supposes a segment per + # network per host. Basically this code is to avoid changing logic + # which is not the aim of this commit. + segs = self.vlan_manager.get_segments(net_uuid) + lvm = self.vlan_manager.get(net_uuid, list(segs.keys())[0]) if vif_id in lvm.vif_ports: vif_port = lvm.vif_ports[vif_id] @@ -2443,11 +2442,10 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, # We cannot change this from 8, since it could break # backwards-compatibility return '%08x' % addr - else: - # Create 32-bit Base32 encoded hash - blake2b = hashlib.blake2b(ip_address.encode(), digest_size=20) - iphash = base64.b32encode(blake2b.digest()) - return iphash[:hashlen].decode().lower() + # Create 32-bit Base32 encoded hash + blake2b = hashlib.blake2b(ip_address.encode(), digest_size=20) + iphash = base64.b32encode(blake2b.digest()) + return iphash[:hashlen].decode().lower() except Exception: LOG.warning("Invalid remote IP: %s", ip_address) return diff --git a/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py b/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py index afd1dc52899..91f9a318181 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py +++ b/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py @@ -105,9 +105,9 @@ class OpenvswitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): if 'bridge_mappings' in agent['configurations']: return {k: [v] for k, v in agent['configurations']['bridge_mappings'].items()} - else: - raise ValueError(_('Cannot standardize bridge mappings of agent ' - 'type: %s'), agent['agent_type']) + raise ValueError( + _('Cannot standardize bridge mappings of agent type: %s'), + agent['agent_type']) def check_vlan_transparency(self, context): """Currently Openvswitch driver doesn't support vlan transparency.""" diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py index a417e839d79..257fc1bdc14 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py @@ -1431,7 +1431,7 @@ class OVNMechanismDriver(api.MechanismDriver): LOG.debug('Chassis %s is reponsible of the resource provider ' '%s', ch_name, ch_rp) return True - elif len(states) > 1: + if len(states) > 1: rps = {state[0]: placement_ext.dict_chassis_config(state[1]) for state in states} LOG.error('Several chassis reported the requested resource ' @@ -1464,17 +1464,13 @@ def update_agent(self, context, id, agent, _driver=None): # and we can just fall through to raising in the case that admin_state_up # is being set to False, otherwise the end-state will be fine if not agent.get('admin_state_up', True): - pass - elif 'description' in agent: + raise n_exc.BadRequest(resource='agent', + msg='OVN agent status cannot be updated') + if 'description' in agent: 
_driver.sb_ovn.set_chassis_neutron_description( chassis_name, agent['description'], agent_type).execute(check_error=True) - return agent - else: - # admin_state_up=True w/o description - return agent - raise n_exc.BadRequest(resource='agent', - msg='OVN agent status cannot be updated') + return agent def delete_agent(self, context, id, _driver=None): diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py index fe063183e16..d8571eb5744 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py @@ -77,31 +77,27 @@ def get_lsp_dhcp_options_uuids(lsp, lsp_name): def _add_gateway_chassis(api, txn, lrp_name, val): gateway_chassis = api._tables.get('Gateway_Chassis') - if gateway_chassis: - prio = len(val) - uuid_list = [] - for chassis in val: - gwc_name = '{}_{}'.format(lrp_name, chassis) - try: - gwc = idlutils.row_by_value(api.idl, - 'Gateway_Chassis', - 'name', gwc_name) - except idlutils.RowNotFound: - gwc = txn.insert(gateway_chassis) - gwc.name = gwc_name - gwc.chassis_name = chassis - gwc.priority = prio - LOG.info( - "Schedule LRP %(lrp)s on gateway %(gtw)s with priority" - " %(prio)s", - {"lrp": lrp_name, "gtw": chassis, "prio": prio}, - ) - prio = prio - 1 - uuid_list.append(gwc.uuid) - return 'gateway_chassis', uuid_list - else: + if not gateway_chassis: chassis = {ovn_const.OVN_GATEWAY_CHASSIS_KEY: val[0]} return 'options', chassis + prio = len(val) + uuid_list = [] + for chassis in val: + gwc_name = '{}_{}'.format(lrp_name, chassis) + try: + gwc = idlutils.row_by_value( + api.idl, 'Gateway_Chassis', 'name', gwc_name) + except idlutils.RowNotFound: + gwc = txn.insert(gateway_chassis) + gwc.name = gwc_name + gwc.chassis_name = chassis + gwc.priority = prio + LOG.info( + "Schedule LRP %(lrp)s on gateway %(gtw)s with priority %(prio)s", + {"lrp": lrp_name, "gtw": chassis, "prio": prio}) + prio = prio - 1 + uuid_list.append(gwc.uuid) + return 'gateway_chassis', uuid_list class CheckLivenessCommand(command.BaseCommand): diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/placement.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/placement.py index 03f3c7d0f56..962e5896afa 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/placement.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/placement.py @@ -132,9 +132,9 @@ class ChassisBandwidthConfigEvent(row_event.RowEvent): if (not self.placement_extension or not self.placement_extension.enabled): return False - elif event == self.ROW_CREATE: + if event == self.ROW_CREATE: return True - elif event == self.ROW_UPDATE and old and hasattr(old, 'other_config'): + if event == self.ROW_UPDATE and old and hasattr(old, 'other_config'): row_bw = _parse_ovn_cms_options(row) old_bw = _parse_ovn_cms_options(old) if row_bw != old_bw: diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py index 00b26479b79..26854b82186 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py @@ -241,8 +241,7 @@ class OVNClientQosExtension: if port.get('qos_policy_id'): return port['qos_policy_id'], 'port' - else: - return port['qos_network_policy_id'], 'network' + return port['qos_network_policy_id'], 'network' def 
_delete_port_qos_rules(self, txn, port_id, network_id, lsp=None, port_deleted=False): diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py index e5ed9d0f7f8..43d4eeb92d0 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py @@ -294,9 +294,7 @@ class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend): if lport_name is not None: return cmd.DelLSwitchPortCommand(self, lport_name, lswitch_name, if_exists) - else: - raise RuntimeError(_("Currently only supports " - "delete by lport-name")) + raise RuntimeError(_("Currently only supports delete by lport-name")) def get_all_logical_switches_with_ports(self): result = [] @@ -1045,9 +1043,8 @@ class OvsdbSbOvnIdl(sb_impl_idl.OvnSbApiIdlImpl, Backend): return [r for r in rows if r.chassis and r.chassis[0].name == chassis or chassis in [ch.name for ch in r.additional_chassis]] - else: - return [r for r in rows - if r.chassis and r.chassis[0].name == chassis] + return [r for r in rows + if r.chassis and r.chassis[0].name == chassis] def get_chassis_host_for_port(self, port_id): chassis = set() diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py index daa566a6ab5..40e4ca89a9a 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py @@ -1379,7 +1379,7 @@ class OVNClient: route_bfd = getattr(route, 'bfd', []) if router_default_route_bfd and not route_bfd: return True - elif route_bfd and not router_default_route_bfd: + if route_bfd and not router_default_route_bfd: return True return False diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovsdb_monitor.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovsdb_monitor.py index 4bc5e50dfbe..e0662925606 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovsdb_monitor.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovsdb_monitor.py @@ -151,7 +151,7 @@ class ChassisEvent(row_event.RowEvent): self.driver.nb_ovn.ha_chassis_group_del_chassis( hcg.name, row.name, if_exists=True)) return - elif not is_gw_chassis and is_old_gw: + if not is_gw_chassis and is_old_gw: # Chassis is not a gateway anymore, treat it as deletion event = self.ROW_DELETE elif is_gw_chassis and not is_old_gw: diff --git a/neutron/plugins/ml2/drivers/type_tunnel.py b/neutron/plugins/ml2/drivers/type_tunnel.py index 7384a16bba4..114ec6af840 100644 --- a/neutron/plugins/ml2/drivers/type_tunnel.py +++ b/neutron/plugins/ml2/drivers/type_tunnel.py @@ -429,58 +429,7 @@ class TunnelRpcCallbackMixin: raise exc.InvalidInput(error_message=msg) driver = self._type_manager.drivers.get(tunnel_type) - if driver: - # The given conditional statements will verify the following - # things: - # 1. If host is not passed from an agent, it is a legacy mode. - # 2. If passed host and tunnel_ip are not found in the DB, - # it is a new endpoint. - # 3. If host is passed from an agent and it is not found in DB - # but the passed tunnel_ip is found, delete the endpoint - # from DB and add the endpoint with (tunnel_ip, host), - # it is an upgrade case. - # 4. 
If passed host is found in DB and passed tunnel ip is not - # found, delete the endpoint belonging to that host and - # add endpoint with latest (tunnel_ip, host), it is a case - # where local_ip of an agent got changed. - # 5. If the passed host had another ip in the DB the host-id has - # roamed to a different IP then delete any reference to the new - # local_ip or the host id. Don't notify tunnel_delete for the - # old IP since that one could have been taken by a different - # agent host-id (neutron-ovs-cleanup should be used to clean up - # the stale endpoints). - # Finally create a new endpoint for the (tunnel_ip, host). - if host: - host_endpoint = driver.obj.get_endpoint_by_host(host) - ip_endpoint = driver.obj.get_endpoint_by_ip(tunnel_ip) - - if (ip_endpoint and ip_endpoint.host is None and - host_endpoint is None): - driver.obj.delete_endpoint(ip_endpoint.ip_address) - elif (ip_endpoint and ip_endpoint.host != host): - LOG.info( - "Tunnel IP %(ip)s was used by host %(host)s and " - "will be assigned to %(new_host)s", - {'ip': ip_endpoint.ip_address, - 'host': ip_endpoint.host, - 'new_host': host}) - driver.obj.delete_endpoint_by_host_or_ip( - host, ip_endpoint.ip_address) - elif (host_endpoint and host_endpoint.ip_address != tunnel_ip): - # Notify all other listening agents to delete stale tunnels - self._notifier.tunnel_delete( - rpc_context, host_endpoint.ip_address, tunnel_type) - driver.obj.delete_endpoint(host_endpoint.ip_address) - - tunnel = driver.obj.add_endpoint(tunnel_ip, host) - tunnels = driver.obj.get_endpoints() - entry = {'tunnels': tunnels} - # Notify all other listening agents - self._notifier.tunnel_update(rpc_context, tunnel.ip_address, - tunnel_type) - # Return the list of tunnels IP's to the agent - return entry - else: + if not driver: msg = (_("Network type value %(type)s not supported, " "host: %(host)s with tunnel IP: %(ip)s") % {'type': tunnel_type, @@ -488,6 +437,55 @@ class TunnelRpcCallbackMixin: 'ip': tunnel_ip}) raise exc.InvalidInput(error_message=msg) + # The given conditional statements will verify the following things: + # 1. If host is not passed from an agent, it is a legacy mode. + # 2. If passed host and tunnel_ip are not found in the DB, it is a new + # endpoint. + # 3. If host is passed from an agent and it is not found in DB but the + # passed tunnel_ip is found, delete the endpoint from DB and add the + # endpoint with (tunnel_ip, host), it is an upgrade case. + # 4. If passed host is found in DB and passed tunnel ip is not found, + # delete the endpoint belonging to that host and add endpoint with + # latest (tunnel_ip, host), it is a case where local_ip of an agent + # got changed. + # 5. If the passed host had another ip in the DB the host-id has roamed + # to a different IP then delete any reference to the new local_ip or + # the host id. Don't notify tunnel_delete for the old IP since that + # one could have been taken by a different agent host-id + # (neutron-ovs-cleanup should be used to clean up the stale + # endpoints). Finally create a new endpoint for the (tunnel_ip, + # host). 
+ if host: + host_endpoint = driver.obj.get_endpoint_by_host(host) + ip_endpoint = driver.obj.get_endpoint_by_ip(tunnel_ip) + + if (ip_endpoint and ip_endpoint.host is None and + host_endpoint is None): + driver.obj.delete_endpoint(ip_endpoint.ip_address) + elif (ip_endpoint and ip_endpoint.host != host): + LOG.info( + "Tunnel IP %(ip)s was used by host %(host)s and " + "will be assigned to %(new_host)s", + {'ip': ip_endpoint.ip_address, + 'host': ip_endpoint.host, + 'new_host': host}) + driver.obj.delete_endpoint_by_host_or_ip( + host, ip_endpoint.ip_address) + elif (host_endpoint and host_endpoint.ip_address != tunnel_ip): + # Notify all other listening agents to delete stale tunnels + self._notifier.tunnel_delete( + rpc_context, host_endpoint.ip_address, tunnel_type) + driver.obj.delete_endpoint(host_endpoint.ip_address) + + tunnel = driver.obj.add_endpoint(tunnel_ip, host) + tunnels = driver.obj.get_endpoints() + entry = {'tunnels': tunnels} + # Notify all other listening agents + self._notifier.tunnel_update(rpc_context, tunnel.ip_address, + tunnel_type) + # Return the list of tunnels IP's to the agent + return entry + class TunnelAgentRpcApiMixin: diff --git a/neutron/plugins/ml2/extensions/dns_integration.py b/neutron/plugins/ml2/extensions/dns_integration.py index edbf0d75ff3..0f2fe773e6e 100644 --- a/neutron/plugins/ml2/extensions/dns_integration.py +++ b/neutron/plugins/ml2/extensions/dns_integration.py @@ -461,8 +461,7 @@ def _filter_by_subnet(context, fixed_ips): subnet_filtered.append(str(ip['ip_address'])) if filter_fixed_ips: return subnet_filtered - else: - return [str(ip['ip_address']) for ip in fixed_ips] + return [str(ip['ip_address']) for ip in fixed_ips] def _create_port_in_external_dns_service(resource, event, diff --git a/neutron/plugins/ml2/managers.py b/neutron/plugins/ml2/managers.py index 6e4fa2a39dd..a6f4f5decbd 100644 --- a/neutron/plugins/ml2/managers.py +++ b/neutron/plugins/ml2/managers.py @@ -76,12 +76,11 @@ class TypeManager(stevedore.named.NamedExtensionManager): def _check_tenant_network_types(self, types): self.tenant_network_types = [] for network_type in types: - if network_type in self.drivers: - self.tenant_network_types.append(network_type) - else: + if network_type not in self.drivers: LOG.error("No type driver for tenant network_type: %s. 
" "Service terminated!", network_type) raise SystemExit(1) + self.tenant_network_types.append(network_type) LOG.info("Tenant network_types: %s", self.tenant_network_types) def _check_external_network_type(self, ext_network_type): @@ -114,11 +113,11 @@ class TypeManager(stevedore.named.NamedExtensionManager): raise mpnet_exc.SegmentsSetInConjunctionWithProviders() segment = self._get_provider_segment(network) return [self._process_provider_segment(segment)] - elif validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)): + if validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)): segments = [self._process_provider_segment(s) for s in network[mpnet_apidef.SEGMENTS]] - mpnet_apidef.check_duplicate_segments( - segments, self.is_partial_segment) + mpnet_apidef.check_duplicate_segments(segments, + self.is_partial_segment) return segments def _match_segment(self, segment, filters): @@ -290,20 +289,18 @@ class TypeManager(stevedore.named.NamedExtensionManager): def is_partial_segment(self, segment): network_type = segment[api.NETWORK_TYPE] driver = self.drivers.get(network_type) - if driver: - return driver.obj.is_partial_segment(segment) - else: + if not driver: msg = _("network_type value '%s' not supported") % network_type raise exc.InvalidInput(error_message=msg) + return driver.obj.is_partial_segment(segment) def validate_provider_segment(self, segment): network_type = segment[api.NETWORK_TYPE] driver = self.drivers.get(network_type) - if driver: - driver.obj.validate_provider_segment(segment) - else: + if not driver: msg = _("network_type value '%s' not supported") % network_type raise exc.InvalidInput(error_message=msg) + driver.obj.validate_provider_segment(segment) def reserve_provider_segment(self, context, segment, filters=None): network_type = segment.get(api.NETWORK_TYPE) @@ -311,16 +308,13 @@ class TypeManager(stevedore.named.NamedExtensionManager): if isinstance(driver.obj, api.TypeDriver): return driver.obj.reserve_provider_segment(context.session, segment, filters) - else: - return driver.obj.reserve_provider_segment(context, - segment, filters) + return driver.obj.reserve_provider_segment(context, segment, filters) def _allocate_segment(self, context, network_type, filters=None): driver = self.drivers.get(network_type) if isinstance(driver.obj, api.TypeDriver): return driver.obj.allocate_tenant_segment(context.session, filters) - else: - return driver.obj.allocate_tenant_segment(context, filters) + return driver.obj.allocate_tenant_segment(context, filters) def _allocate_tenant_net_segment(self, context, filters=None): for network_type in self.tenant_network_types: @@ -887,13 +881,12 @@ class MechanismManager(stevedore.named.NamedExtensionManager): if self._bind_port_level(context, level + 1, next_segments): return True - else: - LOG.warning("Failed to bind port %(port)s on " - "host %(host)s at level %(lvl)s", - {'port': context.current['id'], - 'host': context.host, - 'lvl': level + 1}) - context._pop_binding_level() + LOG.warning("Failed to bind port %(port)s on " + "host %(host)s at level %(lvl)s", + {'port': context.current['id'], + 'host': context.host, + 'lvl': level + 1}) + context._pop_binding_level() else: # NOTE(bence romsics): Consider: "In case of # hierarchical port binding binding_profile.allocation diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index 55cf448e764..a18b3c66f1b 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -490,8 +490,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, 
orig_port['device_id'] != ''): port['mac_address'] = self._generate_macs()[0] return True - else: - return False + return False @registry.receives(resources.AGENT, [events.AFTER_UPDATE]) def _retry_binding_revived_agents(self, resource, event, trigger, @@ -2507,13 +2506,11 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, return port LOG.debug("No binding found for DVR port %s", port['id']) return - else: - port_host = db.get_port_binding_host(context, port_id) - ret_val = port if (port_host == host) else None - if not ret_val: - LOG.debug('The host %s is not matching for port %s host %s!', - host, port_id, port_host) - return ret_val + port_host = db.get_port_binding_host(context, port_id) + if port_host == host: + return port + LOG.debug('The host %s is not matching for port %s host %s!', + host, port_id, port_host) @db_api.retry_if_session_inactive() def get_ports_from_devices(self, context, devices): diff --git a/neutron/plugins/ml2/rpc.py b/neutron/plugins/ml2/rpc.py index 94ff8ad9fdd..033b8f57291 100644 --- a/neutron/plugins/ml2/rpc.py +++ b/neutron/plugins/ml2/rpc.py @@ -313,16 +313,15 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin): if not port: LOG.debug("Port %s not found, will not notify nova.", port_id) return - else: - if port.device_owner.startswith( - n_const.DEVICE_OWNER_COMPUTE_PREFIX): - # NOTE(haleyb): It is possible for a test to override a - # config option after the plugin has been initialized so - # the nova_notifier attribute is not set on the plugin. - if (cfg.CONF.notify_nova_on_port_status_changes and - hasattr(plugin, 'nova_notifier')): - plugin.nova_notifier.notify_port_active_direct(port) - return + if port.device_owner.startswith( + n_const.DEVICE_OWNER_COMPUTE_PREFIX): + # NOTE(haleyb): It is possible for a test to override a + # config option after the plugin has been initialized so + # the nova_notifier attribute is not set on the plugin. 
+ if (cfg.CONF.notify_nova_on_port_status_changes and + hasattr(plugin, 'nova_notifier')): + plugin.nova_notifier.notify_port_active_direct(port) + return else: self.update_port_status_to_active(port, rpc_context, port_id, host) self.notify_l2pop_port_wiring(port_id, rpc_context, diff --git a/neutron/privileged/agent/linux/__init__.py b/neutron/privileged/agent/linux/__init__.py index 103104c5fd1..ff067ba2483 100644 --- a/neutron/privileged/agent/linux/__init__.py +++ b/neutron/privileged/agent/linux/__init__.py @@ -45,13 +45,13 @@ def make_serializable(value): if isinstance(value, list): return [make_serializable(item) for item in value] - elif isinstance(value, netlink.nla_slot): + if isinstance(value, netlink.nla_slot): return [_ensure_string(value[0]), make_serializable(value[1])] - elif isinstance(value, netlink.nla_base): + if isinstance(value, netlink.nla_base): return make_serializable(value.dump()) - elif isinstance(value, dict): + if isinstance(value, dict): return {_ensure_string(key): make_serializable(data) for key, data in value.items()} - elif isinstance(value, tuple): + if isinstance(value, tuple): return tuple(make_serializable(item) for item in value) return _ensure_string(value) diff --git a/neutron/privileged/agent/linux/ip_lib.py b/neutron/privileged/agent/linux/ip_lib.py index 39213ed23a4..c70e2a64e47 100644 --- a/neutron/privileged/agent/linux/ip_lib.py +++ b/neutron/privileged/agent/linux/ip_lib.py @@ -147,8 +147,7 @@ def get_iproute(namespace): if namespace: # do not try and create the namespace return nslink.NetNS(namespace, flags=0, libc=priv_linux.get_cdll()) - else: - return iproute.IPRoute() + return iproute.IPRoute() @privileged.default.entrypoint @@ -338,9 +337,7 @@ def delete_ip_addresses(cidrs, device, namespace): # NetlinkError with code EADDRNOTAVAIL (99, 'Cannot assign # requested address') # this shouldn't raise an error - if e.code == errno.EADDRNOTAVAIL: - pass - else: + if e.code != errno.EADDRNOTAVAIL: raise @@ -588,9 +585,8 @@ def create_netns(name, **kwargs): except Exception: os._exit(1) os._exit(0) - else: - if os.waitpid(pid, 0)[1]: - raise RuntimeError(_('Error creating namespace %s' % name)) + if os.waitpid(pid, 0)[1]: + raise RuntimeError(_('Error creating namespace %s' % name)) @privileged.namespace_cmd.entrypoint diff --git a/neutron/quota/resource_registry.py b/neutron/quota/resource_registry.py index e207e4d3869..fa756c72dc9 100644 --- a/neutron/quota/resource_registry.py +++ b/neutron/quota/resource_registry.py @@ -173,13 +173,11 @@ class ResourceRegistry: return resource.CountableResource( resource_name, resource._count_resource, 'quota_%s' % resource_name) - else: - LOG.info("Creating instance of TrackedResource for " - "resource:%s", resource_name) - return resource.TrackedResource( - resource_name, - self._tracked_resource_mappings[resource_name], - 'quota_%s' % resource_name) + LOG.info("Creating instance of TrackedResource for resource:%s", + resource_name) + return resource.TrackedResource( + resource_name, self._tracked_resource_mappings[resource_name], + 'quota_%s' % resource_name) def set_tracked_resource(self, resource_name, model_class, override=False): # Do not do anything if tracking is disabled by config diff --git a/neutron/scheduler/l3_agent_scheduler.py b/neutron/scheduler/l3_agent_scheduler.py index 62bbf16cfc6..1d255dd58cc 100644 --- a/neutron/scheduler/l3_agent_scheduler.py +++ b/neutron/scheduler/l3_agent_scheduler.py @@ -222,7 +222,7 @@ class L3Scheduler(metaclass=abc.ABCMeta): plugin, context, sync_router) if 
not candidates: return - elif sync_router.get('ha', False): + if sync_router.get('ha', False): chosen_agents = self._bind_ha_router(plugin, context, router_id, sync_router.get('tenant_id'), diff --git a/neutron/services/auto_allocate/db.py b/neutron/services/auto_allocate/db.py index b52e3fa014b..21d3ec824db 100644 --- a/neutron/services/auto_allocate/db.py +++ b/neutron/services/auto_allocate/db.py @@ -137,7 +137,7 @@ class AutoAllocatedTopologyMixin: # requests can be fulfilled based on a set of requirements # such as existence of default networks, pools, etc. return self._check_requirements(context, tenant_id) - elif fields: + if fields: raise n_exc.BadRequest(resource='auto_allocate', msg=_("Unrecognized field")) diff --git a/neutron/services/logapi/common/validators.py b/neutron/services/logapi/common/validators.py index deb30931f25..4b4231f65da 100644 --- a/neutron/services/logapi/common/validators.py +++ b/neutron/services/logapi/common/validators.py @@ -114,12 +114,12 @@ class ResourceValidateRequest: log_plugin = directory.get_plugin(alias=plugin_const.LOG_API) supported_logging_types = log_plugin.supported_logging_types - if resource_type in supported_logging_types: - method = self.get_validated_method(resource_type) - method(context, log_data) - else: + if resource_type not in supported_logging_types: raise log_exc.InvalidLogResourceType(resource_type=resource_type) + method = self.get_validated_method(resource_type) + method(context, log_data) + def get_validated_method(self, resource_type): """Get the validated method for resource_type""" diff --git a/neutron/services/network_ip_availability/plugin.py b/neutron/services/network_ip_availability/plugin.py index 3e24fd8580a..5ed30d8ab1d 100644 --- a/neutron/services/network_ip_availability/plugin.py +++ b/neutron/services/network_ip_availability/plugin.py @@ -55,7 +55,6 @@ class NetworkIPAvailabilityPlugin(ip_availability_db.IpAvailabilityMixin, """Return ip availability data for a specific network id.""" filters = {'network_id': [id]} result = self.get_network_ip_availabilities(context, filters) - if result: - return db_utils.resource_fields(result[0], fields) - else: + if not result: raise exceptions.NetworkNotFound(net_id=id) + return db_utils.resource_fields(result[0], fields) diff --git a/neutron/services/provider_configuration.py b/neutron/services/provider_configuration.py index 5da389e3fd2..25e351d0a91 100644 --- a/neutron/services/provider_configuration.py +++ b/neutron/services/provider_configuration.py @@ -183,14 +183,13 @@ def parse_service_provider_opt(service_module='neutron', service_type=None): name = normalize_provider_name(name) default = False if len(split) == 4 and split[3]: - if split[3] == 'default': - default = True - else: + if split[3] != 'default': msg = (_("Invalid provider format. " "Last part should be 'default' or empty: %s") % prov_def) LOG.error(msg) raise n_exc.Invalid(msg) + default = True driver = get_provider_driver_class(driver) res.append({'service_type': svc_type, diff --git a/neutron/services/qos/qos_plugin.py b/neutron/services/qos/qos_plugin.py index ce95376174d..464b2dbd671 100644 --- a/neutron/services/qos/qos_plugin.py +++ b/neutron/services/qos/qos_plugin.py @@ -256,7 +256,7 @@ class QoSPlugin(qos.QoSPluginBase): first_segment = segments[0] if not first_segment: return [] - elif not first_segment.physical_network: + if not first_segment.physical_network: # If there is no physical network this is because this is an # overlay network (tunnelled network). 
net_trait = n_const.TRAIT_NETWORK_TUNNEL @@ -382,11 +382,9 @@ class QoSPlugin(qos.QoSPluginBase): # TODO(lajoskatona): move this to neutron-lib, see similar # dict @l125. if dir == 'egress': - drctn = orc.NET_BW_EGR_KILOBIT_PER_SEC - else: - drctn = orc.NET_BW_IGR_KILOBIT_PER_SEC - return {drctn: value} - elif isinstance(rule, rule_object.QosMinimumPacketRateRule): + return {orc.NET_BW_EGR_KILOBIT_PER_SEC: value} + return {orc.NET_BW_IGR_KILOBIT_PER_SEC: value} + if isinstance(rule, rule_object.QosMinimumPacketRateRule): value = rule.get('min_kpps') # TODO(przszc): move this to neutron-lib, see similar # dict @l268. diff --git a/neutron/services/trunk/drivers/openvswitch/agent/ovsdb_handler.py b/neutron/services/trunk/drivers/openvswitch/agent/ovsdb_handler.py index 22fb1b572bf..0de71045153 100644 --- a/neutron/services/trunk/drivers/openvswitch/agent/ovsdb_handler.py +++ b/neutron/services/trunk/drivers/openvswitch/agent/ovsdb_handler.py @@ -472,8 +472,7 @@ class OVSDBHandler: # can be exceptions (e.g. unwire_subports_for_trunk). if len(expected_subports) != len(actual_subports): return constants.TRUNK_DEGRADED_STATUS - else: - return constants.TRUNK_ACTIVE_STATUS + return constants.TRUNK_ACTIVE_STATUS def _is_vm_connected(self, bridge): """True if an instance is connected to bridge, False otherwise.""" diff --git a/neutron/services/trunk/plugin.py b/neutron/services/trunk/plugin.py index 881146bcf43..d2f005171fb 100644 --- a/neutron/services/trunk/plugin.py +++ b/neutron/services/trunk/plugin.py @@ -302,26 +302,25 @@ class TrunkPlugin(service_base.ServicePluginBase): trunk = self._get_trunk(context, trunk_id) rules.trunk_can_be_managed(context, trunk) trunk_port_validator = rules.TrunkPortValidator(trunk.port_id) - if trunk_port_validator.can_be_trunked_or_untrunked(context): - # NOTE(status_police): when a trunk is deleted, the logical - # object disappears from the datastore, therefore there is no - # status transition involved. If PRECOMMIT failures occur, - # the trunk remains in the status where it was. - try: - trunk.delete() - except Exception as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Trunk driver raised exception when ' - 'deleting trunk port %s: %s', trunk_id, - str(e)) - payload = events.DBEventPayload(context, resource_id=trunk_id, - states=(trunk,)) - registry.publish(resources.TRUNK, events.PRECOMMIT_DELETE, - self, payload=payload) - else: + if not trunk_port_validator.can_be_trunked_or_untrunked(context): LOG.info('Trunk driver does not consider trunk %s ' 'untrunkable', trunk_id) raise trunk_exc.TrunkInUse(trunk_id=trunk_id) + # NOTE(status_police): when a trunk is deleted, the logical object + # disappears from the datastore, therefore there is no status + # transition involved. If PRECOMMIT failures occur, the trunk + # remains in the status where it was. 
+ try: + trunk.delete() + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.warning('Trunk driver raised exception when ' + 'deleting trunk port %s: %s', trunk_id, + str(e)) + payload = events.DBEventPayload(context, resource_id=trunk_id, + states=(trunk,)) + registry.publish(resources.TRUNK, events.PRECOMMIT_DELETE, + self, payload=payload) registry.publish(resources.TRUNK, events.AFTER_DELETE, self, payload=events.DBEventPayload( context, resource_id=trunk_id, diff --git a/neutron/services/trunk/rules.py b/neutron/services/trunk/rules.py index 7b5e7d7de55..a7a363743c8 100644 --- a/neutron/services/trunk/rules.py +++ b/neutron/services/trunk/rules.py @@ -134,8 +134,7 @@ class TrunkPortValidator: raise trunk_exc.TrunkPluginDriverConflict() if len(drivers) == 1: return drivers[0].can_trunk_bound_port - else: - return False + return False def check_not_in_use(self, context): """Raises PortInUse for ports assigned for device purposes.""" @@ -167,14 +166,14 @@ class SubPortsValidator: if msg: raise n_exc.InvalidInput(error_message=msg) - if trunk_validation: - trunk_port_mtu = self._get_port_mtu(context, self.trunk_port_id) - subport_mtus = self._prepare_subports(context) - return [self._validate(context, s, trunk_port_mtu, subport_mtus) - for s in self.subports] - else: + if not trunk_validation: return self.subports + trunk_port_mtu = self._get_port_mtu(context, self.trunk_port_id) + subport_mtus = self._prepare_subports(context) + return [self._validate(context, s, trunk_port_mtu, subport_mtus) + for s in self.subports] + def _prepare_subports(self, context): """Utility method to parse subports in the request diff --git a/neutron/tests/base.py b/neutron/tests/base.py index 8f5f4309855..0bc832c5181 100644 --- a/neutron/tests/base.py +++ b/neutron/tests/base.py @@ -589,10 +589,9 @@ class Timeout(fixtures.Fixture): except ValueError: # If timeout value is invalid do not set a timeout. 
                 self.test_timeout = 0
-        if scaling >= 1:
-            self.test_timeout *= scaling
-        else:
+        if scaling < 1:
             raise ValueError('scaling value must be >= 1')
+        self.test_timeout *= scaling
 
     def setUp(self):
         super().setUp()
diff --git a/neutron/tests/common/agents/l2_extensions.py b/neutron/tests/common/agents/l2_extensions.py
index 0e7e78bd4e3..4557ec40a0c 100644
--- a/neutron/tests/common/agents/l2_extensions.py
+++ b/neutron/tests/common/agents/l2_extensions.py
@@ -114,8 +114,7 @@ def wait_until_pkt_meter_rule_applied_ovs(bridge, port_vif, port_id,
             dl_dst=str(mac))
         if mac:
             return bool(flows) and meter_id
-        else:
-            return not bool(flows) and not meter_id
+        return not bool(flows) and not meter_id
 
     common_utils.wait_until_true(_pkt_rate_limit_rule_applied)
diff --git a/neutron/tests/common/conn_testers.py b/neutron/tests/common/conn_testers.py
index 8b400edc009..c1a94043a82 100644
--- a/neutron/tests/common/conn_testers.py
+++ b/neutron/tests/common/conn_testers.py
@@ -223,26 +223,23 @@ class ConnectionTester(fixtures.Fixture):
                                       dst_port=None):
         nc_params = (direction, protocol, src_port, dst_port)
         nc_tester = self._nc_testers.get(nc_params)
-        if nc_tester:
-            if nc_tester.is_established:
-                try:
-                    nc_tester.test_connectivity()
-                except RuntimeError:
-                    raise ConnectionTesterException(
-                        "Established %s connection with protocol %s, source "
-                        "port %s and destination port %s can no longer "
-                        "communicate")
-            else:
-                nc_tester.stop_processes()
-                raise ConnectionTesterException(
-                    '%s connection with protocol %s, source port %s and '
-                    'destination port %s is not established' % nc_params)
-        else:
+        if not nc_tester:
             raise ConnectionTesterException(
                 "Attempting to test established %s connection with protocol %s"
                 ", source port %s and destination port %s that hasn't been "
                 "established yet by calling establish_connection()" % nc_params)
+        if not nc_tester.is_established:
+            nc_tester.stop_processes()
+            raise ConnectionTesterException(
+                '%s connection with protocol %s, source port %s and '
+                'destination port %s is not established' % nc_params)
+        try:
+            nc_tester.test_connectivity()
+        except RuntimeError:
+            raise ConnectionTesterException(
+                "Established %s connection with protocol %s, source port %s "
+                "and destination port %s can no longer communicate")
 
     def assert_no_established_connection(self, direction, protocol,
                                          src_port=None, dst_port=None):
diff --git a/neutron/tests/common/net_helpers.py b/neutron/tests/common/net_helpers.py
index ad75d12b1f0..32f8d1fb6cc 100644
--- a/neutron/tests/common/net_helpers.py
+++ b/neutron/tests/common/net_helpers.py
@@ -472,12 +472,11 @@ class Pinger:
         self._parse_stats()
 
     def wait(self):
-        if self.count:
-            self._wait_for_death()
-            self._parse_stats()
-        else:
+        if not self.count:
             raise RuntimeError("Pinger is running infinitely, use stop() "
                                "first")
+        self._wait_for_death()
+        self._parse_stats()
 
 
 class NetcatTester:
@@ -699,10 +698,9 @@ class VethFixture(fixtures.Fixture):
     def get_peer_name(name):
         if name.startswith(VETH0_PREFIX):
             return name.replace(VETH0_PREFIX, VETH1_PREFIX)
-        elif name.startswith(VETH1_PREFIX):
+        if name.startswith(VETH1_PREFIX):
             return name.replace(VETH1_PREFIX, VETH0_PREFIX)
-        else:
-            tools.fail('%s is not a valid VethFixture veth endpoint' % name)
+        tools.fail('%s is not a valid VethFixture veth endpoint' % name)
 
 
 class NamedVethFixture(VethFixture):
@@ -990,15 +988,11 @@ class LinuxBridgeFixture(fixtures.Fixture):
 
     def _create_bridge(self):
         if self.prefix_is_full_name:
-            return bridge_lib.BridgeDevice.addbr(
-                name=self.prefix,
-                namespace=self.namespace
-            )
-        else:
-            return common_base.create_resource(
-                self.prefix,
-                bridge_lib.BridgeDevice.addbr,
-                namespace=self.namespace)
+            return bridge_lib.BridgeDevice.addbr(name=self.prefix,
+                                                 namespace=self.namespace)
+        return common_base.create_resource(self.prefix,
+                                           bridge_lib.BridgeDevice.addbr,
+                                           namespace=self.namespace)
 
 
 class LinuxBridgePortFixture(PortFixture):
diff --git a/neutron/tests/common/test_db_base_plugin_v2.py b/neutron/tests/common/test_db_base_plugin_v2.py
index 09a7a41b736..8ae8701c412 100644
--- a/neutron/tests/common/test_db_base_plugin_v2.py
+++ b/neutron/tests/common/test_db_base_plugin_v2.py
@@ -107,8 +107,7 @@ def _get_create_db_method(resource):
     ml2_method = '_create_%s_db' % resource
     if hasattr(directory.get_plugin(), ml2_method):
         return ml2_method
-    else:
-        return 'create_%s' % resource
+    return 'create_%s' % resource
 
 
 def _set_temporary_quota(resource, default_value):
@@ -732,8 +731,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
         if resource in ['networks', 'subnets', 'ports', 'subnetpools',
                         'security-groups']:
             return self.api
-        else:
-            return self.ext_api
+        return self.ext_api
 
     def _delete(self, collection, id,
                 expected_code=webob.exc.HTTPNoContent.code,
diff --git a/neutron/tests/fullstack/resources/machine.py b/neutron/tests/fullstack/resources/machine.py
index 23d1a2980ae..217415d5eaf 100644
--- a/neutron/tests/fullstack/resources/machine.py
+++ b/neutron/tests/fullstack/resources/machine.py
@@ -111,14 +111,11 @@ class FakeFullstackMachine(machine_fixtures.FakeMachineBase):
         if self.bridge_name is None:
             return self.host.get_bridge(self.network_id)
         agent_type = self.host.host_desc.l2_agent_type
-        if agent_type == constants.AGENT_TYPE_OVS:
-            new_bridge = self.useFixture(
-                net_helpers.OVSTrunkBridgeFixture(self.bridge_name)).bridge
-        else:
+        if agent_type != constants.AGENT_TYPE_OVS:
             raise NotImplementedError(
                 "Support for %s agent is not implemented."
                 % agent_type)
-
-        return new_bridge
+        return self.useFixture(
+            net_helpers.OVSTrunkBridgeFixture(self.bridge_name)).bridge
 
     def _configure_ipaddress(self, port_id, fixed_ip):
         subnet_id = fixed_ip['subnet_id']
diff --git a/neutron/tests/fullstack/resources/process.py b/neutron/tests/fullstack/resources/process.py
index 9dbe0b791e8..500a21a8bed 100644
--- a/neutron/tests/fullstack/resources/process.py
+++ b/neutron/tests/fullstack/resources/process.py
@@ -147,11 +147,11 @@ class ProcessFixture(fixtures.Fixture):
 
         LOG.debug("Restarting process: %s", self.process_name)
 
-        if executor is None:
-            _restart()
-        else:
+        if executor is not None:
             return executor.submit(_restart)
 
+        _restart()
+
     @property
     def service_state(self):
         cmd = ['systemctl', 'is-active', self.unit_name]
diff --git a/neutron/tests/fullstack/servers/placement.py b/neutron/tests/fullstack/servers/placement.py
index 6e7ee6fdb40..acf9cce93fb 100755
--- a/neutron/tests/fullstack/servers/placement.py
+++ b/neutron/tests/fullstack/servers/placement.py
@@ -53,14 +53,11 @@ class FakePlacement:
 
     def get_resource_providers(self, **kwargs):
         id = kwargs.get('id', None)
-        if not id:
-            return jsonutils.dumps(
-                {
-                    'resource_providers':
-                        [self.resource_providers[self.host_rp_uuid]]
-                })
-        else:
+        if id:
             return jsonutils.dumps(self.resource_providers[id])
+        return jsonutils.dumps({
+            'resource_providers': [self.resource_providers[self.host_rp_uuid]]
+        })
 
     def put_traits(self, **kwargs):
         # Return empty sting otherwise wsgiref goes mad
diff --git a/neutron/tests/fullstack/test_qos.py b/neutron/tests/fullstack/test_qos.py
index 8714bf82a4b..5ac073cc870 100644
--- a/neutron/tests/fullstack/test_qos.py
+++ b/neutron/tests/fullstack/test_qos.py
@@ -305,12 +305,11 @@ class TestBwLimitQoSOvs(_TestBwLimitQoS, base.BaseFullStackTestCase):
 
     @staticmethod
     def _get_expected_burst_value(limit, direction):
+        if direction != constants.EGRESS_DIRECTION:
+            return 0
         # For egress bandwidth limit this value should be calculated as
         # bandwidth_limit * qos_consts.DEFAULT_BURST_RATE
-        if direction == constants.EGRESS_DIRECTION:
-            return TestBwLimitQoSOvs._get_expected_egress_burst_value(limit)
-        else:
-            return 0
+        return TestBwLimitQoSOvs._get_expected_egress_burst_value(limit)
 
     def _wait_for_bw_rule_applied(self, vm, limit, burst, direction):
         if direction == constants.EGRESS_DIRECTION:
diff --git a/neutron/tests/functional/agent/l3/test_metadata_proxy.py b/neutron/tests/functional/agent/l3/test_metadata_proxy.py
index 58d6ae4a0a7..4806d95de48 100644
--- a/neutron/tests/functional/agent/l3/test_metadata_proxy.py
+++ b/neutron/tests/functional/agent/l3/test_metadata_proxy.py
@@ -120,15 +120,15 @@ class MetadataL3AgentTestCase(framework.L3AgentTestFramework):
                 raw_headers = machine.execute(cmd)
                 break
             except RuntimeError as e:
-                if 'Connection refused' in str(e):
-                    time.sleep(METADATA_REQUEST_SLEEP)
-                    i += METADATA_REQUEST_SLEEP
-                else:
+                if 'Connection refused' not in str(e):
                     if router:
                         self._log_router_interfaces_configuration(router)
-                    self.fail('metadata proxy unreachable '
-                              'on %s before timeout' % cmd[-1])
+                    self.fail(
+                        'metadata proxy unreachable on %s before timeout' %
+                        cmd[-1])
+                time.sleep(METADATA_REQUEST_SLEEP)
+                i += METADATA_REQUEST_SLEEP
 
             if i > CONNECTION_REFUSED_TIMEOUT:
                 self.fail('Timed out waiting metadata proxy to become available')
diff --git a/neutron/tests/functional/agent/linux/test_utils.py b/neutron/tests/functional/agent/linux/test_utils.py
index cbef4ba6afc..57db536884d 100644
--- a/neutron/tests/functional/agent/linux/test_utils.py
+++ b/neutron/tests/functional/agent/linux/test_utils.py
@@ -197,8 +197,7 @@ line 4
         content = cls.FILE.encode('ascii')
         if run_as_root:
             return priv_utils.write_to_tempfile(content, _path=path)
-        else:
-            return fileutils.write_to_tempfile(content, path=path)
+        return fileutils.write_to_tempfile(content, path=path)
 
     def test_read_if_exists(self):
         test_file_path = self._write_file(run_as_root=self.run_as_root)
diff --git a/neutron/tests/functional/pecan_wsgi/utils.py b/neutron/tests/functional/pecan_wsgi/utils.py
index f5cd129638a..ef79550ed3f 100644
--- a/neutron/tests/functional/pecan_wsgi/utils.py
+++ b/neutron/tests/functional/pecan_wsgi/utils.py
@@ -49,8 +49,7 @@ class FakeSingularCollectionExtension(api_extensions.ExtensionDescriptor):
     def get_extended_resources(self, version):
         if version == "2.0":
             return self.RAM
-        else:
-            return {}
+        return {}
 
     def get_pecan_controllers(self):
         ctrllr = controllers.CollectionsController(
@@ -200,8 +199,7 @@ class FakeExtension(api_extensions.ExtensionDescriptor):
     def get_extended_resources(self, version):
         if version == "2.0":
             return self.RESOURCE_ATTRIBUTE_MAP
-        else:
-            return {}
+        return {}
 
 
 class FakePlugin:
diff --git a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py
index a31a00923d1..c822744f598 100644
--- a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py
+++ b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py
@@ -108,8 +108,7 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
     def _api_for_resource(self, resource):
         if resource in ['security-groups']:
             return self._sg_api
-        else:
-            return super()._api_for_resource(resource)
+        return super()._api_for_resource(resource)
 
     def _create_resources(self, restart_ovsdb_processes=False):
         net_kwargs = {dns_apidef.DNSDOMAIN: 'ovn.test.'}
diff --git a/neutron/tests/functional/resources/process.py b/neutron/tests/functional/resources/process.py
index 4e5bb7d6fc5..00216bcf4ab 100644
--- a/neutron/tests/functional/resources/process.py
+++ b/neutron/tests/functional/resources/process.py
@@ -243,7 +243,6 @@ class OvsdbServer(DaemonProcessFixture):
             if ovsdb_process['db_type'] == db_type:
                 if ovsdb_process['protocol'] == 'unix':
                     return 'unix:' + ovsdb_process['remote_path']
-                else:
-                    return '{}:{}:{}'.format(ovsdb_process['protocol'],
-                                             ovsdb_process['remote_ip'],
-                                             ovsdb_process['remote_port'])
+                return '{}:{}:{}'.format(ovsdb_process['protocol'],
+                                         ovsdb_process['remote_ip'],
+                                         ovsdb_process['remote_port'])
diff --git a/neutron/tests/functional/test_server.py b/neutron/tests/functional/test_server.py
index 316acd36dd4..1563acd3f0a 100644
--- a/neutron/tests/functional/test_server.py
+++ b/neutron/tests/functional/test_server.py
@@ -113,9 +113,8 @@ class TestNeutronServer(base.BaseLoggingTestCase):
             if self.workers > 1:
                 return [proc.pid for proc in psutil.process_iter()
                         if safe_ppid(proc) == self.service_pid]
-            else:
-                return [proc.pid for proc in psutil.process_iter()
-                        if proc.pid == self.service_pid]
+            return [proc.pid for proc in psutil.process_iter()
+                    if proc.pid == self.service_pid]
 
         exception = RuntimeError('Failed to start %d workers.'
                                 % self.workers)
diff --git a/neutron/tests/post_mortem_debug.py b/neutron/tests/post_mortem_debug.py
index 7154a477889..e11b34aa6ef 100644
--- a/neutron/tests/post_mortem_debug.py
+++ b/neutron/tests/post_mortem_debug.py
@@ -30,9 +30,8 @@ def _get_debugger(debugger_name):
         debugger_name)
     if 'post_mortem' in dir(debugger):
         return debugger
-    else:
-        raise ValueError("%s is not a supported post mortem debugger" %
-                         debugger_name)
+    raise ValueError("%s is not a supported post mortem debugger" %
+                     debugger_name)
 
 
 def _exception_handler(debugger, exc_info):
@@ -89,10 +88,9 @@ def get_ignored_traceback(tb):
     # Find all members of an ignored trailing chain
     ignored_tracebacks = []
     for tb in reversed(tb_list):
-        if '__unittest' in tb.tb_frame.f_globals:
-            ignored_tracebacks.append(tb)
-        else:
+        if '__unittest' not in tb.tb_frame.f_globals:
             break
+        ignored_tracebacks.append(tb)
 
     # Return the first member of the ignored trailing chain
     if ignored_tracebacks:
diff --git a/neutron/tests/tools.py b/neutron/tests/tools.py
index 303379efbd7..e84a2076085 100644
--- a/neutron/tests/tools.py
+++ b/neutron/tests/tools.py
@@ -221,12 +221,9 @@ def get_random_ip_address(version=4):
                                    random.randint(3, 254),
                                    random.randint(3, 254))
         return netaddr.IPAddress(ip_string)
-    else:
-        ip = netutils.get_ipv6_addr_by_EUI64(
-            '2001:db8::/64',
-            net.get_random_mac(['fe', '16', '3e', '00', '00', '00'])
-        )
-        return ip
+    return netutils.get_ipv6_addr_by_EUI64(
+        '2001:db8::/64',
+        net.get_random_mac(['fe', '16', '3e', '00', '00', '00']))
 
 
 def get_random_router_status():
diff --git a/neutron/tests/unit/agent/common/test_ovs_lib.py b/neutron/tests/unit/agent/common/test_ovs_lib.py
index 1f17b11b69a..bef4911e95a 100644
--- a/neutron/tests/unit/agent/common/test_ovs_lib.py
+++ b/neutron/tests/unit/agent/common/test_ovs_lib.py
@@ -753,8 +753,6 @@ class TestDeferredOVSBridge(base.BaseTestCase):
                 raise Exception()
             except Exception:
                 self._verify_mock_call([])
-            else:
-                self.fail('Exception would be reraised')
 
     def test_apply(self):
         expected_calls = [
diff --git a/neutron/tests/unit/db/test_agentschedulers_db.py b/neutron/tests/unit/db/test_agentschedulers_db.py
index b53d0ae9b5c..87d5443e2c6 100644
--- a/neutron/tests/unit/db/test_agentschedulers_db.py
+++ b/neutron/tests/unit/db/test_agentschedulers_db.py
@@ -1520,9 +1520,8 @@ class OvsDhcpAgentNotifierTestCase(test_agent.AgentDBTestMixIn,
                 with self.port(subnet=subnet1,
                                device_owner=owner) as port:
                     return [net1, subnet1, port]
-            else:
-                with self.port(subnet=subnet1) as port:
-                    return [net1, subnet1, port]
+            with self.port(subnet=subnet1) as port:
+                return [net1, subnet1, port]
 
     def _network_port_create(self, *args, **kwargs):
         net, sub, port = self._api_network_port_create(*args, **kwargs)
diff --git a/neutron/tests/unit/db/test_ipam_backend_mixin.py b/neutron/tests/unit/db/test_ipam_backend_mixin.py
index ea90f16077a..dd18102aafe 100644
--- a/neutron/tests/unit/db/test_ipam_backend_mixin.py
+++ b/neutron/tests/unit/db/test_ipam_backend_mixin.py
@@ -79,11 +79,8 @@ class TestIpamBackendMixin(base.BaseTestCase):
                     self.ctx,
                    ipv6_address_mode=constants.IPV6_SLAAC,
                     ipv6_ra_mode=constants.IPV6_SLAAC)
-            else:
-                return subnet_obj.Subnet(
-                    self.ctx,
-                    ipv6_address_mode=None,
-                    ipv6_ra_mode=None)
+            return subnet_obj.Subnet(
+                self.ctx, ipv6_address_mode=None, ipv6_ra_mode=None)
 
         self.mixin._get_subnet_object = mock.Mock(
             side_effect=_get_subnet_object)
diff --git a/neutron/tests/unit/db/test_ipam_pluggable_backend.py b/neutron/tests/unit/db/test_ipam_pluggable_backend.py
index f04669ea08c..daeac5b9eed 100644
--- a/neutron/tests/unit/db/test_ipam_pluggable_backend.py
+++ b/neutron/tests/unit/db/test_ipam_pluggable_backend.py
@@ -128,8 +128,7 @@ class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase):
                 if request.address == netaddr.IPAddress(fail_ip):
                     raise exception
                 return str(request.address), subnet_id
-            else:
-                return auto_ip, subnet_id
+            return auto_ip, subnet_id
 
         return allocate_mock
diff --git a/neutron/tests/unit/extensions/extendedattribute.py b/neutron/tests/unit/extensions/extendedattribute.py
index d35f453caf9..dcdb24b6371 100644
--- a/neutron/tests/unit/extensions/extendedattribute.py
+++ b/neutron/tests/unit/extensions/extendedattribute.py
@@ -46,5 +46,4 @@ class Extendedattribute(extensions.ExtensionDescriptor):
     def get_extended_resources(self, version):
         if version == "2.0":
             return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
+        return {}
diff --git a/neutron/tests/unit/extensions/extensionattribute.py b/neutron/tests/unit/extensions/extensionattribute.py
index 90e0b63e413..116bca17821 100644
--- a/neutron/tests/unit/extensions/extensionattribute.py
+++ b/neutron/tests/unit/extensions/extensionattribute.py
@@ -88,8 +88,7 @@ class Extensionattribute(api_extensions.ExtensionDescriptor):
     def get_extended_resources(self, version):
         if version == "2.0":
             return RESOURCE_ATTRIBUTE_MAP
-        else:
-            return {}
+        return {}
 
 
 class ExtensionObjectTestPluginBase:
diff --git a/neutron/tests/unit/extensions/v2attributes.py b/neutron/tests/unit/extensions/v2attributes.py
index 0c83e2c1b36..04b4683c9a1 100644
--- a/neutron/tests/unit/extensions/v2attributes.py
+++ b/neutron/tests/unit/extensions/v2attributes.py
@@ -44,5 +44,4 @@ class V2attributes(extensions.ExtensionDescriptor):
     def get_extended_resources(self, version):
         if version == "2.0":
             return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
+        return {}
diff --git a/neutron/tests/unit/plugins/ml2/_test_mech_agent.py b/neutron/tests/unit/plugins/ml2/_test_mech_agent.py
index 169eda4fdbb..9fc6fd33328 100644
--- a/neutron/tests/unit/plugins/ml2/_test_mech_agent.py
+++ b/neutron/tests/unit/plugins/ml2/_test_mech_agent.py
@@ -163,8 +163,7 @@ class FakePortContext(api.PortContext):
     def host_agents(self, agent_type):
        if agent_type == self._agent_type:
            return self._agents
-        else:
-            return []
+        return []
 
     def set_binding(self, segment_id, vif_type, vif_details):
         self._bound_segment_id = segment_id
diff --git a/neutron/tests/unit/plugins/ml2/extensions/fake_extension.py b/neutron/tests/unit/plugins/ml2/extensions/fake_extension.py
index 7196996dd34..a009a15a3f7 100644
--- a/neutron/tests/unit/plugins/ml2/extensions/fake_extension.py
+++ b/neutron/tests/unit/plugins/ml2/extensions/fake_extension.py
@@ -62,5 +62,4 @@ class Fake_extension(extensions.ExtensionDescriptor):
     def get_extended_resources(self, version):
         if version == "2.0":
             return EXTENDED_ATTRIBUTES_2_0
-        else:
-            return {}
+        return {}
diff --git a/neutron/tests/unit/testlib_api.py b/neutron/tests/unit/testlib_api.py
index e4d6e84d771..fc073f16bf3 100644
--- a/neutron/tests/unit/testlib_api.py
+++ b/neutron/tests/unit/testlib_api.py
@@ -76,14 +76,13 @@ class StaticSqlFixtureNoSchema(lib_fixtures.SqlFixture):
     def _init_resources(cls):
         if cls._GLOBAL_RESOURCES:
             return
-        else:
-            cls._GLOBAL_RESOURCES = True
-            cls.database_resource = provision.DatabaseResource(
-                "sqlite", db_api.get_context_manager())
-            dependency_resources = {}
-            for name, resource in cls.database_resource.resources:
-                dependency_resources[name] = resource.getResource()
-            cls.engine = dependency_resources['backend'].engine
+        cls._GLOBAL_RESOURCES = True
+        cls.database_resource = provision.DatabaseResource(
+            "sqlite", db_api.get_context_manager())
+        dependency_resources = {}
+        for name, resource in cls.database_resource.resources:
+            dependency_resources[name] = resource.getResource()
+        cls.engine = dependency_resources['backend'].engine
 
     def _delete_from_schema(self, engine):
         pass
@@ -173,10 +172,7 @@ class OpportunisticSqlFixture(lib_fixtures.SqlFixture):
                 ('schema', schema_resource),
                 ('db', database_resource)
             ]
-        else:
-            return [
-                ('db', database_resource)
-            ]
+        return [('db', database_resource)]
 
 
 class BaseSqlTestCase:
@@ -223,8 +219,7 @@ class OpportunisticDBTestMixin:
         msg = "backend '%s' unavailable" % self.DRIVER
         if self.SKIP_ON_UNAVAILABLE_DB:
             self.skipTest(msg)
-        else:
-            self.fail(msg)
+        self.fail(msg)
 
     _schema_resources = {}
     _database_resources = {}