From 83ef6b56772d94e870b9c3eca928d5aefeffe07e Mon Sep 17 00:00:00 2001
From: LiuNanke
Date: Tue, 29 Dec 2015 23:25:55 +0800
Subject: [PATCH] Use LOG.warning instead of LOG.warn

Python 3 deprecated the logger.warn method (see
https://docs.python.org/3/library/logging.html#logging.warning),
so we prefer LOG.warning to avoid DeprecationWarning.

Closes-Bug: #1529913
Change-Id: Icc01ce5fbd10880440cf75a2e0833394783464a0
Co-Authored-By: Gary Kotton
---
 HACKING.rst | 2 ++
 neutron/agent/common/ovs_lib.py | 14 +++++-----
 neutron/agent/dhcp/agent.py | 11 ++++----
 neutron/agent/l3/agent.py | 8 +++---
 neutron/agent/l3/dvr_local_router.py | 9 ++++---
 neutron/agent/l3/router_info.py | 4 +--
 neutron/agent/linux/iptables_manager.py | 12 ++++-----
 neutron/agent/metadata/agent.py | 8 +++---
 neutron/agent/rpc.py | 5 ++--
 neutron/agent/securitygroups_rpc.py | 8 +++---
 neutron/api/api_common.py | 6 ++---
 neutron/api/extensions.py | 25 ++++++++---------
 .../rpc/agentnotifiers/dhcp_rpc_agent_api.py | 20 +++++++-------
 neutron/api/rpc/handlers/dhcp_rpc.py | 6 ++---
 neutron/db/agents_db.py | 20 +++++++-------
 neutron/db/agentschedulers_db.py | 27 ++++++++++---------
 neutron/db/l3_agentschedulers_db.py | 2 +-
 neutron/db/securitygroups_rpc_base.py | 4 +--
 neutron/debug/debug_agent.py | 2 +-
 neutron/hacking/checks.py | 10 ++++++-
 neutron/pecan_wsgi/controllers/resource.py | 2 +-
 neutron/pecan_wsgi/controllers/root.py | 4 +--
 neutron/pecan_wsgi/startup.py | 8 +++---
 .../hyperv/agent/security_groups_driver.py | 8 +++---
 .../mech_sriov/agent/sriov_nic_agent.py | 2 +-
 .../agent/openflow/native/ofswitch.py | 4 +--
 .../openvswitch/agent/ovs_neutron_agent.py | 17 ++++++------
 neutron/plugins/ml2/managers.py | 2 +-
 neutron/policy.py | 5 ++--
 neutron/quota/resource_registry.py | 2 +-
 neutron/scheduler/dhcp_agent_scheduler.py | 5 ++--
 neutron/scheduler/l3_agent_scheduler.py | 10 +++----
 .../bgp/scheduler/bgp_dragent_scheduler.py | 2 +-
 .../metering/agents/metering_agent.py | 4 +--
 neutron/tests/unit/agent/dhcp/test_agent.py | 2 +-
 .../unit/agent/linux/test_iptables_manager.py | 4 +--
 .../agentnotifiers/test_dhcp_rpc_agent_api.py | 4 +--
 neutron/tests/unit/db/test_agents_db.py | 2 +-
 neutron/tests/unit/hacking/test_checks.py | 1 -
 39 files changed, 155 insertions(+), 136 deletions(-)

diff --git a/HACKING.rst b/HACKING.rst
index a6cfbc136a8..3f6c595f630 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -23,6 +23,8 @@ Neutron Specific Commandments
 - [N331] Detect wrong usage with assertTrue(isinstance()).
 - [N332] Use assertEqual(expected_http_code, observed_http_code) instead of
   assertEqual(observed_http_code, expected_http_code).
+- [N333] Validate that LOG.warning is used instead of LOG.warn. The latter
+  is deprecated.
 
 Creating Unit Tests
 -------------------
diff --git a/neutron/agent/common/ovs_lib.py b/neutron/agent/common/ovs_lib.py
index c75ede74ce2..e1b8020b3b3 100644
--- a/neutron/agent/common/ovs_lib.py
+++ b/neutron/agent/common/ovs_lib.py
@@ -449,11 +449,11 @@ class OVSBridge(BaseOVS):
                                       if_exists=True)
         for result in results:
             if result['ofport'] == UNASSIGNED_OFPORT:
-                LOG.warn(_LW("Found not yet ready openvswitch port: %s"),
-                         result['name'])
+                LOG.warning(_LW("Found not yet ready openvswitch port: %s"),
+                            result['name'])
             elif result['ofport'] == INVALID_OFPORT:
-                LOG.warn(_LW("Found failed openvswitch port: %s"),
-                         result['name'])
+                LOG.warning(_LW("Found failed openvswitch port: %s"),
+                            result['name'])
             elif 'attached-mac' in result['external_ids']:
                 port_id = self.portid_from_external_ids(result['external_ids'])
                 if port_id:
@@ -511,9 +511,9 @@ class OVSBridge(BaseOVS):
 
     @staticmethod
     def _check_ofport(port_id, port_info):
         if port_info['ofport'] in [UNASSIGNED_OFPORT, INVALID_OFPORT]:
-            LOG.warn(_LW("ofport: %(ofport)s for VIF: %(vif)s is not a"
-                         " positive integer"),
-                     {'ofport': port_info['ofport'], 'vif': port_id})
+            LOG.warning(_LW("ofport: %(ofport)s for VIF: %(vif)s "
+                            "is not a positive integer"),
+                        {'ofport': port_info['ofport'], 'vif': port_id})
             return False
         return True
diff --git a/neutron/agent/dhcp/agent.py b/neutron/agent/dhcp/agent.py
index 11154b27da2..8e3db22e3ae 100644
--- a/neutron/agent/dhcp/agent.py
+++ b/neutron/agent/dhcp/agent.py
@@ -203,7 +203,7 @@ class DhcpAgent(manager.Manager):
         try:
             network = self.plugin_rpc.get_network_info(network_id)
             if not network:
-                LOG.warn(_LW('Network %s has been deleted.'), network_id)
+                LOG.warning(_LW('Network %s has been deleted.'), network_id)
             return network
         except Exception as e:
             self.schedule_resync(e, network_id)
@@ -223,8 +223,9 @@ class DhcpAgent(manager.Manager):
             self.configure_dhcp_for_network(network)
             LOG.info(_LI('Finished network %s dhcp configuration'), network_id)
         except (exceptions.NetworkNotFound, RuntimeError):
-            LOG.warn(_LW('Network %s may have been deleted and its resources '
-                         'may have already been disposed.'), network.id)
+            LOG.warning(_LW('Network %s may have been deleted and '
+                            'its resources may have already been disposed.'),
+                        network.id)
 
     def configure_dhcp_for_network(self, network):
         if not network.admin_state_up:
@@ -585,8 +586,8 @@ class DhcpAgentWithStateReport(DhcpAgent):
                 self.schedule_resync("Agent has just been revived")
         except AttributeError:
             # This means the server does not support report_state
-            LOG.warn(_LW("Neutron server does not support state report."
-                         " State report for this agent will be disabled."))
+            LOG.warning(_LW("Neutron server does not support state report. "
+                            "State report for this agent will be disabled."))
             self.heartbeat.stop()
             self.run()
             return
diff --git a/neutron/agent/l3/agent.py b/neutron/agent/l3/agent.py
index 70333790340..ddcb50da5f1 100644
--- a/neutron/agent/l3/agent.py
+++ b/neutron/agent/l3/agent.py
@@ -362,8 +362,8 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
     def _router_removed(self, router_id):
         ri = self.router_info.get(router_id)
         if ri is None:
-            LOG.warn(_LW("Info for router %s was not found. "
-                         "Performing router cleanup"), router_id)
+            LOG.warning(_LW("Info for router %s was not found. "
" + "Performing router cleanup"), router_id) self.namespaces_manager.ensure_router_cleanup(router_id) return @@ -683,8 +683,8 @@ class L3NATAgentWithStateReport(L3NATAgent): self.agent_state.pop('start_flag', None) except AttributeError: # This means the server does not support report_state - LOG.warn(_LW("Neutron server does not support state report. " - "State report for this agent will be disabled.")) + LOG.warning(_LW("Neutron server does not support state report. " + "State report for this agent will be disabled.")) self.heartbeat.stop() return except Exception: diff --git a/neutron/agent/l3/dvr_local_router.py b/neutron/agent/l3/dvr_local_router.py index fb83eb1fd99..3ec964e2a1d 100644 --- a/neutron/agent/l3/dvr_local_router.py +++ b/neutron/agent/l3/dvr_local_router.py @@ -209,10 +209,11 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): return True else: if operation == 'add': - LOG.warn(_LW("Device %s does not exist so ARP entry " - "cannot be updated, will cache information " - "to be applied later when the device exists"), - device) + LOG.warning(_LW("Device %s does not exist so ARP entry " + "cannot be updated, will cache " + "information to be applied later " + "when the device exists"), + device) self._cache_arp_entry(ip, mac, subnet_id, operation) return False except Exception: diff --git a/neutron/agent/l3/router_info.py b/neutron/agent/l3/router_info.py index bfeddbd1f0d..f767c7d139e 100644 --- a/neutron/agent/l3/router_info.py +++ b/neutron/agent/l3/router_info.py @@ -265,8 +265,8 @@ class RouterInfo(object): except RuntimeError: # any exception occurred here should cause the floating IP # to be set in error state - LOG.warn(_LW("Unable to configure IP address for " - "floating IP: %s"), fip['id']) + LOG.warning(_LW("Unable to configure IP address for " + "floating IP: %s"), fip['id']) def add_floating_ip(self, fip, interface_name, device): raise NotImplementedError() diff --git a/neutron/agent/linux/iptables_manager.py b/neutron/agent/linux/iptables_manager.py index a1e4653d3cf..bbedb877ecf 100644 --- a/neutron/agent/linux/iptables_manager.py +++ b/neutron/agent/linux/iptables_manager.py @@ -250,10 +250,10 @@ class IptablesTable(object): top, self.wrap_name, comment=comment))) except ValueError: - LOG.warn(_LW('Tried to remove rule that was not there:' - ' %(chain)r %(rule)r %(wrap)r %(top)r'), - {'chain': chain, 'rule': rule, - 'top': top, 'wrap': wrap}) + LOG.warning(_LW('Tried to remove rule that was not there:' + ' %(chain)r %(rule)r %(wrap)r %(top)r'), + {'chain': chain, 'rule': rule, + 'top': top, 'wrap': wrap}) def _get_chain_rules(self, chain, wrap): chain = get_chain_name(chain, wrap) @@ -696,8 +696,8 @@ class IptablesManager(object): """Return the sum of the traffic counters of all rules of a chain.""" cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap) if not cmd_tables: - LOG.warn(_LW('Attempted to get traffic counters of chain %s which ' - 'does not exist'), chain) + LOG.warning(_LW('Attempted to get traffic counters of chain %s ' + 'which does not exist'), chain) return name = get_chain_name(chain, wrap) diff --git a/neutron/agent/metadata/agent.py b/neutron/agent/metadata/agent.py index ab5c93f8649..1f95cc3d7ab 100644 --- a/neutron/agent/metadata/agent.py +++ b/neutron/agent/metadata/agent.py @@ -200,7 +200,7 @@ class MetadataProxyHandler(object): req.response.body = content return req.response elif resp.status == 403: - LOG.warn(_LW( + LOG.warning(_LW( 'The remote metadata server responded with Forbidden. 
                 'The remote metadata server responded with Forbidden. This '
                 'response usually occurs when shared secrets do not match.'
             ))
@@ -215,7 +215,7 @@ class MetadataProxyHandler(object):
             msg = _(
                 'Remote metadata server experienced an internal server error.'
             )
-            LOG.warn(msg)
+            LOG.warning(msg)
             explanation = six.text_type(msg)
             return webob.exc.HTTPInternalServerError(explanation=explanation)
         else:
@@ -267,8 +267,8 @@ class UnixDomainMetadataProxy(object):
                 use_call=self.agent_state.get('start_flag'))
         except AttributeError:
             # This means the server does not support report_state
-            LOG.warn(_LW('Neutron server does not support state report.'
-                         ' State report for this agent will be disabled.'))
+            LOG.warning(_LW('Neutron server does not support state report.'
+                            ' State report for this agent will be disabled.'))
             self.heartbeat.stop()
             return
         except Exception:
diff --git a/neutron/agent/rpc.py b/neutron/agent/rpc.py
index 64a30e1a851..784f7414654 100644
--- a/neutron/agent/rpc.py
+++ b/neutron/agent/rpc.py
@@ -118,7 +118,7 @@ class PluginApi(object):
             # may not work correctly, however it can function in 'degraded'
             # mode, in that DVR routers may not be in the system yet, and
             # it might be not necessary to retrieve info about the host.
-            LOG.warn(_LW('DVR functionality requires a server upgrade.'))
+            LOG.warning(_LW('DVR functionality requires a server upgrade.'))
             res = [
                 self.get_device_details(context, device, agent_id, host)
                 for device in devices
@@ -196,7 +196,8 @@ class PluginApi(object):
             res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
                              tunnel_type=tunnel_type, host=host)
         except oslo_messaging.UnsupportedVersion:
-            LOG.warn(_LW('Tunnel synchronization requires a server upgrade.'))
+            LOG.warning(_LW('Tunnel synchronization requires a '
+                            'server upgrade.'))
             cctxt = self.client.prepare()
             res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
                              tunnel_type=tunnel_type)
diff --git a/neutron/agent/securitygroups_rpc.py b/neutron/agent/securitygroups_rpc.py
index 2f2a8b386b5..29909a1d140 100644
--- a/neutron/agent/securitygroups_rpc.py
+++ b/neutron/agent/securitygroups_rpc.py
@@ -63,8 +63,8 @@ def _is_valid_driver_combination():
 
 def is_firewall_enabled():
     if not _is_valid_driver_combination():
-        LOG.warn(_LW("Driver configuration doesn't match with "
-                     "enable_security_group"))
+        LOG.warning(_LW("Driver configuration doesn't match with "
+                        "enable_security_group"))
     return cfg.CONF.SECURITYGROUP.enable_security_group
 
 
@@ -97,8 +97,8 @@ class SecurityGroupAgentRpc(object):
         firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver or 'noop'
         LOG.debug("Init firewall settings (driver=%s)", firewall_driver)
         if not _is_valid_driver_combination():
-            LOG.warn(_LW("Driver configuration doesn't match "
-                         "with enable_security_group"))
+            LOG.warning(_LW("Driver configuration doesn't match "
+                            "with enable_security_group"))
         firewall_class = firewall.load_firewall_driver_class(firewall_driver)
         try:
             self.firewall = firewall_class(
diff --git a/neutron/api/api_common.py b/neutron/api/api_common.py
index 743c690eb4a..0252a5fb424 100644
--- a/neutron/api/api_common.py
+++ b/neutron/api/api_common.py
@@ -107,9 +107,9 @@ def _get_pagination_max_limit():
             if max_limit == 0:
                 raise ValueError()
         except ValueError:
-            LOG.warn(_LW("Invalid value for pagination_max_limit: %s. It "
-                         "should be an integer greater to 0"),
-                     cfg.CONF.pagination_max_limit)
+            LOG.warning(_LW("Invalid value for pagination_max_limit: %s. It "
It " + "should be an integer greater to 0"), + cfg.CONF.pagination_max_limit) return max_limit diff --git a/neutron/api/extensions.py b/neutron/api/extensions.py index 1a8959f910f..7f56737abde 100644 --- a/neutron/api/extensions.py +++ b/neutron/api/extensions.py @@ -532,17 +532,17 @@ class ExtensionManager(object): ext_name = mod_name[0].upper() + mod_name[1:] new_ext_class = getattr(mod, ext_name, None) if not new_ext_class: - LOG.warn(_LW('Did not find expected name ' - '"%(ext_name)s" in %(file)s'), - {'ext_name': ext_name, - 'file': ext_path}) + LOG.warning(_LW('Did not find expected name ' + '"%(ext_name)s" in %(file)s'), + {'ext_name': ext_name, + 'file': ext_path}) continue new_ext = new_ext_class() self.add_extension(new_ext) except Exception as exception: - LOG.warn(_LW("Extension file %(f)s wasn't loaded due to " - "%(exception)s"), - {'f': f, 'exception': exception}) + LOG.warning(_LW("Extension file %(f)s wasn't loaded due to " + "%(exception)s"), + {'f': f, 'exception': exception}) def add_extension(self, ext): # Do nothing if the extension doesn't check out @@ -578,9 +578,9 @@ class PluginAwareExtensionManager(ExtensionManager): alias = extension.get_alias() supports_extension = alias in self.get_supported_extension_aliases() if not supports_extension: - LOG.warn(_LW("Extension %s not supported by any of loaded " - "plugins"), - alias) + LOG.warning(_LW("Extension %s not supported by any of loaded " + "plugins"), + alias) return supports_extension def _plugins_implement_interface(self, extension): @@ -589,8 +589,9 @@ class PluginAwareExtensionManager(ExtensionManager): for plugin in self.plugins.values(): if isinstance(plugin, extension.get_plugin_interface()): return True - LOG.warn(_LW("Loaded plugins do not implement extension %s interface"), - extension.get_alias()) + LOG.warning(_LW("Loaded plugins do not implement extension " + "%s interface"), + extension.get_alias()) return False @classmethod diff --git a/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py b/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py index 06374b16949..7fb2bbc813e 100644 --- a/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py +++ b/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py @@ -70,9 +70,10 @@ class DhcpAgentNotifyAPI(object): context, 'network_create_end', {'network': {'id': network['id']}}, agent['host']) elif not existing_agents: - LOG.warn(_LW('Unable to schedule network %s: no agents available; ' - 'will retry on subsequent port and subnet creation ' - 'events.'), network['id']) + LOG.warning(_LW('Unable to schedule network %s: no agents ' + 'available; will retry on subsequent port ' + 'and subnet creation events.'), + network['id']) return new_agents + existing_agents def _get_enabled_agents(self, context, network, agents, method, payload): @@ -87,12 +88,13 @@ class DhcpAgentNotifyAPI(object): len_enabled_agents = len(enabled_agents) len_active_agents = len(active_agents) if len_active_agents < len_enabled_agents: - LOG.warn(_LW("Only %(active)d of %(total)d DHCP agents associated " - "with network '%(net_id)s' are marked as active, so " - "notifications may be sent to inactive agents."), - {'active': len_active_agents, - 'total': len_enabled_agents, - 'net_id': network_id}) + LOG.warning(_LW("Only %(active)d of %(total)d DHCP agents " + "associated with network '%(net_id)s' " + "are marked as active, so notifications " + "may be sent to inactive agents."), + {'active': len_active_agents, + 'total': len_enabled_agents, + 'net_id': network_id}) if not enabled_agents: 
             num_ports = self.plugin.get_ports_count(
                 context, {'network_id': [network_id]})
diff --git a/neutron/api/rpc/handlers/dhcp_rpc.py b/neutron/api/rpc/handlers/dhcp_rpc.py
index 2b197d33f28..8d0e969af28 100644
--- a/neutron/api/rpc/handlers/dhcp_rpc.py
+++ b/neutron/api/rpc/handlers/dhcp_rpc.py
@@ -104,9 +104,9 @@ class DhcpRpcCallback(object):
                 else:
                     ctxt.reraise = True
                     net_id = port['port']['network_id']
-                    LOG.warn(_LW("Action %(action)s for network %(net_id)s "
-                                 "could not complete successfully: %(reason)s"),
-                             {"action": action, "net_id": net_id, 'reason': e})
+                    LOG.warning(_LW("Action %(action)s for network %(net_id)s "
+                                    "could not complete successfully: %(reason)s"),
+                                {"action": action, "net_id": net_id, 'reason': e})
 
     def get_active_networks(self, context, **kwargs):
         """Retrieve and return a list of the active network ids."""
diff --git a/neutron/db/agents_db.py b/neutron/db/agents_db.py
index e4e5a85d866..6f6f405ab0d 100644
--- a/neutron/db/agents_db.py
+++ b/neutron/db/agents_db.py
@@ -198,8 +198,8 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin):
                       '%(host)s', {'agent_type': agent_type, 'host': host})
             return
         if self.is_agent_down(agent.heartbeat_timestamp):
-            LOG.warn(_LW('%(agent_type)s agent %(agent_id)s is not active'),
-                     {'agent_type': agent_type, 'agent_id': agent.id})
+            LOG.warning(_LW('%(agent_type)s agent %(agent_id)s is not active'),
+                        {'agent_type': agent_type, 'agent_id': agent.id})
         return agent
 
     @staticmethod
@@ -222,9 +222,9 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin):
         except Exception:
             msg = _LW('Dictionary %(dict_name)s for agent %(agent_type)s on '
                       'host %(host)s is invalid.')
-            LOG.warn(msg, {'dict_name': dict_name,
-                           'agent_type': agent_db.agent_type,
-                           'host': agent_db.host})
+            LOG.warning(msg, {'dict_name': dict_name,
+                              'agent_type': agent_db.agent_type,
+                              'host': agent_db.host})
             conf = {}
         return conf
 
@@ -286,11 +286,11 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin):
                              (agent['agent_type'],
                               agent['heartbeat_timestamp'],
                               agent['host']) for agent in dead_agents])
-            LOG.warn(_LW("Agent healthcheck: found %(count)s dead agents "
-                         "out of %(total)s:\n%(data)s"),
-                     {'count': len(dead_agents),
-                      'total': len(agents),
-                      'data': data})
+            LOG.warning(_LW("Agent healthcheck: found %(count)s dead agents "
+                            "out of %(total)s:\n%(data)s"),
+                        {'count': len(dead_agents),
+                         'total': len(agents),
+                         'data': data})
         else:
             LOG.debug("Agent healthcheck: found %s active agents",
                       len(agents))
diff --git a/neutron/db/agentschedulers_db.py b/neutron/db/agentschedulers_db.py
index a1b6796d27e..ec9735e1022 100644
--- a/neutron/db/agentschedulers_db.py
+++ b/neutron/db/agentschedulers_db.py
@@ -143,10 +143,11 @@ class AgentSchedulerDbMixin(agents_db.AgentDbMixin):
         tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary',
                                               timeutils.utcnow())
         if tdelta.total_seconds() > cfg.CONF.agent_down_time:
-            LOG.warn(_LW("Time since last %s agent reschedule check has "
-                         "exceeded the interval between checks. Waiting "
-                         "before check to allow agents to send a heartbeat "
-                         "in case there was a clock adjustment."), agent_type)
+            LOG.warning(_LW("Time since last %s agent reschedule check has "
+                            "exceeded the interval between checks. Waiting "
Waiting " + "before check to allow agents to send a heartbeat " + "in case there was a clock adjustment."), + agent_type) time.sleep(agent_dead_limit) self._clock_jump_canary = timeutils.utcnow() @@ -282,17 +283,17 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler active_agents = [agent for agent in agents if self.is_eligible_agent(context, True, agent)] if not active_agents: - LOG.warn(_LW("No DHCP agents available, " - "skipping rescheduling")) + LOG.warning(_LW("No DHCP agents available, " + "skipping rescheduling")) return for binding in dead_bindings: - LOG.warn(_LW("Removing network %(network)s from agent " - "%(agent)s because the agent did not report " - "to the server in the last %(dead_time)s " - "seconds."), - {'network': binding.network_id, - 'agent': binding.dhcp_agent_id, - 'dead_time': agent_dead_limit}) + LOG.warning(_LW("Removing network %(network)s from agent " + "%(agent)s because the agent did not report " + "to the server in the last %(dead_time)s " + "seconds."), + {'network': binding.network_id, + 'agent': binding.dhcp_agent_id, + 'dead_time': agent_dead_limit}) # save binding object to avoid ObjectDeletedError # in case binding is concurrently deleted from the DB saved_binding = {'net': binding.network_id, diff --git a/neutron/db/l3_agentschedulers_db.py b/neutron/db/l3_agentschedulers_db.py index 3cfe6fbd2dd..11551f6011c 100644 --- a/neutron/db/l3_agentschedulers_db.py +++ b/neutron/db/l3_agentschedulers_db.py @@ -118,7 +118,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, agents_back_online.add(binding.l3_agent_id) continue - LOG.warn(_LW( + LOG.warning(_LW( "Rescheduling router %(router)s from agent %(agent)s " "because the agent did not report to the server in " "the last %(dead_time)s seconds."), diff --git a/neutron/db/securitygroups_rpc_base.py b/neutron/db/securitygroups_rpc_base.py index 5cd22278c89..694824b429d 100644 --- a/neutron/db/securitygroups_rpc_base.py +++ b/neutron/db/securitygroups_rpc_base.py @@ -375,8 +375,8 @@ class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin): try: mac_address = query.one()[0] except (exc.NoResultFound, exc.MultipleResultsFound): - LOG.warn(_LW('No valid gateway port on subnet %s is ' - 'found for IPv6 RA'), subnet['id']) + LOG.warning(_LW('No valid gateway port on subnet %s is ' + 'found for IPv6 RA'), subnet['id']) return lla_ip = str(ipv6.get_ipv6_addr_by_EUI64( n_const.IPV6_LLA_PREFIX, diff --git a/neutron/debug/debug_agent.py b/neutron/debug/debug_agent.py index d1f069c9b6c..cda800f1b06 100644 --- a/neutron/debug/debug_agent.py +++ b/neutron/debug/debug_agent.py @@ -108,7 +108,7 @@ class NeutronDebugAgent(object): try: ip.netns.delete(namespace) except Exception: - LOG.warn(_LW('Failed to delete namespace %s'), namespace) + LOG.warning(_LW('Failed to delete namespace %s'), namespace) else: self.driver.unplug(self.driver.get_device_name(port), bridge=bridge) diff --git a/neutron/hacking/checks.py b/neutron/hacking/checks.py index d6048599829..dee63e1e932 100644 --- a/neutron/hacking/checks.py +++ b/neutron/hacking/checks.py @@ -35,7 +35,6 @@ _all_log_levels = { # a exception 'error': '_LE', 'info': '_LI', - 'warn': '_LW', 'warning': '_LW', 'critical': '_LC', 'exception': '_LE', @@ -55,6 +54,8 @@ log_translation_hint = re.compile( '|'.join('(?:%s)' % _regex_for_level(level, hint) for level, hint in six.iteritems(_all_log_levels))) +log_warn = re.compile( + r"(.)*LOG\.(warn)\(\s*('|\"|_)") contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(") @@ -218,6 +219,12 @@ def 
         yield (0, msg)
 
 
+def check_log_warn_deprecated(logical_line, filename):
+    msg = "N333: Use LOG.warning due to compatibility with py3"
+    if log_warn.match(logical_line):
+        yield (0, msg)
+
+
 def factory(register):
     register(validate_log_translations)
     register(use_jsonutils)
@@ -233,3 +240,4 @@ def factory(register):
     register(check_assertempty)
     register(check_assertisinstance)
     register(check_assertequal_for_httpcode)
+    register(check_log_warn_deprecated)
diff --git a/neutron/pecan_wsgi/controllers/resource.py b/neutron/pecan_wsgi/controllers/resource.py
index 81221b52bfb..5668a1e762c 100644
--- a/neutron/pecan_wsgi/controllers/resource.py
+++ b/neutron/pecan_wsgi/controllers/resource.py
@@ -71,7 +71,7 @@ class ItemController(utils.NeutronPecanController):
         controller = manager.NeutronManager.get_controller_for_resource(
             collection)
         if not controller:
-            LOG.warn(_LW("No controller found for: %s - returning response "
+            LOG.warning(_LW("No controller found for: %s - returning response "
                          "code 404"), collection)
             pecan.abort(404)
         return controller, remainder
diff --git a/neutron/pecan_wsgi/controllers/root.py b/neutron/pecan_wsgi/controllers/root.py
index cea8a53f0b8..38533ed538c 100644
--- a/neutron/pecan_wsgi/controllers/root.py
+++ b/neutron/pecan_wsgi/controllers/root.py
@@ -93,8 +93,8 @@ class V2Controller(object):
         controller = manager.NeutronManager.get_controller_for_resource(
             collection)
         if not controller:
-            LOG.warn(_LW("No controller found for: %s - returning response "
-                         "code 404"), collection)
+            LOG.warning(_LW("No controller found for: %s - returning response "
+                            "code 404"), collection)
             pecan.abort(404)
         # Store resource and collection names in pecan request context so that
         # hooks can leverage them if necessary. The following code uses
diff --git a/neutron/pecan_wsgi/startup.py b/neutron/pecan_wsgi/startup.py
index 23c897b9eda..90e406e0a5f 100644
--- a/neutron/pecan_wsgi/startup.py
+++ b/neutron/pecan_wsgi/startup.py
@@ -52,7 +52,7 @@ def _plugin_for_resource(collection):
                 hasattr(plugin, 'get_%s' % collection)):
             # This plugin implements this resource
             return plugin
-    LOG.warn(_LW("No plugin found for:%s"), collection)
+    LOG.warning(_LW("No plugin found for: %s"), collection)
 
 
 def _handle_plurals(collection):
@@ -127,15 +127,15 @@ def initialize_all():
                 manager.NeutronManager.set_plugin_for_resource(
                     resource, plugin)
             else:
-                LOG.warn(_LW("No plugin found for resource:%s. API calls "
-                             "may not be correctly dispatched"), resource)
+                LOG.warning(_LW("No plugin found for resource:%s. API calls "
+                                "may not be correctly dispatched"), resource)
             controller = pecan_controllers.get(collection)
             if not controller:
                 LOG.debug("Building controller for resource:%s", resource)
                 controller = res_ctrl.CollectionsController(collection,
                                                             resource)
             else:
-                LOG.debug("There are already controllers for resource:%s",
+                LOG.debug("There are already controllers for resource: %s",
                           resource)
 
             manager.NeutronManager.set_controller_for_resource(
diff --git a/neutron/plugins/hyperv/agent/security_groups_driver.py b/neutron/plugins/hyperv/agent/security_groups_driver.py
index 6f11d1ff41f..51ac293a2ea 100644
--- a/neutron/plugins/hyperv/agent/security_groups_driver.py
+++ b/neutron/plugins/hyperv/agent/security_groups_driver.py
@@ -24,10 +24,10 @@ LOG = logging.getLogger(__name__)
 # TODO(claudiub): Remove this module at the beginning of the O cycle.
 new_driver = 'hyperv.neutron.security_groups_driver.HyperVSecurityGroupsDriver'
 
-LOG.warn(_LW("You are using the deprecated firewall driver: %(deprecated)s. "
-             "Use the recommended driver %(new)s instead."),
-         {'deprecated': '%s.HyperVSecurityGroupsDriver' % __name__,
-          'new': new_driver})
+LOG.warning(_LW("You are using the deprecated firewall driver: "
+                "%(deprecated)s. Use the recommended driver %(new)s instead."),
+            {'deprecated': '%s.HyperVSecurityGroupsDriver' % __name__,
+             'new': new_driver})
 
 HyperVSecurityGroupsDriver = moves.moved_class(
     sg_driver.HyperVSecurityGroupsDriver,
diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py
index b88592bdae5..620d9c560a8 100644
--- a/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py
+++ b/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py
@@ -326,7 +326,7 @@ class SriovNicSwitchAgent(object):
                 self.ext_manager.delete_port(self.context, port)
             else:
                 LOG.warning(_LW("port_id to device with MAC "
-                            "%s not found"), mac)
+                                "%s not found"), mac)
             dev_details = self.plugin_rpc.update_device_down(self.context,
                                                              mac,
                                                              self.agent_id,
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py
index d19dc83d8ce..aa1c1dd2f40 100644
--- a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py
+++ b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py
@@ -138,8 +138,8 @@ class OpenFlowSwitchMixin(object):
         cookies = set([f.cookie for f in self.dump_flows()]) - \
             self.reserved_cookies
         for c in cookies:
-            LOG.warn(_LW("Deleting flow with cookie 0x%(cookie)x") % {
-                'cookie': c})
+            LOG.warning(_LW("Deleting flow with cookie 0x%(cookie)x"),
+                        {'cookie': c})
             self.delete_flows(cookie=c, cookie_mask=((1 << 64) - 1))
 
     def install_goto_next(self, table_id):
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py
index 04bce234a54..4209ea94747 100644
--- a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py
+++ b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py
@@ -1368,8 +1368,9 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
         # error condition of which operators should be aware
         port_needs_binding = True
         if not vif_port.ofport:
-            LOG.warn(_LW("VIF port: %s has no ofport configured, "
-                         "and might not be able to transmit"), vif_port.vif_id)
+            LOG.warning(_LW("VIF port: %s has no ofport configured, "
+                            "and might not be able to transmit"),
+                        vif_port.vif_id)
         if vif_port:
             if admin_state_up:
                 port_needs_binding = self.port_bound(
@@ -1648,7 +1649,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
         try:
             return '%08x' % netaddr.IPAddress(ip_address, version=4)
         except Exception:
-            LOG.warn(_LW("Invalid remote IP: %s"), ip_address)
+            LOG.warning(_LW("Invalid remote IP: %s"), ip_address)
             return
 
     def tunnel_sync(self):
@@ -1701,11 +1702,11 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
         # Check for the canary flow
         status = self.int_br.check_canary_table()
         if status == constants.OVS_RESTARTED:
-            LOG.warn(_LW("OVS is restarted. OVSNeutronAgent will reset "
-                         "bridges and recover ports."))
+            LOG.warning(_LW("OVS is restarted. OVSNeutronAgent will reset "
+                            "bridges and recover ports."))
         elif status == constants.OVS_DEAD:
-            LOG.warn(_LW("OVS is dead. OVSNeutronAgent will keep running "
-                         "and checking OVS status periodically."))
+            LOG.warning(_LW("OVS is dead. OVSNeutronAgent will keep running "
+                            "and checking OVS status periodically."))
         return status
 
     def loop_count_and_wait(self, start_time, port_stats):
@@ -1760,7 +1761,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
                 consecutive_resyncs = consecutive_resyncs + 1
                 if (consecutive_resyncs >=
                         constants.MAX_DEVICE_RETRIES):
-                    LOG.warn(_LW(
+                    LOG.warning(_LW(
                         "Clearing cache of registered ports,"
                         " retries to resync were > %s"),
                         constants.MAX_DEVICE_RETRIES)
diff --git a/neutron/plugins/ml2/managers.py b/neutron/plugins/ml2/managers.py
index e784ed9f05c..b70bd6f4e20 100644
--- a/neutron/plugins/ml2/managers.py
+++ b/neutron/plugins/ml2/managers.py
@@ -352,7 +352,7 @@ class MechanismManager(stevedore.named.NamedExtensionManager):
             else:
                 # at least one of drivers does not support QoS, meaning
                 # there are no rule types supported by all of them
-                LOG.warn(
+                LOG.warning(
                     _LW("%s does not support QoS; "
                         "no rule types available"),
                     driver.name)
diff --git a/neutron/policy.py b/neutron/policy.py
index 19db42364aa..58a4e6bb289 100644
--- a/neutron/policy.py
+++ b/neutron/policy.py
@@ -109,8 +109,9 @@ def _build_subattr_match_rule(attr_name, attr, action, target):
     validate = attr['validate']
    key = list(filter(lambda k: k.startswith('type:dict'), validate.keys()))
     if not key:
-        LOG.warn(_LW("Unable to find data type descriptor for attribute %s"),
-                 attr_name)
+        LOG.warning(_LW("Unable to find data type descriptor "
+                        "for attribute %s"),
+                    attr_name)
         return
     data = validate[key[0]]
     if not isinstance(data, dict):
diff --git a/neutron/quota/resource_registry.py b/neutron/quota/resource_registry.py
index 26b9fbccec1..21df72285c1 100644
--- a/neutron/quota/resource_registry.py
+++ b/neutron/quota/resource_registry.py
@@ -212,7 +212,7 @@ class ResourceRegistry(object):
 
     def register_resource(self, resource):
         if resource.name in self._resources:
-            LOG.warn(_LW('%s is already registered'), resource.name)
+            LOG.warning(_LW('%s is already registered'), resource.name)
         if resource.name in self._tracked_resource_mappings:
             resource.register_events()
         self._resources[resource.name] = resource
diff --git a/neutron/scheduler/dhcp_agent_scheduler.py b/neutron/scheduler/dhcp_agent_scheduler.py
index 5d2ce95227a..aa9250d53b6 100644
--- a/neutron/scheduler/dhcp_agent_scheduler.py
+++ b/neutron/scheduler/dhcp_agent_scheduler.py
@@ -59,7 +59,8 @@ class AutoScheduler(object):
             for dhcp_agent in dhcp_agents:
                 if agents_db.AgentDbMixin.is_agent_down(
                     dhcp_agent.heartbeat_timestamp):
-                    LOG.warn(_LW('DHCP agent %s is not active'), dhcp_agent.id)
+                    LOG.warning(_LW('DHCP agent %s is not active'),
+                                dhcp_agent.id)
                     continue
                 for net_id in net_ids:
                     agents = plugin.get_dhcp_agents_hosting_networks(
@@ -207,7 +208,7 @@ class DhcpFilter(base_resource_filter.BaseResourceFilter):
         active_dhcp_agents = plugin.get_agents_db(
             context, filters=filters)
         if not active_dhcp_agents:
-            LOG.warn(_LW('No more DHCP agents'))
+            LOG.warning(_LW('No more DHCP agents'))
             return []
         return active_dhcp_agents
 
diff --git a/neutron/scheduler/l3_agent_scheduler.py b/neutron/scheduler/l3_agent_scheduler.py
index 0033a2968e9..a9a399b6278 100644
--- a/neutron/scheduler/l3_agent_scheduler.py
+++ b/neutron/scheduler/l3_agent_scheduler.py
@@ -145,8 +145,8 @@ class L3Scheduler(object):
         target_routers = self._get_routers_can_schedule(
             context, plugin, unscheduled_routers, l3_agent)
         if not target_routers:
-            LOG.warn(_LW('No routers compatible with L3 agent configuration '
-                         'on host %s'), host)
+            LOG.warning(_LW('No routers compatible with L3 agent '
+                            'configuration on host %s'), host)
             return False
 
         self._bind_routers(context, plugin, target_routers, l3_agent)
@@ -170,14 +170,14 @@ class L3Scheduler(object):
 
         active_l3_agents = plugin.get_l3_agents(context, active=True)
         if not active_l3_agents:
-            LOG.warn(_LW('No active L3 agents'))
+            LOG.warning(_LW('No active L3 agents'))
             return []
         candidates = plugin.get_l3_agent_candidates(context,
                                                     sync_router,
                                                     active_l3_agents)
         if not candidates:
-            LOG.warn(_LW('No L3 agents can host the router %s'),
-                     sync_router['id'])
+            LOG.warning(_LW('No L3 agents can host the router %s'),
+                        sync_router['id'])
 
         return candidates
 
diff --git a/neutron/services/bgp/scheduler/bgp_dragent_scheduler.py b/neutron/services/bgp/scheduler/bgp_dragent_scheduler.py
index 27e27585664..fa27609b72c 100644
--- a/neutron/services/bgp/scheduler/bgp_dragent_scheduler.py
+++ b/neutron/services/bgp/scheduler/bgp_dragent_scheduler.py
@@ -140,7 +140,7 @@ class BgpDrAgentSchedulerBase(BgpDrAgentFilter):
 
         if agents_db.AgentDbMixin.is_agent_down(
                 bgp_dragent.heartbeat_timestamp):
-            LOG.warn(_LW('BgpDrAgent %s is down'), bgp_dragent.id)
+            LOG.warning(_LW('BgpDrAgent %s is down'), bgp_dragent.id)
             return False
 
         if self._is_bgp_speaker_hosted(context, bgp_dragent['id']):
diff --git a/neutron/services/metering/agents/metering_agent.py b/neutron/services/metering/agents/metering_agent.py
index d82afe7ac78..59fd14cf073 100644
--- a/neutron/services/metering/agents/metering_agent.py
+++ b/neutron/services/metering/agents/metering_agent.py
@@ -276,8 +276,8 @@ class MeteringAgentWithStateReport(MeteringAgent):
             self.use_call = False
         except AttributeError:
             # This means the server does not support report_state
-            LOG.warn(_LW("Neutron server does not support state report."
-                         " State report for this agent will be disabled."))
+            LOG.warning(_LW("Neutron server does not support state report. "
" + "State report for this agent will be disabled.")) self.heartbeat.stop() return except Exception: diff --git a/neutron/tests/unit/agent/dhcp/test_agent.py b/neutron/tests/unit/agent/dhcp/test_agent.py index a4e521a06b5..66c485296d7 100644 --- a/neutron/tests/unit/agent/dhcp/test_agent.py +++ b/neutron/tests/unit/agent/dhcp/test_agent.py @@ -707,7 +707,7 @@ class TestDhcpAgentEventHandler(base.BaseTestCase): def test_enable_dhcp_helper_network_none(self): self.plugin.get_network_info.return_value = None - with mock.patch.object(dhcp_agent.LOG, 'warn') as log: + with mock.patch.object(dhcp_agent.LOG, 'warning') as log: self.dhcp.enable_dhcp_helper('fake_id') self.plugin.assert_has_calls( [mock.call.get_network_info('fake_id')]) diff --git a/neutron/tests/unit/agent/linux/test_iptables_manager.py b/neutron/tests/unit/agent/linux/test_iptables_manager.py index a7a9ece875c..6183d494158 100644 --- a/neutron/tests/unit/agent/linux/test_iptables_manager.py +++ b/neutron/tests/unit/agent/linux/test_iptables_manager.py @@ -927,7 +927,7 @@ class IptablesManagerStateFulTestCase(base.BaseTestCase): def test_remove_nonexistent_rule(self): with mock.patch.object(iptables_manager, "LOG") as log: self.iptables.ipv4['filter'].remove_rule('nonexistent', '-j DROP') - log.warn.assert_called_once_with( + log.warning.assert_called_once_with( 'Tried to remove rule that was not there: ' '%(chain)r %(rule)r %(wrap)r %(top)r', {'wrap': True, 'top': False, 'rule': '-j DROP', @@ -1001,7 +1001,7 @@ class IptablesManagerStateFulTestCase(base.BaseTestCase): acc = self.iptables.get_traffic_counters('chain1') self.assertIsNone(acc) self.assertEqual(0, self.execute.call_count) - log.warn.assert_called_once_with( + log.warning.assert_called_once_with( 'Attempted to get traffic counters of chain %s which ' 'does not exist', 'chain1') diff --git a/neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py b/neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py index cd2ab2aee55..1d15196f3b0 100644 --- a/neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py +++ b/neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py @@ -51,7 +51,7 @@ class TestDhcpAgentNotifyAPI(base.BaseTestCase): new_agents = [] self.assertEqual(new_agents + existing_agents, agents) self.assertEqual(expected_casts, self.mock_cast.call_count) - self.assertEqual(expected_warnings, self.mock_log.warn.call_count) + self.assertEqual(expected_warnings, self.mock_log.warning.call_count) def test__schedule_network(self): agent = agents_db.Agent() @@ -86,7 +86,7 @@ class TestDhcpAgentNotifyAPI(base.BaseTestCase): if not cfg.CONF.enable_services_on_agents_with_admin_state_down: agents = [x for x in agents if x.admin_state_up] self.assertEqual(agents, enabled_agents) - self.assertEqual(expected_warnings, self.mock_log.warn.call_count) + self.assertEqual(expected_warnings, self.mock_log.warning.call_count) self.assertEqual(expected_errors, self.mock_log.error.call_count) def test__get_enabled_agents(self): diff --git a/neutron/tests/unit/db/test_agents_db.py b/neutron/tests/unit/db/test_agents_db.py index 9aa4f0a0ad0..740d15ad437 100644 --- a/neutron/tests/unit/db/test_agents_db.py +++ b/neutron/tests/unit/db/test_agents_db.py @@ -182,7 +182,7 @@ class TestAgentsDbMixin(TestAgentsDbBase): 'alive': True}] with mock.patch.object(self.plugin, 'get_agents', return_value=agents),\ - mock.patch.object(agents_db.LOG, 'warn') as warn,\ + mock.patch.object(agents_db.LOG, 'warning') as warn,\ 
                 mock.patch.object(agents_db.LOG, 'debug') as debug:
             self.plugin.agent_health_check()
         self.assertTrue(debug.called)
diff --git a/neutron/tests/unit/hacking/test_checks.py b/neutron/tests/unit/hacking/test_checks.py
index be1b114e745..74f98d0c811 100644
--- a/neutron/tests/unit/hacking/test_checks.py
+++ b/neutron/tests/unit/hacking/test_checks.py
@@ -29,7 +29,6 @@ class HackingTestCase(base.BaseTestCase):
         expected_marks = {
             'error': '_LE',
             'info': '_LI',
-            'warn': '_LW',
             'warning': '_LW',
             'critical': '_LC',
             'exception': '_LE',
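
For reference, the deprecation this patch works around can be reproduced
directly. The snippet below is an illustrative sketch only, not part of the
patch; it assumes a stock CPython 3.4+ interpreter, where Logger.warn is a
deprecated alias that emits a DeprecationWarning before delegating to
Logger.warning:

    import logging
    import warnings

    LOG = logging.getLogger("demo")

    with warnings.catch_warnings(record=True) as caught:
        # DeprecationWarning is ignored by default, so surface it here.
        warnings.simplefilter("always")
        LOG.warn("deprecated spelling")       # alias; triggers the warning
        LOG.warning("preferred spelling")     # what this patch converts to

    # Only the deprecated alias produced a DeprecationWarning.
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)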
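
The new N333 check can also be exercised standalone. This sketch copies the
log_warn regex and check_log_warn_deprecated from neutron/hacking/checks.py
above; the sample lines and the "foo.py" filename are hypothetical:

    import re

    log_warn = re.compile(
        r"(.)*LOG\.(warn)\(\s*('|\"|_)")

    def check_log_warn_deprecated(logical_line, filename):
        msg = "N333: Use LOG.warning due to compatibility with py3"
        if log_warn.match(logical_line):
            yield (0, msg)

    # Flagged: LOG.warn followed by a literal or a translated (_LW) message.
    assert list(check_log_warn_deprecated("LOG.warn(_LW('down'))", "foo.py"))
    assert list(check_log_warn_deprecated("    LOG.warn('down')", "foo.py"))
    # Not flagged: the preferred LOG.warning spelling.
    assert not list(check_log_warn_deprecated("LOG.warning(_LW('down'))",
                                              "foo.py"))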