diff --git a/neutron/_i18n.py b/neutron/_i18n.py index 5b9b5fa2fdd..65c95c0dc19 100644 --- a/neutron/_i18n.py +++ b/neutron/_i18n.py @@ -27,16 +27,6 @@ _C = _translators.contextual_form # The plural translation function using the name "_P" _P = _translators.plural_form -# Translators for log levels. -# -# The abbreviated names are meant to reflect the usual use of a short -# name like '_'. The "L" is for "log" and the other letter comes from -# the level. -_LI = _translators.log_info -_LW = _translators.log_warning -_LE = _translators.log_error -_LC = _translators.log_critical - def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) diff --git a/neutron/agent/agent_extensions_manager.py b/neutron/agent/agent_extensions_manager.py index 70a65376efc..a4758da782d 100644 --- a/neutron/agent/agent_extensions_manager.py +++ b/neutron/agent/agent_extensions_manager.py @@ -13,7 +13,6 @@ from oslo_log import log import stevedore -from neutron._i18n import _LI from neutron.conf.agent import agent_extensions_manager as agent_ext_mgr_config LOG = log.getLogger(__name__) @@ -28,7 +27,7 @@ class AgentExtensionsManager(stevedore.named.NamedExtensionManager): super(AgentExtensionsManager, self).__init__( namespace, conf.agent.extensions, invoke_on_load=True, name_order=True) - LOG.info(_LI("Loaded agent extensions: %s"), self.names()) + LOG.info("Loaded agent extensions: %s", self.names()) def initialize(self, connection, driver_type, agent_api=None): """Initialize enabled agent extensions. @@ -44,7 +43,7 @@ class AgentExtensionsManager(stevedore.named.NamedExtensionManager): """ # Initialize each agent extension in the list. for extension in self: - LOG.info(_LI("Initializing agent extension '%s'"), extension.name) + LOG.info("Initializing agent extension '%s'", extension.name) # If the agent has provided an agent_api object, this object will # be passed to all interested extensions. 
This object must be # consumed by each such extension before the extension's diff --git a/neutron/agent/common/ovs_lib.py b/neutron/agent/common/ovs_lib.py index cce168f6cb3..88412e3c941 100644 --- a/neutron/agent/common/ovs_lib.py +++ b/neutron/agent/common/ovs_lib.py @@ -27,7 +27,7 @@ from oslo_log import log as logging import six import tenacity -from neutron._i18n import _, _LE, _LI, _LW +from neutron._i18n import _ from neutron.agent.common import ip_lib from neutron.agent.common import utils from neutron.agent.ovsdb import api as ovsdb_api @@ -298,8 +298,8 @@ class OVSBridge(BaseOVS): "in 1 second. Attempt: %s/10", i) time.sleep(1) continue - LOG.error(_LE("Unable to execute %(cmd)s. Exception: " - "%(exception)s"), + LOG.error("Unable to execute %(cmd)s. Exception: " + "%(exception)s", {'cmd': full_args, 'exception': e}) break @@ -320,7 +320,7 @@ class OVSBridge(BaseOVS): try: ofport = self._get_port_val(port_name, "ofport") except tenacity.RetryError: - LOG.exception(_LE("Timed out retrieving ofport on port %s."), + LOG.exception("Timed out retrieving ofport on port %s.", port_name) return ofport @@ -330,7 +330,7 @@ class OVSBridge(BaseOVS): try: port_external_ids = self._get_port_val(port_name, "external_ids") except tenacity.RetryError: - LOG.exception(_LE("Timed out retrieving external_ids on port %s."), + LOG.exception("Timed out retrieving external_ids on port %s.", port_name) return port_external_ids @@ -526,10 +526,10 @@ class OVSBridge(BaseOVS): if_exists=True) for result in results: if result['ofport'] == UNASSIGNED_OFPORT: - LOG.warning(_LW("Found not yet ready openvswitch port: %s"), + LOG.warning("Found not yet ready openvswitch port: %s", result['name']) elif result['ofport'] == INVALID_OFPORT: - LOG.warning(_LW("Found failed openvswitch port: %s"), + LOG.warning("Found failed openvswitch port: %s", result['name']) elif 'attached-mac' in result['external_ids']: port_id = self.portid_from_external_ids(result['external_ids']) @@ -569,8 +569,8 @@ 
class OVSBridge(BaseOVS): for port_id in port_ids: result[port_id] = None if port_id not in by_id: - LOG.info(_LI("Port %(port_id)s not present in bridge " - "%(br_name)s"), + LOG.info("Port %(port_id)s not present in bridge " + "%(br_name)s", {'port_id': port_id, 'br_name': self.br_name}) continue pinfo = by_id[port_id] @@ -584,8 +584,8 @@ class OVSBridge(BaseOVS): @staticmethod def _check_ofport(port_id, port_info): if port_info['ofport'] in [UNASSIGNED_OFPORT, INVALID_OFPORT]: - LOG.warning(_LW("ofport: %(ofport)s for VIF: %(vif)s " - "is not a positive integer"), + LOG.warning("ofport: %(ofport)s for VIF: %(vif)s " + "is not a positive integer", {'ofport': port_info['ofport'], 'vif': port_id}) return False return True @@ -602,7 +602,7 @@ class OVSBridge(BaseOVS): continue mac = port['external_ids'].get('attached-mac') return VifPort(port['name'], port['ofport'], port_id, mac, self) - LOG.info(_LI("Port %(port_id)s not present in bridge %(br_name)s"), + LOG.info("Port %(port_id)s not present in bridge %(br_name)s", {'port_id': port_id, 'br_name': self.br_name}) def delete_ports(self, all_ports=False): @@ -837,7 +837,7 @@ class DeferredOVSBridge(object): if exc_type is None: self.apply_flows() else: - LOG.exception(_LE("OVS flows could not be applied on bridge %s"), + LOG.exception("OVS flows could not be applied on bridge %s", self.br.br_name) diff --git a/neutron/agent/common/utils.py b/neutron/agent/common/utils.py index ebc13634750..cafef1f2396 100644 --- a/neutron/agent/common/utils.py +++ b/neutron/agent/common/utils.py @@ -19,7 +19,6 @@ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils -from neutron._i18n import _LE from neutron.common import utils as neutron_utils from neutron.conf.agent import common as config from neutron.conf.agent.database import agents_db @@ -53,7 +52,7 @@ def load_interface_driver(conf): INTERFACE_NAMESPACE, conf.interface_driver) return loaded_class(conf) except ImportError: - 
LOG.error(_LE("Error loading interface driver '%s'"), + LOG.error("Error loading interface driver '%s'", conf.interface_driver) raise SystemExit(1) diff --git a/neutron/agent/dhcp/agent.py b/neutron/agent/dhcp/agent.py index 515e35e4501..6e33239b5c4 100644 --- a/neutron/agent/dhcp/agent.py +++ b/neutron/agent/dhcp/agent.py @@ -29,7 +29,7 @@ from oslo_utils import fileutils from oslo_utils import importutils import six -from neutron._i18n import _, _LE, _LI, _LW +from neutron._i18n import _ from neutron.agent.linux import dhcp from neutron.agent.linux import external_process from neutron.agent.metadata import driver as metadata_driver @@ -120,7 +120,7 @@ class DhcpAgent(manager.Manager): def after_start(self): self.run() - LOG.info(_LI("DHCP agent started")) + LOG.info("DHCP agent started") def run(self): """Activate the DHCP agent.""" @@ -164,7 +164,7 @@ class DhcpAgent(manager.Manager): or isinstance(e, exceptions.NetworkNotFound)): LOG.debug("Network %s has been deleted.", network.id) else: - LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.'), + LOG.exception('Unable to %(action)s dhcp for %(net_id)s.', {'net_id': network.id, 'action': action}) def schedule_resync(self, reason, network_id=None): @@ -179,21 +179,21 @@ class DhcpAgent(manager.Manager): or 'None' is one of the networks, sync all of the networks. 
""" only_nets = set([] if (not networks or None in networks) else networks) - LOG.info(_LI('Synchronizing state')) + LOG.info('Synchronizing state') pool = eventlet.GreenPool(self.conf.num_sync_threads) known_network_ids = set(self.cache.get_network_ids()) try: active_networks = self.plugin_rpc.get_active_networks_info() - LOG.info(_LI('All active networks have been fetched through RPC.')) + LOG.info('All active networks have been fetched through RPC.') active_network_ids = set(network.id for network in active_networks) for deleted_id in known_network_ids - active_network_ids: try: self.disable_dhcp_helper(deleted_id) except Exception as e: self.schedule_resync(e, deleted_id) - LOG.exception(_LE('Unable to sync network state on ' - 'deleted network %s'), deleted_id) + LOG.exception('Unable to sync network state on ' + 'deleted network %s', deleted_id) for network in active_networks: if (not only_nets or # specifically resync all @@ -204,7 +204,7 @@ class DhcpAgent(manager.Manager): # we notify all ports in case some were created while the agent # was down self.dhcp_ready_ports |= set(self.cache.get_port_ids(only_nets)) - LOG.info(_LI('Synchronizing state complete')) + LOG.info('Synchronizing state complete') except Exception as e: if only_nets: @@ -212,7 +212,7 @@ class DhcpAgent(manager.Manager): self.schedule_resync(e, network_id) else: self.schedule_resync(e) - LOG.exception(_LE('Unable to sync network state.')) + LOG.exception('Unable to sync network state.') def _dhcp_ready_ports_loop(self): """Notifies the server of any ports that had reservations setup.""" @@ -226,12 +226,12 @@ class DhcpAgent(manager.Manager): self.plugin_rpc.dhcp_ready_on_ports(ports_to_send) continue except oslo_messaging.MessagingTimeout: - LOG.error(_LE("Timeout notifying server of ports ready. " - "Retrying...")) + LOG.error("Timeout notifying server of ports ready. " + "Retrying...") except Exception: - LOG.exception(_LE("Failure notifying DHCP server of " - "ready DHCP ports. 
Will retry on next " - "iteration.")) + LOG.exception("Failure notifying DHCP server of " + "ready DHCP ports. Will retry on next " + "iteration.") self.dhcp_ready_ports |= ports_to_send def start_ready_ports_loop(self): @@ -267,7 +267,7 @@ class DhcpAgent(manager.Manager): return network except Exception as e: self.schedule_resync(e, network_id) - LOG.exception(_LE('Network %s info call failed.'), network_id) + LOG.exception('Network %s info call failed.', network_id) def enable_dhcp_helper(self, network_id): """Enable DHCP for a network that meets enabling criteria.""" @@ -279,12 +279,12 @@ class DhcpAgent(manager.Manager): def safe_configure_dhcp_for_network(self, network): try: network_id = network.get('id') - LOG.info(_LI('Starting network %s dhcp configuration'), network_id) + LOG.info('Starting network %s dhcp configuration', network_id) self.configure_dhcp_for_network(network) - LOG.info(_LI('Finished network %s dhcp configuration'), network_id) + LOG.info('Finished network %s dhcp configuration', network_id) except (exceptions.NetworkNotFound, RuntimeError): - LOG.warning(_LW('Network %s may have been deleted and ' - 'its resources may have already been disposed.'), + LOG.warning('Network %s may have been deleted and ' + 'its resources may have already been disposed.', network.id) def configure_dhcp_for_network(self, network): @@ -411,7 +411,7 @@ class DhcpAgent(manager.Manager): network = self.cache.get_network_by_id(updated_port.network_id) if not network: return - LOG.info(_LI("Trigger reload_allocations for port %s"), + LOG.info("Trigger reload_allocations for port %s", updated_port) driver_action = 'reload_allocations' if self._is_port_on_this_agent(updated_port): @@ -498,10 +498,10 @@ class DhcpAgent(manager.Manager): if router_ports: # Multiple router ports should not be allowed if len(router_ports) > 1: - LOG.warning(_LW("%(port_num)d router ports found on the " - "metadata access network. 
Only the port " - "%(port_id)s, for router %(router_id)s " - "will be considered"), + LOG.warning("%(port_num)d router ports found on the " + "metadata access network. Only the port " + "%(port_id)s, for router %(router_id)s " + "will be considered", {'port_num': len(router_ports), 'port_id': router_ports[0].id, 'router_id': router_ports[0].device_id}) @@ -733,18 +733,18 @@ class DhcpAgentWithStateReport(DhcpAgent): agent_status = self.state_rpc.report_state( ctx, self.agent_state, True) if agent_status == n_const.AGENT_REVIVED: - LOG.info(_LI("Agent has just been revived. " - "Scheduling full sync")) + LOG.info("Agent has just been revived. " + "Scheduling full sync") self.schedule_resync("Agent has just been revived") except AttributeError: # This means the server does not support report_state - LOG.warning(_LW("Neutron server does not support state report. " - "State report for this agent will be disabled.")) + LOG.warning("Neutron server does not support state report. " + "State report for this agent will be disabled.") self.heartbeat.stop() self.run() return except Exception: - LOG.exception(_LE("Failed reporting state!")) + LOG.exception("Failed reporting state!") return if self.agent_state.pop('start_flag', None): self.run() @@ -753,7 +753,7 @@ class DhcpAgentWithStateReport(DhcpAgent): """Handle the agent_updated notification event.""" self.schedule_resync(_("Agent updated: %(payload)s") % {"payload": payload}) - LOG.info(_LI("agent_updated by server side %s!"), payload) + LOG.info("agent_updated by server side %s!", payload) def after_start(self): - LOG.info(_LI("DHCP agent started")) + LOG.info("DHCP agent started") diff --git a/neutron/agent/l2/extensions/fdb_population.py b/neutron/agent/l2/extensions/fdb_population.py index 9e34ab8c7b6..2c2f5253210 100644 --- a/neutron/agent/l2/extensions/fdb_population.py +++ b/neutron/agent/l2/extensions/fdb_population.py @@ -20,7 +20,6 @@ from neutron_lib.utils import helpers from oslo_config import cfg from 
oslo_log import log as logging -from neutron._i18n import _LE, _LW from neutron.agent.l2 import l2_agent_extension from neutron.agent.linux import bridge_lib from neutron.conf.agent import l2_ext_fdb_population @@ -73,9 +72,9 @@ class FdbPopulationAgentExtension( try: _stdout = bridge_lib.FdbInterface.show(device) except RuntimeError as e: - LOG.warning(_LW( + LOG.warning( 'Unable to find FDB Interface %(device)s. ' - 'Exception: %(e)s'), {'device': device, 'e': e}) + 'Exception: %(e)s', {'device': device, 'e': e}) continue self.device_to_macs[device] = _stdout.split()[::3] @@ -94,10 +93,10 @@ class FdbPopulationAgentExtension( try: bridge_lib.FdbInterface.add(mac, device) except RuntimeError as e: - LOG.warning(_LW( + LOG.warning( 'Unable to add mac %(mac)s ' 'to FDB Interface %(device)s. ' - 'Exception: %(e)s'), + 'Exception: %(e)s', {'mac': mac, 'device': device, 'e': e}) return self.device_to_macs[device].append(mac) @@ -105,19 +104,19 @@ class FdbPopulationAgentExtension( def delete_port(self, devices, port_id): mac = self.portid_to_mac.get(port_id) if mac is None: - LOG.warning(_LW('Port Id %(port_id)s does not have a rule for ' - 'devices %(devices)s in FDB table'), - {'port_id': port_id, 'devices': devices}) + LOG.warning('Port Id %(port_id)s does not have a rule for ' + 'devices %(devices)s in FDB table', + {'port_id': port_id, 'devices': devices}) return for device in devices: if mac in self.device_to_macs[device]: try: bridge_lib.FdbInterface.delete(mac, device) except RuntimeError as e: - LOG.warning(_LW( + LOG.warning( 'Unable to delete mac %(mac)s ' 'from FDB Interface %(device)s. 
' - 'Exception: %(e)s'), + 'Exception: %(e)s', {'mac': mac, 'device': device, 'e': e}) return self.device_to_macs[device].remove(mac) @@ -129,17 +128,17 @@ class FdbPopulationAgentExtension( valid_driver_types = (linux_bridge_constants.EXTENSION_DRIVER_TYPE, ovs_constants.EXTENSION_DRIVER_TYPE) if driver_type not in valid_driver_types: - LOG.error(_LE('FDB extension is only supported for OVS and ' - 'linux bridge agent, currently uses ' - '%(driver_type)s'), {'driver_type': driver_type}) + LOG.error('FDB extension is only supported for OVS and ' + 'linux bridge agent, currently uses ' + '%(driver_type)s', {'driver_type': driver_type}) sys.exit(1) self.device_mappings = helpers.parse_mappings( cfg.CONF.FDB.shared_physical_device_mappings, unique_keys=False) devices = self._get_devices() if not devices: - LOG.error(_LE('Invalid configuration provided for FDB extension: ' - 'no physical devices')) + LOG.error('Invalid configuration provided for FDB extension: ' + 'no physical devices') sys.exit(1) self.fdb_tracker = self.FdbTableTracker(devices) diff --git a/neutron/agent/l2/extensions/qos.py b/neutron/agent/l2/extensions/qos.py index 590938c0bed..ca3a7f00ec6 100644 --- a/neutron/agent/l2/extensions/qos.py +++ b/neutron/agent/l2/extensions/qos.py @@ -20,7 +20,6 @@ from oslo_concurrency import lockutils from oslo_log import log as logging import six -from neutron._i18n import _LW, _LI from neutron.agent.l2 import l2_agent_extension from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import events @@ -107,8 +106,8 @@ class QosAgentDriver(object): if rule_type in self.SUPPORTED_RULES: yield rule else: - LOG.warning(_LW('Unsupported QoS rule type for %(rule_id)s: ' - '%(rule_type)s; skipping'), + LOG.warning('Unsupported QoS rule type for %(rule_id)s: ' + '%(rule_type)s; skipping', {'rule_id': rule.id, 'rule_type': rule_type}) def _handle_rule_delete(self, port, rule_type, ingress=False): @@ -261,9 +260,9 @@ class 
QosAgentExtension(l2_agent_extension.L2AgentExtension): qos_policy = self.resource_rpc.pull( context, resources.QOS_POLICY, qos_policy_id) if qos_policy is None: - LOG.info(_LI("QoS policy %(qos_policy_id)s applied to port " - "%(port_id)s is not available on server, " - "it has been deleted. Skipping."), + LOG.info("QoS policy %(qos_policy_id)s applied to port " + "%(port_id)s is not available on server, " + "it has been deleted. Skipping.", {'qos_policy_id': qos_policy_id, 'port_id': port_id}) self._process_reset_port(port) else: diff --git a/neutron/agent/l2/l2_agent_extensions_manager.py b/neutron/agent/l2/l2_agent_extensions_manager.py index 4040b75fd99..8a2511b6545 100644 --- a/neutron/agent/l2/l2_agent_extensions_manager.py +++ b/neutron/agent/l2/l2_agent_extensions_manager.py @@ -12,7 +12,6 @@ from oslo_log import log -from neutron._i18n import _LE from neutron.agent import agent_extensions_manager as agent_ext_manager from neutron.conf.agent import agent_extensions_manager as agent_ext_mgr_config @@ -43,8 +42,8 @@ class L2AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager): extension.obj.handle_port(context, data) else: LOG.error( - _LE("Agent Extension '%(name)s' does not " - "implement method handle_port"), + "Agent Extension '%(name)s' does not " + "implement method handle_port", {'name': extension.name} ) @@ -55,7 +54,7 @@ class L2AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager): extension.obj.delete_port(context, data) else: LOG.error( - _LE("Agent Extension '%(name)s' does not " - "implement method delete_port"), + "Agent Extension '%(name)s' does not " + "implement method delete_port", {'name': extension.name} ) diff --git a/neutron/agent/l3/agent.py b/neutron/agent/l3/agent.py index 12a3ee3f6d5..3e20ed34aaa 100644 --- a/neutron/agent/l3/agent.py +++ b/neutron/agent/l3/agent.py @@ -33,7 +33,7 @@ from oslo_utils import excutils from oslo_utils import timeutils from osprofiler import profiler -from neutron._i18n import _, 
_LE, _LI, _LW +from neutron._i18n import _ from neutron.agent.common import utils as common_utils from neutron.agent.l3 import dvr from neutron.agent.l3 import dvr_edge_ha_router @@ -221,20 +221,20 @@ class L3NATAgent(ha.AgentMixin, self.neutron_service_plugins = ( self.plugin_rpc.get_service_plugin_list(self.context)) except oslo_messaging.RemoteError as e: - LOG.warning(_LW('l3-agent cannot check service plugins ' - 'enabled at the neutron server when ' - 'startup due to RPC error. It happens ' - 'when the server does not support this ' - 'RPC API. If the error is ' - 'UnsupportedVersion you can ignore this ' - 'warning. Detail message: %s'), e) + LOG.warning('l3-agent cannot check service plugins ' + 'enabled at the neutron server when ' + 'startup due to RPC error. It happens ' + 'when the server does not support this ' + 'RPC API. If the error is ' + 'UnsupportedVersion you can ignore this ' + 'warning. Detail message: %s', e) self.neutron_service_plugins = None except oslo_messaging.MessagingTimeout as e: - LOG.warning(_LW('l3-agent cannot contact neutron server ' - 'to retrieve service plugins enabled. ' - 'Check connectivity to neutron server. ' - 'Retrying... ' - 'Detailed message: %(msg)s.'), {'msg': e}) + LOG.warning('l3-agent cannot contact neutron server ' + 'to retrieve service plugins enabled. ' + 'Check connectivity to neutron server. ' + 'Retrying... ' + 'Detailed message: %(msg)s.', {'msg': e}) continue break @@ -272,15 +272,15 @@ class L3NATAgent(ha.AgentMixin, The actual values are not verified for correctness. """ if not self.conf.interface_driver: - msg = _LE('An interface driver must be specified') + msg = 'An interface driver must be specified' LOG.error(msg) raise SystemExit(1) if self.conf.ipv6_gateway: # ipv6_gateway configured. Check for valid v6 link-local address. 
try: - msg = _LE("%s used in config as ipv6_gateway is not a valid " - "IPv6 link-local address."), + msg = ("%s used in config as ipv6_gateway is not a valid " + "IPv6 link-local address.") ip_addr = netaddr.IPAddress(self.conf.ipv6_gateway) if ip_addr.version != 6 or not ip_addr.is_link_local(): LOG.error(msg, self.conf.ipv6_gateway) @@ -361,13 +361,13 @@ class L3NATAgent(ha.AgentMixin, except Exception: with excutils.save_and_reraise_exception(): del self.router_info[router_id] - LOG.exception(_LE('Error while initializing router %s'), + LOG.exception('Error while initializing router %s', router_id) self.namespaces_manager.ensure_router_cleanup(router_id) try: ri.delete() except Exception: - LOG.exception(_LE('Error while deleting router %s'), + LOG.exception('Error while deleting router %s', router_id) def _safe_router_removed(self, router_id): @@ -377,7 +377,7 @@ class L3NATAgent(ha.AgentMixin, self._router_removed(router_id) self.l3_ext_manager.delete_router(self.context, router_id) except Exception: - LOG.exception(_LE('Error while deleting router %s'), router_id) + LOG.exception('Error while deleting router %s', router_id) return False else: return True @@ -385,8 +385,8 @@ class L3NATAgent(ha.AgentMixin, def _router_removed(self, router_id): ri = self.router_info.get(router_id) if ri is None: - LOG.warning(_LW("Info for router %s was not found. " - "Performing router cleanup"), router_id) + LOG.warning("Info for router %s was not found. 
" + "Performing router cleanup", router_id) self.namespaces_manager.ensure_router_cleanup(router_id) return @@ -451,7 +451,7 @@ class L3NATAgent(ha.AgentMixin, def _process_router_if_compatible(self, router): if (self.conf.external_network_bridge and not ip_lib.device_exists(self.conf.external_network_bridge)): - LOG.error(_LE("The external network bridge '%s' does not exist"), + LOG.error("The external network bridge '%s' does not exist", self.conf.external_network_bridge) return @@ -513,7 +513,7 @@ class L3NATAgent(ha.AgentMixin, routers = self.plugin_rpc.get_routers(self.context, [update.id]) except Exception: - msg = _LE("Failed to fetch router information for '%s'") + msg = "Failed to fetch router information for '%s'" LOG.exception(msg, update.id) self._resync_router(update) continue @@ -540,12 +540,12 @@ class L3NATAgent(ha.AgentMixin, log_verbose_exc(e.msg, router) # Was the router previously handled by this agent? if router['id'] in self.router_info: - LOG.error(_LE("Removing incompatible router '%s'"), + LOG.error("Removing incompatible router '%s'", router['id']) self._safe_router_removed(router['id']) except Exception: log_verbose_exc( - _LE("Failed to process compatible router: %s") % update.id, + "Failed to process compatible router: %s" % update.id, router) self._resync_router(update) continue @@ -632,20 +632,20 @@ class L3NATAgent(ha.AgentMixin, self.sync_routers_chunk_size = max( self.sync_routers_chunk_size / 2, SYNC_ROUTERS_MIN_CHUNK_SIZE) - LOG.error(_LE('Server failed to return info for routers in ' - 'required time, decreasing chunk size to: %s'), + LOG.error('Server failed to return info for routers in ' + 'required time, decreasing chunk size to: %s', self.sync_routers_chunk_size) else: - LOG.error(_LE('Server failed to return info for routers in ' - 'required time even with min chunk size: %s. 
' - 'It might be under very high load or ' - 'just inoperable'), + LOG.error('Server failed to return info for routers in ' + 'required time even with min chunk size: %s. ' + 'It might be under very high load or ' + 'just inoperable', self.sync_routers_chunk_size) raise except oslo_messaging.MessagingException: failed_routers = chunk or router_ids - LOG.exception(_LE("Failed synchronizing routers '%s' " - "due to RPC error"), failed_routers) + LOG.exception("Failed synchronizing routers '%s' " + "due to RPC error", failed_routers) raise n_exc.AbortSyncRouters() self.fullsync = False @@ -679,7 +679,7 @@ class L3NATAgent(ha.AgentMixin, # can have L3NATAgentWithStateReport as its base class instead of # L3NATAgent. eventlet.spawn_n(self._process_routers_loop) - LOG.info(_LI("L3 agent started")) + LOG.info("L3 agent started") def create_pd_router_update(self): router_id = None @@ -741,22 +741,22 @@ class L3NATAgentWithStateReport(L3NATAgent): self.agent_state, True) if agent_status == l3_constants.AGENT_REVIVED: - LOG.info(_LI('Agent has just been revived. ' - 'Doing a full sync.')) + LOG.info('Agent has just been revived. ' + 'Doing a full sync.') self.fullsync = True self.agent_state.pop('start_flag', None) except AttributeError: # This means the server does not support report_state - LOG.warning(_LW("Neutron server does not support state report. " - "State report for this agent will be disabled.")) + LOG.warning("Neutron server does not support state report. " + "State report for this agent will be disabled.") self.heartbeat.stop() return except Exception: - LOG.exception(_LE("Failed reporting state!")) + LOG.exception("Failed reporting state!") def after_start(self): eventlet.spawn_n(self._process_routers_loop) - LOG.info(_LI("L3 agent started")) + LOG.info("L3 agent started") # Do the report state before we do the first full sync. 
self._report_state() @@ -765,4 +765,4 @@ class L3NATAgentWithStateReport(L3NATAgent): def agent_updated(self, context, payload): """Handle the agent_updated notification event.""" self.fullsync = True - LOG.info(_LI("agent_updated by server side %s!"), payload) + LOG.info("agent_updated by server side %s!", payload) diff --git a/neutron/agent/l3/dvr_edge_router.py b/neutron/agent/l3/dvr_edge_router.py index 6287a283f9e..9e6125e544a 100644 --- a/neutron/agent/l3/dvr_edge_router.py +++ b/neutron/agent/l3/dvr_edge_router.py @@ -15,7 +15,6 @@ from neutron_lib import constants as lib_constants from oslo_log import log as logging -from neutron._i18n import _LE from neutron.agent.l3 import dvr_local_router from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import router_info as router @@ -211,8 +210,8 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): super(DvrEdgeRouter, self)._update_routing_table( operation, route, namespace=ns_name) else: - LOG.error(_LE("The SNAT namespace %s does not exist for " - "the router."), ns_name) + LOG.error("The SNAT namespace %s does not exist for " + "the router.", ns_name) super(DvrEdgeRouter, self).update_routing_table(operation, route) def delete(self): diff --git a/neutron/agent/l3/dvr_fip_ns.py b/neutron/agent/l3/dvr_fip_ns.py index 327fc73e6cb..47db5215204 100644 --- a/neutron/agent/l3/dvr_fip_ns.py +++ b/neutron/agent/l3/dvr_fip_ns.py @@ -20,7 +20,7 @@ from oslo_concurrency import lockutils from oslo_log import log as logging from oslo_utils import excutils -from neutron._i18n import _, _LE, _LW +from neutron._i18n import _ from neutron.agent.l3 import fip_rule_priority_allocator as frpa from neutron.agent.l3 import link_local_allocator as lla from neutron.agent.l3 import namespaces @@ -117,8 +117,8 @@ class FipNamespace(namespaces.Namespace): yield except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('DVR: FIP namespace config failure ' - 'for interface %s'), interface_name) + 
LOG.error('DVR: FIP namespace config failure ' + 'for interface %s', interface_name) def create_or_update_gateway_port(self, agent_gateway_port): interface_name = self.get_ext_device_name(agent_gateway_port['id']) @@ -147,8 +147,8 @@ class FipNamespace(namespaces.Namespace): with excutils.save_and_reraise_exception(): self.unsubscribe(agent_gateway_port['network_id']) self.delete() - LOG.exception(_LE('DVR: Gateway update in ' - 'FIP namespace failed')) + LOG.exception('DVR: Gateway update in ' + 'FIP namespace failed') def _create_gateway_port(self, ex_gw_port, interface_name): """Create namespace, request port creationg from Plugin, @@ -296,8 +296,8 @@ class FipNamespace(namespaces.Namespace): with excutils.save_and_reraise_exception(): self.unsubscribe(self.agent_gateway_port['network_id']) self.agent_gateway_port = None - LOG.exception(_LE('DVR: Gateway setup in FIP namespace ' - 'failed')) + LOG.exception('DVR: Gateway setup in FIP namespace ' + 'failed') # Now add the filter match rule for the table. ip_rule = ip_lib.IPRule(namespace=self.get_name()) @@ -328,10 +328,10 @@ class FipNamespace(namespaces.Namespace): # throw exceptions. Unsubscribe this external network so that # the next call will trigger the interface to be plugged. 
if not ipd.exists(): - LOG.warning(_LW('DVR: FIP gateway port with interface ' - 'name: %(device)s does not exist in the given ' - 'namespace: %(ns)s'), {'device': interface_name, - 'ns': ns_name}) + LOG.warning('DVR: FIP gateway port with interface ' + 'name: %(device)s does not exist in the given ' + 'namespace: %(ns)s', {'device': interface_name, + 'ns': ns_name}) msg = _('DVR: Gateway update route in FIP namespace failed, retry ' 'should be attempted on next call') raise n_exc.FloatingIpSetupException(msg) diff --git a/neutron/agent/l3/dvr_local_router.py b/neutron/agent/l3/dvr_local_router.py index c8fea766aa4..ae46c86dd4d 100644 --- a/neutron/agent/l3/dvr_local_router.py +++ b/neutron/agent/l3/dvr_local_router.py @@ -22,7 +22,6 @@ from oslo_log import log as logging from oslo_utils import excutils import six -from neutron._i18n import _LE, _LW from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_router_base from neutron.agent.linux import ip_lib @@ -239,16 +238,16 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): return True else: if operation == 'add': - LOG.warning(_LW("Device %s does not exist so ARP entry " - "cannot be updated, will cache " - "information to be applied later " - "when the device exists"), + LOG.warning("Device %s does not exist so ARP entry " + "cannot be updated, will cache " + "information to be applied later " + "when the device exists", device) self._cache_arp_entry(ip, mac, subnet_id, operation) return False except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("DVR: Failed updating arp entry")) + LOG.exception("DVR: Failed updating arp entry") def _set_subnet_arp_info(self, subnet_id): """Set ARP info retrieved from Plugin for existing ports.""" @@ -356,10 +355,10 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): priority=snat_idx) except Exception: if is_add: - exc = _LE('DVR: error adding redirection logic') + exc = 'DVR: error adding redirection logic' else: - exc = 
_LE('DVR: snat remove failed to clear the rule ' - 'and device') + exc = ('DVR: snat remove failed to clear the rule ' + 'and device') LOG.exception(exc) def _snat_redirect_add(self, gateway, sn_port, sn_int): diff --git a/neutron/agent/l3/dvr_router_base.py b/neutron/agent/l3/dvr_router_base.py index dcb06639c1f..9f1ba5a1ec9 100644 --- a/neutron/agent/l3/dvr_router_base.py +++ b/neutron/agent/l3/dvr_router_base.py @@ -12,7 +12,6 @@ from oslo_log import log as logging -from neutron._i18n import _LE from neutron.agent.l3 import router_info as router from neutron.common import constants as l3_constants @@ -47,8 +46,8 @@ class DvrRouterBase(router.RouterInfo): if ip['subnet_id'] in subnet_ids: return p - LOG.error(_LE('DVR: SNAT port not found in the list ' - '%(snat_list)s for the given router ' - 'internal port %(int_p)s'), { - 'snat_list': snat_ports, - 'int_p': int_port}) + LOG.error('DVR: SNAT port not found in the list ' + '%(snat_list)s for the given router ' + 'internal port %(int_p)s', { + 'snat_list': snat_ports, + 'int_p': int_port}) diff --git a/neutron/agent/l3/ha.py b/neutron/agent/l3/ha.py index d60bb13ba93..795e271964e 100644 --- a/neutron/agent/l3/ha.py +++ b/neutron/agent/l3/ha.py @@ -20,7 +20,6 @@ from oslo_log import log as logging from oslo_utils import fileutils import webob -from neutron._i18n import _LI from neutron.agent.linux import utils as agent_utils from neutron.common import constants from neutron.notifiers import batch_notifier @@ -88,8 +87,8 @@ class AgentMixin(object): try: return self.router_info[router_id] except KeyError: - LOG.info(_LI('Router %s is not managed by this agent. It was ' - 'possibly deleted concurrently.'), router_id) + LOG.info('Router %s is not managed by this agent. 
It was ' + 'possibly deleted concurrently.', router_id) def check_ha_state_for_router(self, router_id, current_state): ri = self._get_router_info(router_id) @@ -110,7 +109,7 @@ class AgentMixin(object): return self.conf.ha_vrrp_advert_int def enqueue_state_change(self, router_id, state): - LOG.info(_LI('Router %(router_id)s transitioned to %(state)s'), + LOG.info('Router %(router_id)s transitioned to %(state)s', {'router_id': router_id, 'state': state}) diff --git a/neutron/agent/l3/ha_router.py b/neutron/agent/l3/ha_router.py index e98cca1cef3..891ffda649e 100644 --- a/neutron/agent/l3/ha_router.py +++ b/neutron/agent/l3/ha_router.py @@ -21,7 +21,6 @@ from neutron_lib.api.definitions import portbindings from neutron_lib import constants as n_consts from oslo_log import log as logging -from neutron._i18n import _, _LE from neutron.agent.l3 import namespaces from neutron.agent.l3 import router_info as router from neutron.agent.linux import external_process @@ -93,7 +92,7 @@ class HaRouter(router.RouterInfo): with open(ha_state_path, 'w') as f: f.write(new_state) except (OSError, IOError): - LOG.error(_LE('Error while writing HA state for %s'), + LOG.error('Error while writing HA state for %s', self.router_id) @property @@ -112,8 +111,8 @@ class HaRouter(router.RouterInfo): def initialize(self, process_monitor): ha_port = self.router.get(n_consts.HA_INTERFACE_KEY) if not ha_port: - msg = _("Unable to process HA router %s without " - "HA port") % self.router_id + msg = ("Unable to process HA router %s without HA port" % + self.router_id) LOG.exception(msg) raise Exception(msg) super(HaRouter, self).initialize(process_monitor) diff --git a/neutron/agent/l3/item_allocator.py b/neutron/agent/l3/item_allocator.py index ebf228d20eb..64543442f6c 100644 --- a/neutron/agent/l3/item_allocator.py +++ b/neutron/agent/l3/item_allocator.py @@ -16,7 +16,6 @@ import os from oslo_log import log as logging -from neutron._i18n import _LW LOG = logging.getLogger(__name__) @@ -55,8 +54,8 
@@ class ItemAllocator(object): self.remembered[key] = self.ItemClass(saved_value) except ValueError: read_error = True - LOG.warning(_LW("Invalid line in %(file)s, " - "ignoring: %(line)s"), + LOG.warning("Invalid line in %(file)s, " + "ignoring: %(line)s", {'file': state_file, 'line': line}) self.pool.difference_update(self.remembered.values()) diff --git a/neutron/agent/l3/keepalived_state_change.py b/neutron/agent/l3/keepalived_state_change.py index e31042db89b..43e6821ec75 100644 --- a/neutron/agent/l3/keepalived_state_change.py +++ b/neutron/agent/l3/keepalived_state_change.py @@ -21,7 +21,7 @@ import netaddr from oslo_config import cfg from oslo_log import log as logging -from neutron._i18n import _, _LE +from neutron._i18n import _ from neutron.agent.l3 import ha from neutron.agent.linux import daemon from neutron.agent.linux import ip_lib @@ -86,8 +86,8 @@ class MonitorDaemon(daemon.Daemon): # Remove this code once new keepalived versions are available. self.send_garp(event) except Exception: - LOG.exception(_LE( - 'Failed to process or handle event for line %s'), iterable) + LOG.exception('Failed to process or handle event for line %s', + iterable) def write_state_change(self, state): with open(os.path.join( diff --git a/neutron/agent/l3/l3_agent_extensions_manager.py b/neutron/agent/l3/l3_agent_extensions_manager.py index 1f60b0136e1..d2e0e87a244 100644 --- a/neutron/agent/l3/l3_agent_extensions_manager.py +++ b/neutron/agent/l3/l3_agent_extensions_manager.py @@ -15,7 +15,6 @@ from oslo_log import log -from neutron._i18n import _LE from neutron.agent import agent_extensions_manager as agent_ext_manager from neutron.conf.agent import agent_extensions_manager as agent_ext_mgr_config @@ -43,8 +42,8 @@ class L3AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager): extension.obj.add_router(context, data) else: LOG.error( - _LE("Agent Extension '%(name)s' does not " - "implement method add_router"), + "Agent Extension '%(name)s' does not " + 
"implement method add_router", {'name': extension.name} ) @@ -55,8 +54,8 @@ class L3AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager): extension.obj.update_router(context, data) else: LOG.error( - _LE("Agent Extension '%(name)s' does not " - "implement method update_router"), + "Agent Extension '%(name)s' does not " + "implement method update_router", {'name': extension.name} ) @@ -67,7 +66,7 @@ class L3AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager): extension.obj.delete_router(context, data) else: LOG.error( - _LE("Agent Extension '%(name)s' does not " - "implement method delete_router"), + "Agent Extension '%(name)s' does not " + "implement method delete_router", {'name': extension.name} ) diff --git a/neutron/agent/l3/namespace_manager.py b/neutron/agent/l3/namespace_manager.py index 79c28be3523..bc42be997e0 100644 --- a/neutron/agent/l3/namespace_manager.py +++ b/neutron/agent/l3/namespace_manager.py @@ -12,7 +12,6 @@ from oslo_log import log as logging -from neutron._i18n import _LE from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import namespaces @@ -119,8 +118,8 @@ class NamespaceManager(object): namespaces = root_ip.get_namespaces() return set(ns for ns in namespaces if self.is_managed(ns)) except RuntimeError: - LOG.exception(_LE('RuntimeError in obtaining namespace list for ' - 'namespace cleanup.')) + LOG.exception('RuntimeError in obtaining namespace list for ' + 'namespace cleanup.') return set() def ensure_router_cleanup(self, router_id): @@ -144,4 +143,4 @@ class NamespaceManager(object): self.process_monitor, ns_id, self.agent_conf, ns.name) ns.delete() except RuntimeError: - LOG.exception(_LE('Failed to destroy stale namespace %s'), ns) + LOG.exception('Failed to destroy stale namespace %s', ns) diff --git a/neutron/agent/l3/namespaces.py b/neutron/agent/l3/namespaces.py index 9be4efc6689..71e8cbcf35b 100644 --- a/neutron/agent/l3/namespaces.py +++ 
b/neutron/agent/l3/namespaces.py @@ -18,7 +18,6 @@ import functools from oslo_log import log as logging from oslo_utils import excutils -from neutron._i18n import _LE, _LW from neutron.agent.linux import ip_lib LOG = logging.getLogger(__name__) @@ -65,8 +64,8 @@ def check_ns_existence(f): @functools.wraps(f) def wrapped(self, *args, **kwargs): if not self.exists(): - LOG.warning(_LW('Namespace %(name)s does not exist. Skipping ' - '%(func)s'), + LOG.warning('Namespace %(name)s does not exist. Skipping ' + '%(func)s', {'name': self.name, 'func': f.__name__}) return try: @@ -111,7 +110,7 @@ class Namespace(object): try: self.ip_wrapper_root.netns.delete(self.name) except RuntimeError: - msg = _LE('Failed trying to delete namespace: %s') + msg = 'Failed trying to delete namespace: %s' LOG.exception(msg, self.name) def exists(self): diff --git a/neutron/agent/l3/router_info.py b/neutron/agent/l3/router_info.py index cf9ff63fda7..0f49d161428 100644 --- a/neutron/agent/l3/router_info.py +++ b/neutron/agent/l3/router_info.py @@ -19,7 +19,7 @@ from neutron_lib import constants as lib_constants from neutron_lib.utils import helpers from oslo_log import log as logging -from neutron._i18n import _, _LE, _LW +from neutron._i18n import _ from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager @@ -298,8 +298,8 @@ class RouterInfo(object): except RuntimeError: # any exception occurred here should cause the floating IP # to be set in error state - LOG.warning(_LW("Unable to configure IP address for " - "floating IP: %s"), fip['id']) + LOG.warning("Unable to configure IP address for " + "floating IP: %s", fip['id']) def add_floating_ip(self, fip, interface_name, device): raise NotImplementedError() @@ -882,7 +882,7 @@ class RouterInfo(object): except n_exc.FloatingIpSetupException: # All floating IPs must be put in error state - LOG.exception(_LE("Failed to process floating IPs.")) + LOG.exception("Failed to 
process floating IPs.") fip_statuses = self.put_fips_in_error_state() finally: self.update_fip_statuses(fip_statuses) @@ -908,7 +908,7 @@ class RouterInfo(object): except (n_exc.FloatingIpSetupException, n_exc.IpTablesApplyException): # All floating IPs must be put in error state - LOG.exception(_LE("Failed to process floating IPs.")) + LOG.exception("Failed to process floating IPs.") fip_statuses = self.put_fips_in_error_state() finally: self.update_fip_statuses(fip_statuses) @@ -1102,8 +1102,8 @@ class RouterInfo(object): self.agent.pd.sync_router(self.router['id']) self._process_external_on_delete() else: - LOG.warning(_LW("Can't gracefully delete the router %s: " - "no router namespace found."), self.router['id']) + LOG.warning("Can't gracefully delete the router %s: " + "no router namespace found.", self.router['id']) @common_utils.exception_logger() def process(self): diff --git a/neutron/agent/linux/async_process.py b/neutron/agent/linux/async_process.py index 21af640f095..1e138da3d9d 100644 --- a/neutron/agent/linux/async_process.py +++ b/neutron/agent/linux/async_process.py @@ -20,7 +20,7 @@ import eventlet.queue from neutron_lib.utils import helpers from oslo_log import log as logging -from neutron._i18n import _, _LE +from neutron._i18n import _ from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import utils as common_utils @@ -182,7 +182,7 @@ class AsyncProcess(object): # root and need to be killed via the same helper. 
utils.kill_process(pid, kill_signal, self.run_as_root) except Exception: - LOG.exception(_LE('An error occurred while killing [%s].'), + LOG.exception('An error occurred while killing [%s].', self.cmd) return False @@ -211,8 +211,8 @@ class AsyncProcess(object): if not output and output != "": break except Exception: - LOG.exception(_LE('An error occurred while communicating ' - 'with async process [%s].'), self.cmd) + LOG.exception('An error occurred while communicating ' + 'with async process [%s].', self.cmd) break # Ensure that watching a process with lots of output does # not block execution of other greenthreads. @@ -242,11 +242,11 @@ class AsyncProcess(object): def _read_stderr(self): data = self._read(self._process.stderr, self._stderr_lines) if self.log_output: - LOG.error(_LE('Error received from [%(cmd)s]: %(err)s'), + LOG.error('Error received from [%(cmd)s]: %(err)s', {'cmd': self.cmd, 'err': data}) if self.die_on_error: - LOG.error(_LE("Process [%(cmd)s] dies due to the error: %(err)s"), + LOG.error("Process [%(cmd)s] dies due to the error: %(err)s", {'cmd': self.cmd, 'err': data}) # the callback caller will use None to indicate the need to bail diff --git a/neutron/agent/linux/daemon.py b/neutron/agent/linux/daemon.py index 8f5e4a7ca9f..568c0e1790f 100644 --- a/neutron/agent/linux/daemon.py +++ b/neutron/agent/linux/daemon.py @@ -25,7 +25,7 @@ import sys from debtcollector import removals from oslo_log import log as logging -from neutron._i18n import _, _LE, _LI +from neutron._i18n import _ from neutron.common import exceptions LOG = logging.getLogger(__name__) @@ -113,7 +113,7 @@ def drop_privileges(user=None, group=None): if user is not None: setuid(user) - LOG.info(_LI("Process runs with uid/gid: %(uid)s/%(gid)s"), + LOG.info("Process runs with uid/gid: %(uid)s/%(gid)s", {'uid': os.getuid(), 'gid': os.getgid()}) @@ -126,7 +126,7 @@ class Pidfile(object): self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR) fcntl.flock(self.fd, fcntl.LOCK_EX | 
fcntl.LOCK_NB) except IOError: - LOG.exception(_LE("Error while handling pidfile: %s"), pidfile) + LOG.exception("Error while handling pidfile: %s", pidfile) sys.exit(1) def __str__(self): @@ -191,7 +191,7 @@ class Daemon(object): if pid > 0: os._exit(0) except OSError: - LOG.exception(_LE('Fork failed')) + LOG.exception('Fork failed') sys.exit(1) def daemonize(self): @@ -244,8 +244,8 @@ class Daemon(object): if self.pidfile is not None and self.pidfile.is_running(): self.pidfile.unlock() - LOG.error(_LE('Pidfile %s already exist. Daemon already ' - 'running?'), self.pidfile) + LOG.error('Pidfile %s already exist. Daemon already ' + 'running?', self.pidfile) sys.exit(1) # Start the daemon diff --git a/neutron/agent/linux/dhcp.py b/neutron/agent/linux/dhcp.py index 38b34747b78..0229a8eb6ff 100644 --- a/neutron/agent/linux/dhcp.py +++ b/neutron/agent/linux/dhcp.py @@ -32,7 +32,7 @@ from oslo_utils import fileutils from oslo_utils import uuidutils import six -from neutron._i18n import _, _LI, _LW, _LE +from neutron._i18n import _ from neutron.agent.common import utils as agent_common_utils from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib @@ -238,7 +238,7 @@ class DhcpLocalProcess(DhcpBase): try: self.device_manager.destroy(self.network, self.interface_name) except RuntimeError: - LOG.warning(_LW('Failed trying to delete interface: %s'), + LOG.warning('Failed trying to delete interface: %s', self.interface_name) ns_ip = ip_lib.IPWrapper(namespace=self.network.namespace) @@ -248,7 +248,7 @@ class DhcpLocalProcess(DhcpBase): try: ns_ip.netns.delete(self.network.namespace) except RuntimeError: - LOG.warning(_LW('Failed trying to delete namespace: %s'), + LOG.warning('Failed trying to delete namespace: %s', self.network.namespace) def _get_value_from_conf_file(self, kind, converter=None): @@ -421,8 +421,7 @@ class Dnsmasq(DhcpLocalProcess): if not os.path.exists(log_dir): os.makedirs(log_dir) except OSError: - LOG.error(_LE('Error 
while create dnsmasq log dir: %s'), - log_dir) + LOG.error('Error while create dnsmasq log dir: %s', log_dir) else: log_filename = os.path.join(log_dir, 'dhcp_dns_log') cmd.append('--log-queries') @@ -460,8 +459,8 @@ class Dnsmasq(DhcpLocalProcess): if self._IS_DHCP_RELEASE6_SUPPORTED is None: self._IS_DHCP_RELEASE6_SUPPORTED = checks.dhcp_release6_supported() if not self._IS_DHCP_RELEASE6_SUPPORTED: - LOG.warning(_LW("dhcp_release6 is not present on this system, " - "will not call it again.")) + LOG.warning("dhcp_release6 is not present on this system, " + "will not call it again.") return self._IS_DHCP_RELEASE6_SUPPORTED def _release_lease(self, mac_address, ip, client_id=None, @@ -483,8 +482,8 @@ class Dnsmasq(DhcpLocalProcess): except RuntimeError as e: # when failed to release single lease there's # no need to propagate error further - LOG.warning(_LW('DHCP release failed for %(cmd)s. ' - 'Reason: %(e)s'), {'cmd': cmd, 'e': e}) + LOG.warning('DHCP release failed for %(cmd)s. ' + 'Reason: %(e)s', {'cmd': cmd, 'e': e}) def _output_config_files(self): self._output_hosts_file() @@ -798,9 +797,9 @@ class Dnsmasq(DhcpLocalProcess): server_id = l.strip().split()[1] continue else: - LOG.warning(_LW('Multiple DUID entries in %s ' - 'lease file, dnsmasq is possibly ' - 'not functioning properly'), + LOG.warning('Multiple DUID entries in %s ' + 'lease file, dnsmasq is possibly ' + 'not functioning properly', filename) continue parts = l.strip().split() @@ -969,9 +968,9 @@ class Dnsmasq(DhcpLocalProcess): self._format_option(opt_ip_version, port.id, opt.opt_name, opt.opt_value)) else: - LOG.info(_LI("Cannot apply dhcp option %(opt)s " - "because it's ip_version %(version)d " - "is not in port's address IP versions"), + LOG.info("Cannot apply dhcp option %(opt)s " + "because it's ip_version %(version)d " + "is not in port's address IP versions", {'opt': opt.opt_name, 'version': opt_ip_version}) @@ -1269,8 +1268,8 @@ class DeviceManager(object): 'device_id': device_id}}) 
except oslo_messaging.RemoteError as e: if e.exc_type == 'DhcpPortInUse': - LOG.info(_LI("Skipping DHCP port %s as it is " - "already in use"), port.id) + LOG.info("Skipping DHCP port %s as it is " + "already in use", port.id) continue raise if port: @@ -1374,8 +1373,8 @@ class DeviceManager(object): try: self.unplug(d.name, network) except Exception: - LOG.exception(_LE("Exception during stale " - "dhcp device cleanup")) + LOG.exception("Exception during stale " + "dhcp device cleanup") def plug(self, network, port, interface_name): """Plug device settings for the network's DHCP on this host.""" @@ -1424,8 +1423,8 @@ class DeviceManager(object): self.plug(network, port, interface_name) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Unable to plug DHCP port for ' - 'network %s. Releasing port.'), + LOG.exception('Unable to plug DHCP port for ' + 'network %s. Releasing port.', network.id) self.plugin.release_dhcp_port(network.id, port.device_id) diff --git a/neutron/agent/linux/external_process.py b/neutron/agent/linux/external_process.py index 0855ddf823b..bd32fcafb0d 100644 --- a/neutron/agent/linux/external_process.py +++ b/neutron/agent/linux/external_process.py @@ -24,7 +24,7 @@ from oslo_utils import fileutils import psutil import six -from neutron._i18n import _, _LW, _LE +from neutron._i18n import _ from neutron.agent.linux import ip_lib from neutron.agent.linux import utils @@ -237,9 +237,9 @@ class ProcessMonitor(object): pm = self._monitored_processes.get(service_id) if pm and not pm.active: - LOG.error(_LE("%(service)s for %(resource_type)s " - "with uuid %(uuid)s not found. " - "The process should not have died"), + LOG.error("%(service)s for %(resource_type)s " + "with uuid %(uuid)s not found. 
" + "The process should not have died", {'service': service_id.service, 'resource_type': self._resource_type, 'uuid': service_id.uuid}) @@ -257,14 +257,14 @@ class ProcessMonitor(object): action_function(service_id) def _respawn_action(self, service_id): - LOG.warning(_LW("Respawning %(service)s for uuid %(uuid)s"), + LOG.warning("Respawning %(service)s for uuid %(uuid)s", {'service': service_id.service, 'uuid': service_id.uuid}) self._monitored_processes[service_id].enable() def _exit_action(self, service_id): - LOG.error(_LE("Exiting agent as programmed in check_child_processes_" - "actions")) + LOG.error("Exiting agent as programmed in check_child_processes_" + "actions") self._exit_handler(service_id.uuid, service_id.service) def _exit_handler(self, uuid, service): @@ -274,7 +274,7 @@ class ProcessMonitor(object): check_child_processes_actions, and one of our external processes die unexpectedly. """ - LOG.error(_LE("Exiting agent because of a malfunction with the " - "%(service)s process identified by uuid %(uuid)s"), + LOG.error("Exiting agent because of a malfunction with the " + "%(service)s process identified by uuid %(uuid)s", {'service': service, 'uuid': uuid}) raise SystemExit(1) diff --git a/neutron/agent/linux/interface.py b/neutron/agent/linux/interface.py index bf4d51eb73f..88d6e67f318 100644 --- a/neutron/agent/linux/interface.py +++ b/neutron/agent/linux/interface.py @@ -22,7 +22,7 @@ from oslo_config import cfg from oslo_log import log as logging import six -from neutron._i18n import _, _LE, _LI, _LW +from neutron._i18n import _ from neutron.agent.common import ovs_lib from neutron.agent.linux import ip_lib from neutron.agent.linux import utils @@ -265,12 +265,12 @@ class LinuxInterfaceDriver(object): self.plug_new(network_id, port_id, device_name, mac_address, bridge, namespace, prefix, mtu) else: - LOG.info(_LI("Device %s already exists"), device_name) + LOG.info("Device %s already exists", device_name) if mtu: self.set_mtu( device_name, mtu, 
namespace=namespace, prefix=prefix) else: - LOG.warning(_LW("No MTU configured for port %s"), port_id) + LOG.warning("No MTU configured for port %s", port_id) @abc.abstractmethod def unplug(self, device_name, bridge=None, namespace=None, prefix=None): @@ -296,7 +296,7 @@ class LinuxInterfaceDriver(object): def set_mtu(self, device_name, mtu, namespace=None, prefix=None): """Set MTU on the interface.""" if not self._mtu_update_warn_logged: - LOG.warning(_LW("Interface driver cannot update MTU for ports")) + LOG.warning("Interface driver cannot update MTU for ports") self._mtu_update_warn_logged = True @@ -367,7 +367,7 @@ class OVSInterfaceDriver(LinuxInterfaceDriver): ns_dev.link.set_address(mac_address) break except RuntimeError as e: - LOG.warning(_LW("Got error trying to set mac, retrying: %s"), + LOG.warning("Got error trying to set mac, retrying: %s", str(e)) time.sleep(1) else: @@ -386,7 +386,7 @@ class OVSInterfaceDriver(LinuxInterfaceDriver): if mtu: self.set_mtu(device_name, mtu, namespace=namespace, prefix=prefix) else: - LOG.warning(_LW("No MTU configured for port %s"), port_id) + LOG.warning("No MTU configured for port %s", port_id) ns_dev.link.set_up() if self.conf.ovs_use_veth: @@ -408,7 +408,7 @@ class OVSInterfaceDriver(LinuxInterfaceDriver): device.link.delete() LOG.debug("Unplugged interface '%s'", device_name) except RuntimeError: - LOG.error(_LE("Failed unplugging interface '%s'"), + LOG.error("Failed unplugging interface '%s'", device_name) def set_mtu(self, device_name, mtu, namespace=None, prefix=None): @@ -458,7 +458,7 @@ class IVSInterfaceDriver(LinuxInterfaceDriver): ns_dev.link.set_mtu(mtu) root_dev.link.set_mtu(mtu) else: - LOG.warning(_LW("No MTU configured for port %s"), port_id) + LOG.warning("No MTU configured for port %s", port_id) if namespace: namespace_obj = ip.ensure_namespace(namespace) @@ -477,7 +477,7 @@ class IVSInterfaceDriver(LinuxInterfaceDriver): device.link.delete() LOG.debug("Unplugged interface '%s'", device_name) 
except RuntimeError: - LOG.error(_LE("Failed unplugging interface '%s'"), + LOG.error("Failed unplugging interface '%s'", device_name) @@ -503,7 +503,7 @@ class BridgeInterfaceDriver(LinuxInterfaceDriver): if mtu: self.set_mtu(device_name, mtu, namespace=namespace, prefix=prefix) else: - LOG.warning(_LW("No MTU configured for port %s"), port_id) + LOG.warning("No MTU configured for port %s", port_id) root_veth.link.set_up() ns_veth.link.set_up() @@ -515,7 +515,7 @@ class BridgeInterfaceDriver(LinuxInterfaceDriver): device.link.delete() LOG.debug("Unplugged interface '%s'", device_name) except RuntimeError: - LOG.error(_LE("Failed unplugging interface '%s'"), + LOG.error("Failed unplugging interface '%s'", device_name) def set_mtu(self, device_name, mtu, namespace=None, prefix=None): diff --git a/neutron/agent/linux/ip_conntrack.py b/neutron/agent/linux/ip_conntrack.py index 33c08678184..56651d77a96 100644 --- a/neutron/agent/linux/ip_conntrack.py +++ b/neutron/agent/linux/ip_conntrack.py @@ -17,7 +17,6 @@ import netaddr from oslo_concurrency import lockutils from oslo_log import log as logging -from neutron._i18n import _LE from neutron.agent.linux import utils as linux_utils from neutron.common import constants as n_const from neutron.common import exceptions as n_exc @@ -107,8 +106,7 @@ class IpConntrackManager(object): check_exit_code=True, extra_ok_codes=[1]) except RuntimeError: - LOG.exception( - _LE("Failed execute conntrack command %s"), cmd) + LOG.exception("Failed execute conntrack command %s", cmd) def delete_conntrack_state_by_rule(self, device_info_list, rule): self._delete_conntrack_state(device_info_list, rule) diff --git a/neutron/agent/linux/ip_lib.py b/neutron/agent/linux/ip_lib.py index a7fb927de45..18bb6a1899d 100644 --- a/neutron/agent/linux/ip_lib.py +++ b/neutron/agent/linux/ip_lib.py @@ -27,7 +27,7 @@ from oslo_log import log as logging from oslo_utils import excutils import six -from neutron._i18n import _, _LE, _LW +from neutron._i18n 
import _ from neutron.agent.common import utils from neutron.common import exceptions as n_exc from neutron.common import utils as common_utils @@ -326,8 +326,8 @@ class IPDevice(SubProcessBase): extra_ok_codes=[1]) except RuntimeError: - LOG.exception(_LE("Failed deleting ingress connection state of" - " floatingip %s"), ip_str) + LOG.exception("Failed deleting ingress connection state of" + " floatingip %s", ip_str) # Delete conntrack state for egress traffic try: @@ -335,8 +335,8 @@ class IPDevice(SubProcessBase): check_exit_code=True, extra_ok_codes=[1]) except RuntimeError: - LOG.exception(_LE("Failed deleting egress connection state of" - " floatingip %s"), ip_str) + LOG.exception("Failed deleting egress connection state of" + " floatingip %s", ip_str) def disable_ipv6(self): sysctl_name = re.sub(r'\.', '/', self.name) @@ -1100,8 +1100,8 @@ def _arping(ns_name, iface_name, address, count, log_exception): 'ns': ns_name, 'err': exc}) if not exists: - LOG.warning(_LW("Interface %s might have been deleted " - "concurrently"), iface_name) + LOG.warning("Interface %s might have been deleted " + "concurrently", iface_name) return @@ -1160,7 +1160,7 @@ def sysctl(cmd, namespace=None, log_fail_as_error=True): log_fail_as_error=log_fail_as_error) except RuntimeError as rte: LOG.warning( - _LW("Setting %(cmd)s in namespace %(ns)s failed: %(err)s."), + "Setting %(cmd)s in namespace %(ns)s failed: %(err)s.", {'cmd': cmd, 'ns': namespace, 'err': rte}) @@ -1207,9 +1207,9 @@ def set_ip_nonlocal_bind_for_namespace(namespace): log_fail_as_error=False) if failed: LOG.warning( - _LW("%s will not be set to 0 in the root namespace in order to " - "not break DVR, which requires this value be set to 1. 
This " - "may introduce a race between moving a floating IP to a " - "different network node, and the peer side getting a " - "populated ARP cache for a given floating IP address."), + "%s will not be set to 0 in the root namespace in order to " + "not break DVR, which requires this value be set to 1. This " + "may introduce a race between moving a floating IP to a " + "different network node, and the peer side getting a " + "populated ARP cache for a given floating IP address.", IP_NONLOCAL_BIND) diff --git a/neutron/agent/linux/ip_link_support.py b/neutron/agent/linux/ip_link_support.py index 225a39beaf6..46d40ed4feb 100644 --- a/neutron/agent/linux/ip_link_support.py +++ b/neutron/agent/linux/ip_link_support.py @@ -18,7 +18,7 @@ import re from neutron_lib import exceptions as n_exc from oslo_log import log as logging -from neutron._i18n import _, _LE +from neutron._i18n import _ from neutron.agent.linux import utils @@ -103,6 +103,6 @@ class IpLinkSupport(object): return_stderr=True, log_fail_as_error=False) except Exception as e: - LOG.exception(_LE("Failed executing ip command")) + LOG.exception("Failed executing ip command") raise UnsupportedIpLinkCommand(reason=e) return _stdout or _stderr diff --git a/neutron/agent/linux/ip_monitor.py b/neutron/agent/linux/ip_monitor.py index 4e36c813b8d..33db43c9a61 100644 --- a/neutron/agent/linux/ip_monitor.py +++ b/neutron/agent/linux/ip_monitor.py @@ -16,7 +16,6 @@ from oslo_log import log as logging from oslo_utils import excutils -from neutron._i18n import _LE from neutron.agent.linux import async_process from neutron.agent.linux import ip_lib @@ -41,7 +40,7 @@ class IPMonitorEvent(object): first_word = route[0] except IndexError: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Unable to parse route "%s"'), line) + LOG.error('Unable to parse route "%s"', line) added = (first_word != 'Deleted') if not added: @@ -52,7 +51,7 @@ class IPMonitorEvent(object): cidr = route[3] except IndexError: with 
excutils.save_and_reraise_exception(): - LOG.error(_LE('Unable to parse route "%s"'), line) + LOG.error('Unable to parse route "%s"', line) return cls(line, added, interface, cidr) diff --git a/neutron/agent/linux/iptables_firewall.py b/neutron/agent/linux/iptables_firewall.py index 807c023e943..bf2a8b8218c 100644 --- a/neutron/agent/linux/iptables_firewall.py +++ b/neutron/agent/linux/iptables_firewall.py @@ -21,7 +21,6 @@ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import netutils -from neutron._i18n import _LI from neutron.agent import firewall from neutron.agent.linux import ip_conntrack from neutron.agent.linux import ipset_manager @@ -160,8 +159,8 @@ class IptablesFirewallDriver(firewall.FirewallDriver): def update_port_filter(self, port): LOG.debug("Updating device (%s) filter", port['device']) if port['device'] not in self.ports: - LOG.info(_LI('Attempted to update port filter which is not ' - 'filtered %s'), port['device']) + LOG.info('Attempted to update port filter which is not ' + 'filtered %s', port['device']) return self._remove_chains() self._set_ports(port) @@ -171,8 +170,8 @@ class IptablesFirewallDriver(firewall.FirewallDriver): def remove_port_filter(self, port): LOG.debug("Removing device (%s) filter", port['device']) if port['device'] not in self.ports: - LOG.info(_LI('Attempted to remove port filter which is not ' - 'filtered %r'), port) + LOG.info('Attempted to remove port filter which is not ' + 'filtered %r', port) return self._remove_chains() self._remove_conntrack_entries_from_port_deleted(port) diff --git a/neutron/agent/linux/iptables_manager.py b/neutron/agent/linux/iptables_manager.py index 1162e7898df..e07ca49bf34 100644 --- a/neutron/agent/linux/iptables_manager.py +++ b/neutron/agent/linux/iptables_manager.py @@ -30,7 +30,7 @@ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils -from neutron._i18n import _, _LE, _LW +from neutron._i18n import _ from 
neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_comments as ic from neutron.agent.linux import utils as linux_utils @@ -247,8 +247,8 @@ class IptablesTable(object): top, self.wrap_name, comment=comment))) except ValueError: - LOG.warning(_LW('Tried to remove rule that was not there:' - ' %(chain)r %(rule)r %(wrap)r %(top)r'), + LOG.warning('Tried to remove rule that was not there:' + ' %(chain)r %(rule)r %(wrap)r %(top)r', {'chain': chain, 'rule': rule, 'top': top, 'wrap': wrap}) @@ -533,8 +533,8 @@ class IptablesManager(object): commands[log_start:log_end], log_start + 1) ) - LOG.error(_LE("IPTablesManager.apply failed to apply the " - "following set of iptables rules:\n%s"), + LOG.error("IPTablesManager.apply failed to apply the " + "following set of iptables rules:\n%s", '\n'.join(log_lines)) LOG.debug("IPTablesManager.apply completed with success. %d iptables " "commands were issued", len(all_commands)) @@ -636,9 +636,9 @@ class IptablesManager(object): def _weed_out_duplicates(line): if line in seen_lines: thing = 'chain' if line.startswith(':') else 'rule' - LOG.warning(_LW("Duplicate iptables %(thing)s detected. This " - "may indicate a bug in the iptables " - "%(thing)s generation code. Line: %(line)s"), + LOG.warning("Duplicate iptables %(thing)s detected. This " + "may indicate a bug in the iptables " + "%(thing)s generation code. 
Line: %(line)s", {'thing': thing, 'line': line}) return False seen_lines.add(line) @@ -675,8 +675,8 @@ class IptablesManager(object): """Return the sum of the traffic counters of all rules of a chain.""" cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap) if not cmd_tables: - LOG.warning(_LW('Attempted to get traffic counters of chain %s ' - 'which does not exist'), chain) + LOG.warning('Attempted to get traffic counters of chain %s ' + 'which does not exist', chain) return name = get_chain_name(chain, wrap) diff --git a/neutron/agent/linux/keepalived.py b/neutron/agent/linux/keepalived.py index c5332911dc5..a0230eb76bf 100644 --- a/neutron/agent/linux/keepalived.py +++ b/neutron/agent/linux/keepalived.py @@ -23,7 +23,7 @@ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils -from neutron._i18n import _, _LE +from neutron._i18n import _ from neutron.agent.linux import external_process from neutron.common import constants from neutron.common import utils @@ -406,8 +406,8 @@ class KeepalivedManager(object): os.remove(pid_file) except OSError as e: if e.errno != errno.ENOENT: - LOG.error(_LE("Could not delete file %s, keepalived can " - "refuse to start."), pid_file) + LOG.error("Could not delete file %s, keepalived can " + "refuse to start.", pid_file) def get_vrrp_pid_file_name(self, base_pid_file): return '%s-vrrp' % base_pid_file diff --git a/neutron/agent/linux/openvswitch_firewall/firewall.py b/neutron/agent/linux/openvswitch_firewall/firewall.py index 413b7ae70a1..3da5ef5dd40 100644 --- a/neutron/agent/linux/openvswitch_firewall/firewall.py +++ b/neutron/agent/linux/openvswitch_firewall/firewall.py @@ -20,7 +20,6 @@ from neutron_lib import constants as lib_const from oslo_log import log as logging from oslo_utils import netutils -from neutron._i18n import _LE from neutron.agent import firewall from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts from 
neutron.agent.linux.openvswitch_firewall import exceptions @@ -463,8 +462,8 @@ class OVSFirewallDriver(firewall.FirewallDriver): # allow_address_pair MACs will be updated in # self.get_or_create_ofport(port) if old_of_port: - LOG.error(_LE("Initializing port %s that was already " - "initialized."), + LOG.error("Initializing port %s that was already " + "initialized.", port['device']) self.delete_all_port_flows(old_of_port) of_port = self.get_or_create_ofport(port) diff --git a/neutron/agent/linux/ovsdb_monitor.py b/neutron/agent/linux/ovsdb_monitor.py index 9ea878cdc60..ed4dff56a11 100644 --- a/neutron/agent/linux/ovsdb_monitor.py +++ b/neutron/agent/linux/ovsdb_monitor.py @@ -15,7 +15,6 @@ from oslo_log import log as logging from oslo_serialization import jsonutils -from neutron._i18n import _LE from neutron.agent.linux import async_process from neutron.agent.ovsdb import api as ovsdb from neutron.agent.ovsdb.native import helpers @@ -81,7 +80,7 @@ class SimpleInterfaceMonitor(OvsdbMonitor): temporary if respawn_interval is set. 
""" if not self.is_active(): - LOG.error(_LE("Interface monitor is not active")) + LOG.error("Interface monitor is not active") else: self.process_events() return bool(self.new_events['added'] or self.new_events['removed']) diff --git a/neutron/agent/linux/pd.py b/neutron/agent/linux/pd.py index 2899bcb6861..02ef2e85171 100644 --- a/neutron/agent/linux/pd.py +++ b/neutron/agent/linux/pd.py @@ -27,7 +27,7 @@ from oslo_utils import netutils import six from stevedore import driver -from neutron._i18n import _, _LE +from neutron._i18n import _ from neutron.common import constants as l3_constants from neutron.common import utils @@ -392,8 +392,8 @@ def update_router(resource, event, l3_agent, **kwargs): updated_router = kwargs['router'] router = l3_agent.pd.routers.get(updated_router.router_id) if not router: - LOG.exception(_LE("Router to be updated is not in internal routers " - "list: %s"), updated_router.router_id) + LOG.exception("Router to be updated is not in internal routers " + "list: %s", updated_router.router_id) else: router['ns_name'] = updated_router.get_gw_ns_name() diff --git a/neutron/agent/linux/utils.py b/neutron/agent/linux/utils.py index deaa60eedf6..7dc84894bb6 100644 --- a/neutron/agent/linux/utils.py +++ b/neutron/agent/linux/utils.py @@ -33,7 +33,7 @@ from oslo_utils import excutils from oslo_utils import fileutils from six.moves import http_client as httplib -from neutron._i18n import _, _LE +from neutron._i18n import _ from neutron.agent.linux import xenapi_root_helper from neutron.common import utils from neutron.conf.agent import common as config @@ -111,7 +111,7 @@ def execute_rootwrap_daemon(cmd, process_input, addl_env): return client.execute(cmd, process_input) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Rootwrap error running command: %s"), cmd) + LOG.error("Rootwrap error running command: %s", cmd) def execute(cmd, process_input=None, addl_env=None, @@ -249,7 +249,7 @@ def 
get_value_from_file(filename, converter=None): try: return converter(f.read()) if converter else f.read() except ValueError: - LOG.error(_LE('Unable to convert value in %s'), filename) + LOG.error('Unable to convert value in %s', filename) except IOError: LOG.debug('Unable to access %s', filename) diff --git a/neutron/agent/linux/xenapi_root_helper.py b/neutron/agent/linux/xenapi_root_helper.py index f78320876a6..4dacc1e4d80 100644 --- a/neutron/agent/linux/xenapi_root_helper.py +++ b/neutron/agent/linux/xenapi_root_helper.py @@ -29,7 +29,6 @@ from oslo_log import log as logging from oslo_rootwrap import cmd as oslo_rootwrap_cmd from oslo_serialization import jsonutils -from neutron._i18n import _LE from neutron.conf.agent import xenapi_conf @@ -88,6 +87,6 @@ class XenAPIClient(object): err = result['err'] return returncode, out, err except XenAPI.Failure as failure: - LOG.exception(_LE('Failed to execute command: %s'), cmd) + LOG.exception('Failed to execute command: %s', cmd) returncode = self._get_return_code(failure.details) return returncode, out, err diff --git a/neutron/agent/metadata/agent.py b/neutron/agent/metadata/agent.py index f849945ce03..d3cdb9302d9 100644 --- a/neutron/agent/metadata/agent.py +++ b/neutron/agent/metadata/agent.py @@ -27,7 +27,7 @@ import six import six.moves.urllib.parse as urlparse import webob -from neutron._i18n import _, _LE, _LW +from neutron._i18n import _ from neutron.agent.linux import utils as agent_utils from neutron.agent import rpc as agent_rpc from neutron.common import cache_utils as cache @@ -92,7 +92,7 @@ class MetadataProxyHandler(object): return webob.exc.HTTPNotFound() except Exception: - LOG.exception(_LE("Unexpected error.")) + LOG.exception("Unexpected error.") msg = _('An unknown error has occurred. 
' 'Please try your request again.') explanation = six.text_type(msg) @@ -198,10 +198,10 @@ class MetadataProxyHandler(object): LOG.debug(str(resp)) return req.response elif resp.status == 403: - LOG.warning(_LW( + LOG.warning( 'The remote metadata server responded with Forbidden. This ' 'response usually occurs when shared secrets do not match.' - )) + ) return webob.exc.HTTPForbidden() elif resp.status == 400: return webob.exc.HTTPBadRequest() @@ -262,12 +262,12 @@ class UnixDomainMetadataProxy(object): use_call=self.agent_state.get('start_flag')) except AttributeError: # This means the server does not support report_state - LOG.warning(_LW('Neutron server does not support state report.' - ' State report for this agent will be disabled.')) + LOG.warning('Neutron server does not support state report.' + ' State report for this agent will be disabled.') self.heartbeat.stop() return except Exception: - LOG.exception(_LE("Failed reporting state!")) + LOG.exception("Failed reporting state!") return self.agent_state.pop('start_flag', None) diff --git a/neutron/agent/ovsdb/impl_vsctl.py b/neutron/agent/ovsdb/impl_vsctl.py index 24dab28549d..4440554c779 100644 --- a/neutron/agent/ovsdb/impl_vsctl.py +++ b/neutron/agent/ovsdb/impl_vsctl.py @@ -22,7 +22,6 @@ from oslo_utils import excutils from oslo_utils import uuidutils import six -from neutron._i18n import _LE from neutron.agent.common import utils from neutron.agent.ovsdb import api as ovsdb @@ -70,8 +69,8 @@ class Transaction(ovsdb.Transaction): except Exception as e: with excutils.save_and_reraise_exception() as ctxt: if self.log_errors: - LOG.error(_LE("Unable to execute %(cmd)s. " - "Exception: %(exception)s"), + LOG.error("Unable to execute %(cmd)s. " + "Exception: %(exception)s", {'cmd': full_args, 'exception': e}) if not self.check_error: ctxt.reraise = False @@ -130,8 +129,8 @@ class DbCommand(BaseCommand): # This shouldn't happen, but if it does and we check_errors # log and raise. 
with excutils.save_and_reraise_exception(): - LOG.error(_LE("Could not parse: %(raw_result)s. " - "Exception: %(exception)s"), + LOG.error("Could not parse: %(raw_result)s. " + "Exception: %(exception)s", {'raw_result': raw_result, 'exception': e}) headings = json['headings'] diff --git a/neutron/agent/securitygroups_rpc.py b/neutron/agent/securitygroups_rpc.py index 21df1ddea5b..48da746de43 100644 --- a/neutron/agent/securitygroups_rpc.py +++ b/neutron/agent/securitygroups_rpc.py @@ -21,7 +21,6 @@ from oslo_config import cfg from oslo_log import log as logging import oslo_messaging -from neutron._i18n import _LI, _LW from neutron.agent import firewall from neutron.api.rpc.handlers import securitygroups_rpc from neutron.conf.agent import securitygroups_rpc as sc_cfg @@ -44,9 +43,9 @@ def _disable_extension(extension, aliases): def disable_security_group_extension_by_config(aliases): if not is_firewall_enabled(): - LOG.info(_LI('Disabled security-group extension.')) + LOG.info('Disabled security-group extension.') _disable_extension('security-group', aliases) - LOG.info(_LI('Disabled allowed-address-pairs extension.')) + LOG.info('Disabled allowed-address-pairs extension.') _disable_extension('allowed-address-pairs', aliases) @@ -91,10 +90,10 @@ class SecurityGroupAgentRpc(object): self.plugin_rpc.security_group_info_for_devices( self.context, devices=[]) except oslo_messaging.UnsupportedVersion: - LOG.warning(_LW('security_group_info_for_devices rpc call not ' - 'supported by the server, falling back to old ' - 'security_group_rules_for_devices which scales ' - 'worse.')) + LOG.warning('security_group_info_for_devices rpc call not ' + 'supported by the server, falling back to old ' + 'security_group_rules_for_devices which scales ' + 'worse.') return False return True @@ -103,8 +102,8 @@ class SecurityGroupAgentRpc(object): def decorated_function(self, *args, **kwargs): if (isinstance(self.firewall, firewall.NoopFirewallDriver) or not is_firewall_enabled()): - 
LOG.info(_LI("Skipping method %s as firewall is disabled " - "or configured as NoopFirewallDriver."), + LOG.info("Skipping method %s as firewall is disabled " + "or configured as NoopFirewallDriver.", func.__name__) else: return func(self, # pylint: disable=not-callable @@ -115,7 +114,7 @@ class SecurityGroupAgentRpc(object): def prepare_devices_filter(self, device_ids): if not device_ids: return - LOG.info(_LI("Preparing filters for devices %s"), device_ids) + LOG.info("Preparing filters for devices %s", device_ids) self._apply_port_filter(device_ids) def _apply_port_filter(self, device_ids, update_filter=False): @@ -155,16 +154,16 @@ class SecurityGroupAgentRpc(object): remote_sg_id, member_ips) def security_groups_rule_updated(self, security_groups): - LOG.info(_LI("Security group " - "rule updated %r"), security_groups) + LOG.info("Security group " + "rule updated %r", security_groups) self._security_group_updated( security_groups, 'security_groups', 'sg_rule') def security_groups_member_updated(self, security_groups): - LOG.info(_LI("Security group " - "member updated %r"), security_groups) + LOG.info("Security group " + "member updated %r", security_groups) self._security_group_updated( security_groups, 'security_group_source_groups', @@ -188,7 +187,7 @@ class SecurityGroupAgentRpc(object): self.refresh_firewall(devices) def security_groups_provider_updated(self, port_ids_to_update): - LOG.info(_LI("Provider rule updated")) + LOG.info("Provider rule updated") if port_ids_to_update is None: # Update all devices if self.defer_refresh_firewall: @@ -211,7 +210,7 @@ class SecurityGroupAgentRpc(object): def remove_devices_filter(self, device_ids): if not device_ids: return - LOG.info(_LI("Remove device filter for %r"), device_ids) + LOG.info("Remove device filter for %r", device_ids) with self.firewall.defer_apply(): for device_id in device_ids: device = self.firewall.ports.get(device_id) @@ -222,11 +221,11 @@ class SecurityGroupAgentRpc(object): 
@skip_if_noopfirewall_or_firewall_disabled def refresh_firewall(self, device_ids=None): - LOG.info(_LI("Refresh firewall rules")) + LOG.info("Refresh firewall rules") if not device_ids: device_ids = self.firewall.ports.keys() if not device_ids: - LOG.info(_LI("No ports here to refresh firewall")) + LOG.info("No ports here to refresh firewall") return self._apply_port_filter(device_ids, update_filter=True) diff --git a/neutron/agent/windows/ip_lib.py b/neutron/agent/windows/ip_lib.py index cd2439134b2..bae94e4fe98 100644 --- a/neutron/agent/windows/ip_lib.py +++ b/neutron/agent/windows/ip_lib.py @@ -17,8 +17,6 @@ import netifaces from oslo_log import log as logging -from neutron._i18n import _LE - LOG = logging.getLogger(__name__) OPTS = [] @@ -38,7 +36,7 @@ class IPWrapper(object): try: return [IPDevice(iface) for iface in netifaces.interfaces()] except (OSError, MemoryError): - LOG.error(_LE("Failed to get network interfaces.")) + LOG.error("Failed to get network interfaces.") return [] @@ -52,11 +50,11 @@ class IPDevice(object): try: device_addresses = netifaces.ifaddresses(self.name) except ValueError: - LOG.error(_LE("The device does not exist on the system: %s."), + LOG.error("The device does not exist on the system: %s.", self.name) return except OSError: - LOG.error(_LE("Failed to get interface addresses: %s."), + LOG.error("Failed to get interface addresses: %s.", self.name) return return device_addresses diff --git a/neutron/api/api_common.py b/neutron/api/api_common.py index b8155c05b05..aac92bf368c 100644 --- a/neutron/api/api_common.py +++ b/neutron/api/api_common.py @@ -24,7 +24,7 @@ from oslo_serialization import jsonutils from six.moves.urllib import parse from webob import exc -from neutron._i18n import _, _LW +from neutron._i18n import _ from neutron.api import extensions from neutron.common import constants from neutron import wsgi @@ -153,8 +153,8 @@ def _get_pagination_max_limit(): if max_limit == 0: raise ValueError() except ValueError: - 
LOG.warning(_LW("Invalid value for pagination_max_limit: %s. It " - "should be an integer greater to 0"), + LOG.warning("Invalid value for pagination_max_limit: %s. It " + "should be an integer greater to 0", cfg.CONF.pagination_max_limit) return max_limit diff --git a/neutron/api/extensions.py b/neutron/api/extensions.py index 927e434272b..bcddb6f69e1 100644 --- a/neutron/api/extensions.py +++ b/neutron/api/extensions.py @@ -26,7 +26,7 @@ import routes import webob.dec import webob.exc -from neutron._i18n import _, _LE, _LI, _LW +from neutron._i18n import _ from neutron.common import exceptions from neutron import extensions as core_extensions from neutron.plugins.common import constants as const @@ -283,7 +283,7 @@ class ExtensionManager(object): """ def __init__(self, path): - LOG.info(_LI('Initializing extension manager.')) + LOG.info('Initializing extension manager.') self.path = path self.extensions = {} self._load_all_extensions() @@ -359,10 +359,10 @@ class ExtensionManager(object): break if exts_to_process: unloadable_extensions = set(exts_to_process.keys()) - LOG.error(_LE("Unable to process extensions (%s) because " - "the configured plugins do not satisfy " - "their requirements. Some features will not " - "work as expected."), + LOG.error("Unable to process extensions (%s) because " + "the configured plugins do not satisfy " + "their requirements. Some features will not " + "work as expected.", ', '.join(unloadable_extensions)) self._check_faulty_extensions(unloadable_extensions) # Extending extensions' attributes map. 
@@ -398,7 +398,7 @@ class ExtensionManager(object): 'desc': extension.get_description(), 'updated': extension.get_updated()}) except AttributeError: - LOG.exception(_LE("Exception loading extension")) + LOG.exception("Exception loading extension") return False return isinstance(extension, api_extensions.ExtensionDescriptor) @@ -417,7 +417,7 @@ class ExtensionManager(object): if os.path.exists(path): self._load_all_extensions_from_path(path) else: - LOG.error(_LE("Extension path '%s' doesn't exist!"), path) + LOG.error("Extension path '%s' doesn't exist!", path) def _load_all_extensions_from_path(self, path): # Sorting the extension list makes the order in which they @@ -433,16 +433,16 @@ class ExtensionManager(object): ext_name = mod_name.capitalize() new_ext_class = getattr(mod, ext_name, None) if not new_ext_class: - LOG.warning(_LW('Did not find expected name ' - '"%(ext_name)s" in %(file)s'), + LOG.warning('Did not find expected name ' + '"%(ext_name)s" in %(file)s', {'ext_name': ext_name, 'file': ext_path}) continue new_ext = new_ext_class() self.add_extension(new_ext) except Exception as exception: - LOG.warning(_LW("Extension file %(f)s wasn't loaded due to " - "%(exception)s"), + LOG.warning("Extension file %(f)s wasn't loaded due to " + "%(exception)s", {'f': f, 'exception': exception}) def add_extension(self, ext): @@ -451,7 +451,7 @@ class ExtensionManager(object): return alias = ext.get_alias() - LOG.info(_LI('Loaded extension: %s'), alias) + LOG.info('Loaded extension: %s', alias) if alias in self.extensions: raise exceptions.DuplicatedExtension(alias=alias) @@ -485,9 +485,8 @@ class PluginAwareExtensionManager(ExtensionManager): alias = extension.get_alias() supports_extension = alias in self.get_supported_extension_aliases() if not supports_extension: - LOG.info(_LI("Extension %s not supported by any of loaded " - "plugins"), - alias) + LOG.info("Extension %s not supported by any of loaded " + "plugins", alias) return supports_extension def 
_plugins_implement_interface(self, extension): @@ -496,8 +495,8 @@ class PluginAwareExtensionManager(ExtensionManager): for plugin in self.plugins.values(): if isinstance(plugin, extension.get_plugin_interface()): return True - LOG.warning(_LW("Loaded plugins do not implement extension " - "%s interface"), + LOG.warning("Loaded plugins do not implement extension " + "%s interface", extension.get_alias()) return False diff --git a/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py b/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py index 24dcf2e8a92..64a2a67be16 100644 --- a/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py +++ b/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py @@ -22,7 +22,6 @@ from oslo_config import cfg from oslo_log import log as logging import oslo_messaging -from neutron._i18n import _LE, _LW from neutron.common import constants as n_const from neutron.common import rpc as n_rpc from neutron.common import topics @@ -105,9 +104,9 @@ class DhcpAgentNotifyAPI(object): context, 'network_create_end', {'network': {'id': network['id']}}, agent['host']) elif not existing_agents: - LOG.warning(_LW('Unable to schedule network %s: no agents ' - 'available; will retry on subsequent port ' - 'and subnet creation events.'), + LOG.warning('Unable to schedule network %s: no agents ' + 'available; will retry on subsequent port ' + 'and subnet creation events.', network['id']) return new_agents + existing_agents @@ -123,10 +122,10 @@ class DhcpAgentNotifyAPI(object): len_enabled_agents = len(enabled_agents) len_active_agents = len(active_agents) if len_active_agents < len_enabled_agents: - LOG.warning(_LW("Only %(active)d of %(total)d DHCP agents " - "associated with network '%(net_id)s' " - "are marked as active, so notifications " - "may be sent to inactive agents."), + LOG.warning("Only %(active)d of %(total)d DHCP agents " + "associated with network '%(net_id)s' " + "are marked as active, so notifications " + "may be sent to inactive agents.", 
{'active': len_active_agents, 'total': len_enabled_agents, 'net_id': network_id}) @@ -136,9 +135,9 @@ class DhcpAgentNotifyAPI(object): notification_required = ( num_ports > 0 and len(network['subnets']) >= 1) if notification_required: - LOG.error(_LE("Will not send event %(method)s for network " - "%(net_id)s: no agent available. Payload: " - "%(payload)s"), + LOG.error("Will not send event %(method)s for network " + "%(net_id)s: no agent available. Payload: " + "%(payload)s", {'method': method, 'net_id': network_id, 'payload': payload}) diff --git a/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py b/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py index fff43c3404d..cde4c77c008 100644 --- a/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py +++ b/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py @@ -21,7 +21,6 @@ from neutron_lib.plugins import directory from oslo_log import log as logging import oslo_messaging -from neutron._i18n import _LE from neutron.api.rpc.agentnotifiers import utils as ag_utils from neutron.common import rpc as n_rpc from neutron.common import topics @@ -88,8 +87,8 @@ class L3AgentNotifyAPI(object): """Notify all the agents that are hosting the routers.""" plugin = directory.get_plugin(plugin_constants.L3) if not plugin: - LOG.error(_LE('No plugin for L3 routing registered. Cannot notify ' - 'agents with the message %s'), method) + LOG.error('No plugin for L3 routing registered. 
Cannot notify ' + 'agents with the message %s', method) return if utils.is_extension_supported( plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): diff --git a/neutron/api/rpc/agentnotifiers/utils.py b/neutron/api/rpc/agentnotifiers/utils.py index 65ae99a9d63..dd004fc2576 100644 --- a/neutron/api/rpc/agentnotifiers/utils.py +++ b/neutron/api/rpc/agentnotifiers/utils.py @@ -17,8 +17,6 @@ from oslo_log import log as logging import oslo_messaging from oslo_utils import excutils -from neutron._i18n import _LW - LOG = logging.getLogger(__name__) @@ -42,8 +40,8 @@ def _call_with_retry(max_attempts): with excutils.save_and_reraise_exception( reraise=False) as ctxt: LOG.warning( - _LW('Failed to execute %(action)s. %(attempt)d out' - ' of %(max_attempts)d'), + 'Failed to execute %(action)s. %(attempt)d out' + ' of %(max_attempts)d', {'attempt': attempt, 'max_attempts': max_attempts, 'action': action}) diff --git a/neutron/api/rpc/handlers/dhcp_rpc.py b/neutron/api/rpc/handlers/dhcp_rpc.py index 1001566f816..56817611114 100644 --- a/neutron/api/rpc/handlers/dhcp_rpc.py +++ b/neutron/api/rpc/handlers/dhcp_rpc.py @@ -28,7 +28,7 @@ from oslo_log import log as logging import oslo_messaging from oslo_utils import excutils -from neutron._i18n import _, _LW +from neutron._i18n import _ from neutron.common import constants as n_const from neutron.common import exceptions as n_exc from neutron.common import utils @@ -120,9 +120,9 @@ class DhcpRpcCallback(object): ctxt.reraise = True if ctxt.reraise: net_id = port['port']['network_id'] - LOG.warning(_LW("Action %(action)s for network %(net_id)s " - "could not complete successfully: " - "%(reason)s"), + LOG.warning("Action %(action)s for network %(net_id)s " + "could not complete successfully: " + "%(reason)s", {"action": action, "net_id": net_id, 'reason': e}) diff --git a/neutron/api/rpc/handlers/securitygroups_rpc.py b/neutron/api/rpc/handlers/securitygroups_rpc.py index 379d7782436..44d0ecf0474 100644 --- 
a/neutron/api/rpc/handlers/securitygroups_rpc.py +++ b/neutron/api/rpc/handlers/securitygroups_rpc.py @@ -19,7 +19,6 @@ from neutron_lib.utils import net from oslo_log import log as logging import oslo_messaging -from neutron._i18n import _LW from neutron.api.rpc.handlers import resources_rpc from neutron.callbacks import events from neutron.callbacks import registry @@ -192,9 +191,9 @@ class SecurityGroupAgentRpcCallbackMixin(object): sg_agent = None def _security_groups_agent_not_set(self): - LOG.warning(_LW("Security group agent binding currently not set. " - "This should be set by the end of the init " - "process.")) + LOG.warning("Security group agent binding currently not set. " + "This should be set by the end of the init " + "process.") def security_groups_rule_updated(self, context, **kwargs): """Callback for security group rule update. diff --git a/neutron/api/v2/base.py b/neutron/api/v2/base.py index 9c21a70b794..857dc1903f2 100644 --- a/neutron/api/v2/base.py +++ b/neutron/api/v2/base.py @@ -26,7 +26,7 @@ from oslo_policy import policy as oslo_policy from oslo_utils import excutils import webob.exc -from neutron._i18n import _, _LE, _LI +from neutron._i18n import _ from neutron.api import api_common from neutron.api.v2 import resource as wsgi_resource from neutron.common import constants as n_const @@ -125,8 +125,8 @@ class Controller(object): _("Native pagination depend on native sorting") ) if not self._allow_sorting: - LOG.info(_LI("Allow sorting is enabled because native " - "pagination requires native sorting")) + LOG.info("Allow sorting is enabled because native " + "pagination requires native sorting") self._allow_sorting = True self.parent = parent if parent: @@ -419,8 +419,8 @@ class Controller(object): except Exception: # broad catch as our only purpose is to log the # exception - LOG.exception(_LE("Unable to undo add for " - "%(resource)s %(id)s"), + LOG.exception("Unable to undo add for " + "%(resource)s %(id)s", {'resource': self._resource, 
'id': obj['id']}) # TODO(salvatore-orlando): The object being processed when the diff --git a/neutron/api/v2/resource.py b/neutron/api/v2/resource.py index 5721ab5b720..a272a13734c 100644 --- a/neutron/api/v2/resource.py +++ b/neutron/api/v2/resource.py @@ -21,7 +21,6 @@ from oslo_log import log as logging import webob.dec import webob.exc -from neutron._i18n import _LE, _LI from neutron.api import api_common from neutron.common import utils from neutron import wsgi @@ -101,16 +100,15 @@ def Resource(controller, faults=None, deserializers=None, serializers=None, mapped_exc = api_common.convert_exception_to_http_exc(e, faults, language) if hasattr(mapped_exc, 'code') and 400 <= mapped_exc.code < 500: - LOG.info(_LI('%(action)s failed (client error): %(exc)s'), + LOG.info('%(action)s failed (client error): %(exc)s', {'action': action, 'exc': mapped_exc}) else: - LOG.exception( - _LE('%(action)s failed: %(details)s'), - { - 'action': action, - 'details': utils.extract_exc_details(e), - } - ) + LOG.exception('%(action)s failed: %(details)s', + { + 'action': action, + 'details': utils.extract_exc_details(e), + } + ) raise mapped_exc status = action_status.get(action, 200) diff --git a/neutron/cmd/ipset_cleanup.py b/neutron/cmd/ipset_cleanup.py index 053b1791417..914e5e12d7b 100644 --- a/neutron/cmd/ipset_cleanup.py +++ b/neutron/cmd/ipset_cleanup.py @@ -16,7 +16,6 @@ from oslo_config import cfg from oslo_log import log as logging -from neutron._i18n import _LE, _LI from neutron.agent.linux import utils from neutron.common import config from neutron.conf.agent import cmd as command @@ -43,7 +42,7 @@ def remove_iptables_reference(ipset): if ipset in iptables_save: cmd = ['iptables'] if 'IPv4' in ipset else ['ip6tables'] - LOG.info(_LI("Removing iptables rule for IPset: %s"), ipset) + LOG.info("Removing iptables rule for IPset: %s", ipset) for rule in iptables_save.splitlines(): if '--match-set %s ' % ipset in rule and rule.startswith('-A'): # change to delete @@ -52,8 
+51,8 @@ def remove_iptables_reference(ipset): try: utils.execute(cmd + params, run_as_root=True) except Exception: - LOG.exception(_LE('Error, unable to remove iptables rule ' - 'for IPset: %s'), ipset) + LOG.exception('Error, unable to remove iptables rule ' + 'for IPset: %s', ipset) def destroy_ipset(conf, ipset): @@ -62,17 +61,17 @@ def destroy_ipset(conf, ipset): if conf.force: remove_iptables_reference(ipset) - LOG.info(_LI("Destroying IPset: %s"), ipset) + LOG.info("Destroying IPset: %s", ipset) cmd = ['ipset', 'destroy', ipset] try: utils.execute(cmd, run_as_root=True) except Exception: - LOG.exception(_LE('Error, unable to destroy IPset: %s'), ipset) + LOG.exception('Error, unable to destroy IPset: %s', ipset) def cleanup_ipsets(conf): # Identify ipsets for destruction. - LOG.info(_LI("Destroying IPsets with prefix: %s"), conf.prefix) + LOG.info("Destroying IPsets with prefix: %s", conf.prefix) cmd = ['ipset', '-L', '-n'] ipsets = utils.execute(cmd, run_as_root=True) @@ -80,7 +79,7 @@ def cleanup_ipsets(conf): if conf.allsets or ipset.startswith(conf.prefix): destroy_ipset(conf, ipset) - LOG.info(_LI("IPset cleanup completed successfully")) + LOG.info("IPset cleanup completed successfully") def main(): diff --git a/neutron/cmd/linuxbridge_cleanup.py b/neutron/cmd/linuxbridge_cleanup.py index 0965da8d719..9853c6e2b7b 100644 --- a/neutron/cmd/linuxbridge_cleanup.py +++ b/neutron/cmd/linuxbridge_cleanup.py @@ -16,7 +16,6 @@ from neutron_lib.utils import helpers from oslo_config import cfg from oslo_log import log as logging -from neutron._i18n import _LE, _LI from neutron.common import config from neutron.plugins.ml2.drivers.linuxbridge.agent \ import linuxbridge_neutron_agent @@ -30,17 +29,17 @@ def remove_empty_bridges(): interface_mappings = helpers.parse_mappings( cfg.CONF.LINUX_BRIDGE.physical_interface_mappings) except ValueError as e: - LOG.error(_LE("Parsing physical_interface_mappings failed: %s."), e) + LOG.error("Parsing physical_interface_mappings 
failed: %s.", e) sys.exit(1) - LOG.info(_LI("Interface mappings: %s."), interface_mappings) + LOG.info("Interface mappings: %s.", interface_mappings) try: bridge_mappings = helpers.parse_mappings( cfg.CONF.LINUX_BRIDGE.bridge_mappings) except ValueError as e: - LOG.error(_LE("Parsing bridge_mappings failed: %s."), e) + LOG.error("Parsing bridge_mappings failed: %s.", e) sys.exit(1) - LOG.info(_LI("Bridge mappings: %s."), bridge_mappings) + LOG.info("Bridge mappings: %s.", bridge_mappings) lb_manager = linuxbridge_neutron_agent.LinuxBridgeManager( bridge_mappings, interface_mappings) @@ -52,10 +51,10 @@ def remove_empty_bridges(): try: lb_manager.delete_bridge(bridge_name) - LOG.info(_LI("Linux bridge %s deleted"), bridge_name) + LOG.info("Linux bridge %s deleted", bridge_name) except RuntimeError: - LOG.exception(_LE("Linux bridge %s delete failed"), bridge_name) - LOG.info(_LI("Linux bridge cleanup completed successfully")) + LOG.exception("Linux bridge %s delete failed", bridge_name) + LOG.info("Linux bridge cleanup completed successfully") def main(): diff --git a/neutron/cmd/netns_cleanup.py b/neutron/cmd/netns_cleanup.py index b16f82b2a06..247f6e53c0d 100644 --- a/neutron/cmd/netns_cleanup.py +++ b/neutron/cmd/netns_cleanup.py @@ -23,7 +23,6 @@ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils -from neutron._i18n import _LE, _LW from neutron.agent.common import ovs_lib from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns @@ -197,7 +196,7 @@ def _kill_listen_processes(namespace, force=False): # implementation in the right module. 
Ideally, netns_cleanup wouldn't # kill any processes as the responsible module should've killed them # before cleaning up the namespace - LOG.warning(_LW("Killing (%(signal)d) [%(pid)s] %(cmdline)s"), + LOG.warning("Killing (%(signal)d) [%(pid)s] %(cmdline)s", {'signal': kill_signal, 'pid': pid, 'cmdline': ' '.join(utils.get_cmdline_from_pid(pid))[:80] @@ -205,8 +204,8 @@ def _kill_listen_processes(namespace, force=False): try: utils.kill_process(pid, kill_signal, run_as_root=True) except Exception as ex: - LOG.error(_LE('An error occurred while killing ' - '[%(pid)s]: %(msg)s'), {'pid': pid, 'msg': ex}) + LOG.error('An error occurred while killing ' + '[%(pid)s]: %(msg)s', {'pid': pid, 'msg': ex}) return len(pids) @@ -246,14 +245,14 @@ def destroy_namespace(conf, namespace, force=False): # This is unlikely since, at this point, we have SIGKILLed # all remaining processes but if there are still some, log # the error and continue with the cleanup - LOG.error(_LE('Not all processes were killed in %s'), + LOG.error('Not all processes were killed in %s', namespace) for device in ip.get_devices(): unplug_device(conf, device) ip.garbage_collect_namespace() except Exception: - LOG.exception(_LE('Error unable to destroy namespace: %s'), namespace) + LOG.exception('Error unable to destroy namespace: %s', namespace) def cleanup_network_namespaces(conf): diff --git a/neutron/cmd/ovs_cleanup.py b/neutron/cmd/ovs_cleanup.py index c781fdf77fe..dc53a6ca3e8 100644 --- a/neutron/cmd/ovs_cleanup.py +++ b/neutron/cmd/ovs_cleanup.py @@ -16,7 +16,6 @@ from oslo_config import cfg from oslo_log import log as logging -from neutron._i18n import _LI from neutron.agent.common import ovs_lib from neutron.agent.linux import interface from neutron.agent.linux import ip_lib @@ -73,7 +72,7 @@ def delete_neutron_ports(ports): device = ip_lib.IPDevice(port) if device.exists(): device.link.delete() - LOG.info(_LI("Deleting port: %s"), port) + LOG.info("Deleting port: %s", port) def main(): @@ -103,7 
+102,7 @@ def main(): ports = collect_neutron_ports(available_configuration_bridges) for bridge in bridges: - LOG.info(_LI("Cleaning bridge: %s"), bridge) + LOG.info("Cleaning bridge: %s", bridge) ovs = ovs_lib.OVSBridge(bridge) if conf.ovs_all_ports: port_names = ovs.get_port_name_list() @@ -115,4 +114,4 @@ def main(): # Remove remaining ports created by Neutron (usually veth pair) delete_neutron_ports(ports) - LOG.info(_LI("OVS cleanup completed successfully")) + LOG.info("OVS cleanup completed successfully") diff --git a/neutron/cmd/sanity/checks.py b/neutron/cmd/sanity/checks.py index 8ea755547cc..753c7f6f517 100644 --- a/neutron/cmd/sanity/checks.py +++ b/neutron/cmd/sanity/checks.py @@ -23,7 +23,6 @@ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils -from neutron._i18n import _LE from neutron.agent.common import ovs_lib from neutron.agent.l3 import ha_router from neutron.agent.l3 import namespaces @@ -104,8 +103,8 @@ def ofctl_arg_supported(cmd, **kwargs): "command %s. Exception: %s", full_args, e) return False except Exception: - LOG.exception(_LE("Unexpected exception while checking supported" - " feature via command: %s"), full_args) + LOG.exception("Unexpected exception while checking supported" + " feature via command: %s", full_args) return False else: return True @@ -157,8 +156,8 @@ def _vf_management_support(required_caps): LOG.debug("ip link command does not support " "vf capability '%(cap)s'", {'cap': cap}) except ip_link_support.UnsupportedIpLinkCommand: - LOG.exception(_LE("Unexpected exception while checking supported " - "ip link command")) + LOG.exception("Unexpected exception while checking supported " + "ip link command") return False return is_supported @@ -362,11 +361,11 @@ def ovsdb_native_supported(): ovs.get_bridges() return True except ImportError as ex: - LOG.error(_LE("Failed to import required modules. Ensure that the " - "python-openvswitch package is installed. 
Error: %s"), + LOG.error("Failed to import required modules. Ensure that the " + "python-openvswitch package is installed. Error: %s", ex) except Exception: - LOG.exception(_LE("Unexpected exception occurred.")) + LOG.exception("Unexpected exception occurred.") return False diff --git a/neutron/cmd/sanity_check.py b/neutron/cmd/sanity_check.py index 0e1997ec43e..e7de8c9afb8 100644 --- a/neutron/cmd/sanity_check.py +++ b/neutron/cmd/sanity_check.py @@ -18,7 +18,7 @@ import sys from oslo_config import cfg from oslo_log import log as logging -from neutron._i18n import _, _LE, _LW +from neutron._i18n import _ from neutron.agent import dhcp_agent from neutron.cmd.sanity import checks from neutron.common import config @@ -52,52 +52,52 @@ class BoolOptCallback(cfg.BoolOpt): def check_ovs_vxlan(): result = checks.ovs_vxlan_supported() if not result: - LOG.error(_LE('Check for Open vSwitch VXLAN support failed. ' - 'Please ensure that the version of openvswitch ' - 'being used has VXLAN support.')) + LOG.error('Check for Open vSwitch VXLAN support failed. ' + 'Please ensure that the version of openvswitch ' + 'being used has VXLAN support.') return result def check_ovs_geneve(): result = checks.ovs_geneve_supported() if not result: - LOG.error(_LE('Check for Open vSwitch Geneve support failed. ' - 'Please ensure that the version of openvswitch ' - 'and kernel being used has Geneve support.')) + LOG.error('Check for Open vSwitch Geneve support failed. ' + 'Please ensure that the version of openvswitch ' + 'and kernel being used has Geneve support.') return result def check_iproute2_vxlan(): result = checks.iproute2_vxlan_supported() if not result: - LOG.error(_LE('Check for iproute2 VXLAN support failed. Please ensure ' - 'that the iproute2 has VXLAN support.')) + LOG.error('Check for iproute2 VXLAN support failed. 
Please ensure ' + 'that the iproute2 has VXLAN support.') return result def check_ovs_patch(): result = checks.patch_supported() if not result: - LOG.error(_LE('Check for Open vSwitch patch port support failed. ' - 'Please ensure that the version of openvswitch ' - 'being used has patch port support or disable features ' - 'requiring patch ports (gre/vxlan, etc.).')) + LOG.error('Check for Open vSwitch patch port support failed. ' + 'Please ensure that the version of openvswitch ' + 'being used has patch port support or disable features ' + 'requiring patch ports (gre/vxlan, etc.).') return result def check_read_netns(): required = checks.netns_read_requires_helper() if not required and cfg.CONF.AGENT.use_helper_for_ns_read: - LOG.warning(_LW("The user that is executing neutron can read the " - "namespaces without using the root_helper. Disable " - "the use_helper_for_ns_read option to avoid a " - "performance impact.")) + LOG.warning("The user that is executing neutron can read the " + "namespaces without using the root_helper. Disable " + "the use_helper_for_ns_read option to avoid a " + "performance impact.") # Don't fail because nothing is actually broken. Just not optimal. result = True elif required and not cfg.CONF.AGENT.use_helper_for_ns_read: - LOG.error(_LE("The user that is executing neutron does not have " - "permissions to read the namespaces. Enable the " - "use_helper_for_ns_read configuration option.")) + LOG.error("The user that is executing neutron does not have " + "permissions to read the namespaces. Enable the " + "use_helper_for_ns_read configuration option.") result = False else: # everything is configured appropriately @@ -112,8 +112,8 @@ def check_read_netns(): def check_dnsmasq_version(): result = checks.dnsmasq_version_supported() if not result: - LOG.error(_LE('The installed version of dnsmasq is too old. ' - 'Please update to at least version %s.'), + LOG.error('The installed version of dnsmasq is too old. 
' + 'Please update to at least version %s.', checks.get_minimal_dnsmasq_version_supported()) return result @@ -121,17 +121,17 @@ def check_dnsmasq_version(): def check_keepalived_ipv6_support(): result = checks.keepalived_ipv6_supported() if not result: - LOG.error(_LE('The installed version of keepalived does not support ' - 'IPv6. Please update to at least version 1.2.10 for ' - 'IPv6 support.')) + LOG.error('The installed version of keepalived does not support ' + 'IPv6. Please update to at least version 1.2.10 for ' + 'IPv6 support.') return result def check_dibbler_version(): result = checks.dibbler_version_supported() if not result: - LOG.error(_LE('The installed version of dibbler-client is too old. ' - 'Please update to at least version %s.'), + LOG.error('The installed version of dibbler-client is too old. ' + 'Please update to at least version %s.', checks.get_minimal_dibbler_version_supported()) return result @@ -139,56 +139,56 @@ def check_dibbler_version(): def check_nova_notify(): result = checks.nova_notify_supported() if not result: - LOG.error(_LE('Nova notifications are enabled, but novaclient is not ' - 'installed. Either disable nova notifications or ' - 'install python-novaclient.')) + LOG.error('Nova notifications are enabled, but novaclient is not ' + 'installed. Either disable nova notifications or ' + 'install python-novaclient.') return result def check_arp_responder(): result = checks.arp_responder_supported() if not result: - LOG.error(_LE('Check for Open vSwitch ARP responder support failed. ' - 'Please ensure that the version of openvswitch ' - 'being used has ARP flows support.')) + LOG.error('Check for Open vSwitch ARP responder support failed. ' + 'Please ensure that the version of openvswitch ' + 'being used has ARP flows support.') return result def check_arp_header_match(): result = checks.arp_header_match_supported() if not result: - LOG.error(_LE('Check for Open vSwitch support of ARP header matching ' - 'failed. 
ARP spoofing suppression will not work. A ' - 'newer version of OVS is required.')) + LOG.error('Check for Open vSwitch support of ARP header matching ' + 'failed. ARP spoofing suppression will not work. A ' + 'newer version of OVS is required.') return result def check_icmpv6_header_match(): result = checks.icmpv6_header_match_supported() if not result: - LOG.error(_LE('Check for Open vSwitch support of ICMPv6 header ' - 'matching failed. ICMPv6 Neighbor Advt spoofing (part ' - 'of arp spoofing) suppression will not work. A newer ' - 'version of OVS is required.')) + LOG.error('Check for Open vSwitch support of ICMPv6 header ' + 'matching failed. ICMPv6 Neighbor Advt spoofing (part ' + 'of arp spoofing) suppression will not work. A newer ' + 'version of OVS is required.') return result def check_vf_management(): result = checks.vf_management_supported() if not result: - LOG.error(_LE('Check for VF management support failed. ' - 'Please ensure that the version of ip link ' - 'being used has VF support.')) + LOG.error('Check for VF management support failed. ' + 'Please ensure that the version of ip link ' + 'being used has VF support.') return result def check_vf_extended_management(): result = checks.vf_extended_management_supported() if not result: - LOG.error(_LE('Check for VF extended management support failed. ' - 'Please ensure that the version of ip link ' - 'being used has VF extended support: version ' - '"iproute2-ss140804", git tag "v3.16.0"')) + LOG.error('Check for VF extended management support failed. 
' + 'Please ensure that the version of ip link ' + 'being used has VF extended support: version ' + '"iproute2-ss140804", git tag "v3.16.0"') return result @@ -196,67 +196,67 @@ def check_ovsdb_native(): cfg.CONF.set_override('ovsdb_interface', 'native', group='OVS') result = checks.ovsdb_native_supported() if not result: - LOG.error(_LE('Check for native OVSDB support failed.')) + LOG.error('Check for native OVSDB support failed.') return result def check_ovs_conntrack(): result = checks.ovs_conntrack_supported() if not result: - LOG.error(_LE('Check for Open vSwitch support of conntrack support ' - 'failed. OVS/CT firewall will not work. A newer ' - 'version of OVS (2.5+) and linux kernel (4.3+) are ' - 'required. See ' - 'https://github.com/openvswitch/ovs/blob/master/FAQ.md ' - 'for more information.')) + LOG.error('Check for Open vSwitch support of conntrack support ' + 'failed. OVS/CT firewall will not work. A newer ' + 'version of OVS (2.5+) and linux kernel (4.3+) are ' + 'required. See ' + 'https://github.com/openvswitch/ovs/blob/master/FAQ.md ' + 'for more information.') return result def check_ebtables(): result = checks.ebtables_supported() if not result: - LOG.error(_LE('Cannot run ebtables. Please ensure that it ' - 'is installed.')) + LOG.error('Cannot run ebtables. Please ensure that it ' + 'is installed.') return result def check_ipset(): result = checks.ipset_supported() if not result: - LOG.error(_LE('Cannot run ipset. Please ensure that it ' - 'is installed.')) + LOG.error('Cannot run ipset. Please ensure that it ' + 'is installed.') return result def check_ip6tables(): result = checks.ip6tables_supported() if not result: - LOG.error(_LE('Cannot run ip6tables. Please ensure that it ' - 'is installed.')) + LOG.error('Cannot run ip6tables. Please ensure that it ' + 'is installed.') return result def check_conntrack(): result = checks.conntrack_supported() if not result: - LOG.error(_LE('Cannot run conntrack. 
Please ensure that it ' - 'is installed.')) + LOG.error('Cannot run conntrack. Please ensure that it ' + 'is installed.') return result def check_dhcp_release6(): result = checks.dhcp_release6_supported() if not result: - LOG.error(_LE('No dhcp_release6 tool detected. The installed version ' - 'of dnsmasq does not support releasing IPv6 leases. ' - 'Please update to at least version %s if you need this ' - 'feature. If you do not use IPv6 stateful subnets you ' - 'can continue to use this version of dnsmasq, as ' - 'other IPv6 address assignment mechanisms besides ' - 'stateful DHCPv6 should continue to work without ' - 'the dhcp_release6 utility. ' - 'Current version of dnsmasq is ok if other checks ' - 'pass.'), + LOG.error('No dhcp_release6 tool detected. The installed version ' + 'of dnsmasq does not support releasing IPv6 leases. ' + 'Please update to at least version %s if you need this ' + 'feature. If you do not use IPv6 stateful subnets you ' + 'can continue to use this version of dnsmasq, as ' + 'other IPv6 address assignment mechanisms besides ' + 'stateful DHCPv6 should continue to work without ' + 'the dhcp_release6 utility. ' + 'Current version of dnsmasq is ok if other checks ' + 'pass.', checks.get_dnsmasq_version_with_dhcp_release6()) return result @@ -264,19 +264,19 @@ def check_dhcp_release6(): def check_bridge_firewalling_enabled(): result = checks.bridge_firewalling_enabled() if not result: - LOG.error(_LE('Bridge firewalling is not enabled. It may be the case ' - 'that bridge and/or br_netfilter kernel modules are not ' - 'loaded. Alternatively, corresponding sysctl settings ' - 'may be overridden to disable it by default.')) + LOG.error('Bridge firewalling is not enabled. It may be the case ' + 'that bridge and/or br_netfilter kernel modules are not ' + 'loaded. 
Alternatively, corresponding sysctl settings ' + 'may be overridden to disable it by default.') return result def check_ip_nonlocal_bind(): result = checks.ip_nonlocal_bind() if not result: - LOG.error(_LE('This kernel does not isolate ip_nonlocal_bind kernel ' - 'option in namespaces. Please update to kernel ' - 'version > 3.19.')) + LOG.error('This kernel does not isolate ip_nonlocal_bind kernel ' + 'option in namespaces. Please update to kernel ' + 'version > 3.19.') return result diff --git a/neutron/common/config.py b/neutron/common/config.py index 13714ffd6d9..75eebfc85b6 100644 --- a/neutron/common/config.py +++ b/neutron/common/config.py @@ -27,7 +27,7 @@ import oslo_messaging from oslo_middleware import cors from oslo_service import wsgi -from neutron._i18n import _, _LI +from neutron._i18n import _ from neutron.conf import common as common_config from neutron import policy from neutron import version @@ -97,8 +97,8 @@ def setup_logging(): logging.set_defaults(default_log_levels=logging.get_default_log_levels() + EXTRA_LOG_LEVEL_DEFAULTS) logging.setup(cfg.CONF, product_name) - LOG.info(_LI("Logging enabled!")) - LOG.info(_LI("%(prog)s version %(version)s"), + LOG.info("Logging enabled!") + LOG.info("%(prog)s version %(version)s", {'prog': sys.argv[0], 'version': version.version_info.release_string()}) LOG.debug("command line: %s", " ".join(sys.argv)) diff --git a/neutron/common/ipv6_utils.py b/neutron/common/ipv6_utils.py index 38a13d74805..d0fc0fa02b4 100644 --- a/neutron/common/ipv6_utils.py +++ b/neutron/common/ipv6_utils.py @@ -23,8 +23,6 @@ import netaddr from neutron_lib import constants as const from oslo_log import log -from neutron._i18n import _LI - LOG = log.getLogger(__name__) _IS_IPV6_ENABLED = None @@ -45,10 +43,10 @@ def is_enabled_and_bind_by_default(): else: _IS_IPV6_ENABLED = False if not _IS_IPV6_ENABLED: - LOG.info(_LI("IPv6 not present or configured not to bind to new " - "interfaces on this system. 
Please ensure IPv6 is " - "enabled and /proc/sys/net/ipv6/conf/default/" - "disable_ipv6 is set to 0 to enable IPv6.")) + LOG.info("IPv6 not present or configured not to bind to new " + "interfaces on this system. Please ensure IPv6 is " + "enabled and /proc/sys/net/ipv6/conf/default/" + "disable_ipv6 is set to 0 to enable IPv6.") return _IS_IPV6_ENABLED diff --git a/neutron/common/profiler.py b/neutron/common/profiler.py index 5d7afaefd68..b08059fa1ae 100644 --- a/neutron/common/profiler.py +++ b/neutron/common/profiler.py @@ -17,8 +17,6 @@ import osprofiler.initializer from osprofiler import opts as profiler_opts import osprofiler.web -from neutron._i18n import _LI - CONF = cfg.CONF profiler_opts.set_defaults(CONF) @@ -41,11 +39,11 @@ def setup(name, host='0.0.0.0'): # nosec service=name, host=host ) - LOG.info(_LI("OSProfiler is enabled.\n" - "Traces provided from the profiler " - "can only be subscribed to using the same HMAC keys that " - "are configured in Neutron's configuration file " - "under the [profiler] section.\n To disable OSprofiler " - "set in /etc/neutron/neutron.conf:\n" - "[profiler]\n" - "enabled=false")) + LOG.info("OSProfiler is enabled.\n" + "Traces provided from the profiler " + "can only be subscribed to using the same HMAC keys that " + "are configured in Neutron's configuration file " + "under the [profiler] section.\n To disable OSprofiler " + "set in /etc/neutron/neutron.conf:\n" + "[profiler]\n" + "enabled=false") diff --git a/neutron/common/rpc.py b/neutron/common/rpc.py index a7cc8c17506..105e94031ce 100644 --- a/neutron/common/rpc.py +++ b/neutron/common/rpc.py @@ -29,7 +29,6 @@ from oslo_service import service from oslo_utils import excutils from osprofiler import profiler -from neutron._i18n import _LE, _LW from neutron.common import exceptions @@ -168,19 +167,19 @@ class _BackingOffContextWrapper(_ContextWrapper): min(self._METHOD_TIMEOUTS[scoped_method], TRANSPORT.conf.rpc_response_timeout) ) - LOG.error(_LE("Timeout in RPC 
method %(method)s. Waiting for " - "%(wait)s seconds before next attempt. If the " - "server is not down, consider increasing the " - "rpc_response_timeout option as Neutron " - "server(s) may be overloaded and unable to " - "respond quickly enough."), + LOG.error("Timeout in RPC method %(method)s. Waiting for " + "%(wait)s seconds before next attempt. If the " + "server is not down, consider increasing the " + "rpc_response_timeout option as Neutron " + "server(s) may be overloaded and unable to " + "respond quickly enough.", {'wait': int(round(wait)), 'method': scoped_method}) new_timeout = min( self._original_context.timeout * 2, self.get_max_timeout()) if new_timeout > self._METHOD_TIMEOUTS[scoped_method]: - LOG.warning(_LW("Increasing timeout for %(method)s calls " - "to %(new)s seconds. Restart the agent to " - "restore it to the default value."), + LOG.warning("Increasing timeout for %(method)s calls " + "to %(new)s seconds. Restart the agent to " + "restore it to the default value.", {'method': scoped_method, 'new': new_timeout}) self._METHOD_TIMEOUTS[scoped_method] = new_timeout time.sleep(wait) diff --git a/neutron/common/utils.py b/neutron/common/utils.py index 08426840c83..9a5612f9a9b 100644 --- a/neutron/common/utils.py +++ b/neutron/common/utils.py @@ -49,7 +49,7 @@ import six from stevedore import driver import neutron -from neutron._i18n import _, _LE +from neutron._i18n import _ from neutron.db import api as db_api TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" @@ -307,7 +307,7 @@ def load_class_by_alias_or_classname(namespace, name): """ if not name: - LOG.error(_LE("Alias or class name is not set")) + LOG.error("Alias or class name is not set") raise ImportError(_("Class not found.")) try: # Try to resolve class by alias @@ -320,9 +320,9 @@ def load_class_by_alias_or_classname(namespace, name): try: class_to_load = importutils.import_class(name) except (ImportError, ValueError): - LOG.error(_LE("Error loading class by alias"), + LOG.error("Error loading 
class by alias", exc_info=e1_info) - LOG.error(_LE("Error loading class by class name"), + LOG.error("Error loading class by class name", exc_info=True) raise ImportError(_("Class not found.")) return class_to_load @@ -636,7 +636,7 @@ def create_object_with_dependency(creator, dep_getter, dep_creator, try: dep_deleter(dependency) except Exception: - LOG.exception(_LE("Failed cleaning up dependency %s"), + LOG.exception("Failed cleaning up dependency %s", dep_id) return result, dependency @@ -743,7 +743,7 @@ def attach_exc_details(e, msg, args=_NO_ARGS_MARKER): def extract_exc_details(e): for attr in ('_error_context_msg', '_error_context_args'): if not hasattr(e, attr): - return _LE('No details.') + return u'No details.' details = e._error_context_msg args = e._error_context_args if args is _NO_ARGS_MARKER: diff --git a/neutron/db/_utils.py b/neutron/db/_utils.py index 3d162317da8..b50480e43be 100644 --- a/neutron/db/_utils.py +++ b/neutron/db/_utils.py @@ -22,7 +22,6 @@ from oslo_log import log as logging from oslo_utils import excutils from sqlalchemy.ext import associationproxy -from neutron._i18n import _LE LOG = logging.getLogger(__name__) @@ -67,9 +66,9 @@ def safe_creation(context, create_fn, delete_fn, create_bindings, try: delete_fn(obj['id']) except Exception as e: - LOG.error(_LE("Cannot clean up created object %(obj)s. " - "Exception: %(exc)s"), {'obj': obj['id'], - 'exc': e}) + LOG.error("Cannot clean up created object %(obj)s. 
" + "Exception: %(exc)s", {'obj': obj['id'], + 'exc': e}) return obj, value diff --git a/neutron/db/agents_db.py b/neutron/db/agents_db.py index 94c7906fa5b..cfa9ceb0cf8 100644 --- a/neutron/db/agents_db.py +++ b/neutron/db/agents_db.py @@ -33,7 +33,7 @@ from oslo_utils import timeutils from sqlalchemy.orm import exc from sqlalchemy import sql -from neutron._i18n import _, _LE, _LI, _LW +from neutron._i18n import _ from neutron.agent.common import utils from neutron.api.rpc.callbacks import version_manager from neutron.common import constants as n_const @@ -163,7 +163,7 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin): return if utils.is_agent_down(agent.heartbeat_timestamp): - LOG.warning(_LW('%(agent_type)s agent %(agent_id)s is not active'), + LOG.warning('%(agent_type)s agent %(agent_id)s is not active', {'agent_type': agent_type, 'agent_id': agent.id}) return agent @@ -193,8 +193,8 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin): conf = jsonutils.loads(json_value) except Exception: if json_value or not ignore_missing: - msg = _LW('Dictionary %(dict_name)s for agent %(agent_type)s ' - 'on host %(host)s is invalid.') + msg = ('Dictionary %(dict_name)s for agent %(agent_type)s ' + 'on host %(host)s is invalid.') LOG.warning(msg, {'dict_name': dict_name, 'agent_type': agent_db.agent_type, 'host': agent_db.host}) @@ -271,8 +271,8 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin): (agent['agent_type'], agent['heartbeat_timestamp'], agent['host']) for agent in dead_agents]) - LOG.warning(_LW("Agent healthcheck: found %(count)s dead agents " - "out of %(total)s:\n%(data)s"), + LOG.warning("Agent healthcheck: found %(count)s dead agents " + "out of %(total)s:\n%(data)s", {'count': len(dead_agents), 'total': len(agents), 'data': data}) @@ -314,8 +314,8 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin): def _log_heartbeat(self, state, agent_db, agent_conf): if 
agent_conf.get('log_agent_heartbeats'): delta = timeutils.utcnow() - agent_db.heartbeat_timestamp - LOG.info(_LI("Heartbeat received from %(type)s agent on " - "host %(host)s, uuid %(uuid)s after %(delta)s"), + LOG.info("Heartbeat received from %(type)s agent on " + "host %(host)s, uuid %(uuid)s after %(delta)s", {'type': agent_db.agent_type, 'host': agent_db.host, 'uuid': state.get('uuid'), @@ -492,10 +492,10 @@ class AgentExtRpcCallback(object): 'serv_time': (datetime.datetime.isoformat (time_server_now)), 'diff': diff} - LOG.error(_LE("Message received from the host: %(host)s " - "during the registration of %(agent_name)s has " - "a timestamp: %(agent_time)s. This differs from " - "the current server timestamp: %(serv_time)s by " - "%(diff)s seconds, which is more than the " - "threshold agent down" - "time: %(threshold)s."), log_dict) + LOG.error("Message received from the host: %(host)s " + "during the registration of %(agent_name)s has " + "a timestamp: %(agent_time)s. This differs from " + "the current server timestamp: %(serv_time)s by " + "%(diff)s seconds, which is more than the " + "threshold agent down" + "time: %(threshold)s.", log_dict) diff --git a/neutron/db/agentschedulers_db.py b/neutron/db/agentschedulers_db.py index 716889deae3..3e1a7c7f973 100644 --- a/neutron/db/agentschedulers_db.py +++ b/neutron/db/agentschedulers_db.py @@ -26,7 +26,7 @@ from oslo_utils import timeutils from sqlalchemy import orm from sqlalchemy.orm import exc -from neutron._i18n import _, _LE, _LI, _LW +from neutron._i18n import _ from neutron.agent.common import utils as agent_utils from neutron.common import constants as n_const from neutron.common import utils @@ -130,10 +130,10 @@ class AgentSchedulerDbMixin(agents_db.AgentDbMixin): tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary', timeutils.utcnow()) if tdelta.total_seconds() > cfg.CONF.agent_down_time: - LOG.warning(_LW("Time since last %s agent reschedule check has " - "exceeded the interval between 
checks. Waiting " - "before check to allow agents to send a heartbeat " - "in case there was a clock adjustment."), + LOG.warning("Time since last %s agent reschedule check has " + "exceeded the interval between checks. Waiting " + "before check to allow agents to send a heartbeat " + "in case there was a clock adjustment.", agent_type) time.sleep(agent_dead_limit) self._clock_jump_canary = timeutils.utcnow() @@ -176,10 +176,10 @@ class AgentSchedulerDbMixin(agents_db.AgentDbMixin): agents_back_online.add(binding_agent_id) continue - LOG.warning(_LW( + LOG.warning( "Rescheduling %(resource_name)s %(resource)s from agent " "%(agent)s because the agent did not report to the server " - "in the last %(dead_time)s seconds."), + "in the last %(dead_time)s seconds.", {'resource_name': resource_name, 'resource': binding_resource_id, 'agent': binding_agent_id, @@ -189,15 +189,15 @@ class AgentSchedulerDbMixin(agents_db.AgentDbMixin): except (rescheduling_failed, oslo_messaging.RemoteError): # Catch individual rescheduling errors here # so one broken one doesn't stop the iteration. 
- LOG.exception(_LE("Failed to reschedule %(resource_name)s " - "%(resource)s"), + LOG.exception("Failed to reschedule %(resource_name)s " + "%(resource)s", {'resource_name': resource_name, 'resource': binding_resource_id}) except Exception: # we want to be thorough and catch whatever is raised # to avoid loop abortion - LOG.exception(_LE("Exception encountered during %(resource_name)s " - "rescheduling."), + LOG.exception("Exception encountered during %(resource_name)s " + "rescheduling.", {'resource_name': resource_name}) @@ -211,8 +211,8 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler def add_periodic_dhcp_agent_status_check(self): if not cfg.CONF.allow_automatic_dhcp_failover: - LOG.info(_LI("Skipping periodic DHCP agent status check because " - "automatic network rescheduling is disabled.")) + LOG.info("Skipping periodic DHCP agent status check because " + "automatic network rescheduling is disabled.") return self.add_agent_status_check_worker( @@ -249,23 +249,23 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler return agent_expected_up > timeutils.utcnow() def _schedule_network(self, context, network_id, dhcp_notifier): - LOG.info(_LI("Scheduling unhosted network %s"), network_id) + LOG.info("Scheduling unhosted network %s", network_id) try: # TODO(enikanorov): have to issue redundant db query # to satisfy scheduling interface network = self.get_network(context, network_id) agents = self.schedule_network(context, network) if not agents: - LOG.info(_LI("Failed to schedule network %s, " - "no eligible agents or it might be " - "already scheduled by another server"), + LOG.info("Failed to schedule network %s, " + "no eligible agents or it might be " + "already scheduled by another server", network_id) return if not dhcp_notifier: return for agent in agents: - LOG.info(_LI("Adding network %(net)s to agent " - "%(agent)s on host %(host)s"), + LOG.info("Adding network %(net)s to agent " + "%(agent)s on host %(host)s", {'net': network_id, 'agent': 
agent.id, 'host': agent.host}) @@ -275,7 +275,7 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler # catching any exception during scheduling # so if _schedule_network is invoked in the loop it could # continue in any case - LOG.exception(_LE("Failed to schedule network %s"), network_id) + LOG.exception("Failed to schedule network %s", network_id) def _filter_bindings(self, context, bindings): """Skip bindings for which the agent is dead, but starting up.""" @@ -332,14 +332,14 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler active_agents = [agent for agent in agents if self.is_eligible_agent(context, True, agent)] if not active_agents: - LOG.warning(_LW("No DHCP agents available, " - "skipping rescheduling")) + LOG.warning("No DHCP agents available, " + "skipping rescheduling") return for binding in dead_bindings: - LOG.warning(_LW("Removing network %(network)s from agent " - "%(agent)s because the agent did not report " - "to the server in the last %(dead_time)s " - "seconds."), + LOG.warning("Removing network %(network)s from agent " + "%(agent)s because the agent did not report " + "to the server in the last %(dead_time)s " + "seconds.", {'network': binding.network_id, 'agent': binding.dhcp_agent_id, 'dead_time': agent_dead_limit}) @@ -362,9 +362,9 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler saved_binding) # still continue and allow concurrent scheduling attempt except Exception: - LOG.exception(_LE("Unexpected exception occurred while " - "removing network %(net)s from agent " - "%(agent)s"), + LOG.exception("Unexpected exception occurred while " + "removing network %(net)s from agent " + "%(agent)s", saved_binding) if cfg.CONF.network_auto_schedule: @@ -373,8 +373,8 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler except Exception: # we want to be thorough and catch whatever is raised # to avoid loop abortion - LOG.exception(_LE("Exception encountered during network " - "rescheduling")) + LOG.exception("Exception encountered during 
network " + "rescheduling") def get_dhcp_agents_hosting_networks( self, context, network_ids, active=None, admin_state_up=None, diff --git a/neutron/db/api.py b/neutron/db/api.py index b1e36b26cfa..c69fe45fbd6 100644 --- a/neutron/db/api.py +++ b/neutron/db/api.py @@ -35,7 +35,6 @@ from sqlalchemy import exc as sql_exc from sqlalchemy import orm from sqlalchemy.orm import exc -from neutron._i18n import _LE from neutron.objects import exceptions as obj_exc @@ -148,8 +147,8 @@ def retry_if_session_inactive(context_var_name='context'): # functions ctx_arg_index = p_util.getargspec(f).args.index(context_var_name) except ValueError: - raise RuntimeError(_LE("Could not find position of var %s") - % context_var_name) + raise RuntimeError("Could not find position of var %s" % + context_var_name) f_with_retry = retry_db_errors(f) @six.wraps(f) diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py index d51fe717055..e17fdb285de 100644 --- a/neutron/db/db_base_plugin_v2.py +++ b/neutron/db/db_base_plugin_v2.py @@ -37,7 +37,7 @@ from sqlalchemy import and_ from sqlalchemy import exc as sql_exc from sqlalchemy import not_ -from neutron._i18n import _, _LE, _LI +from neutron._i18n import _ from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.common import constants as n_const from neutron.common import exceptions as n_exc @@ -356,8 +356,8 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, objects.append(obj_creator(context, item)) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("An exception occurred while creating " - "the %(resource)s:%(item)s"), + LOG.error("An exception occurred while creating " + "the %(resource)s:%(item)s", {'resource': resource, 'item': item}) return objects @@ -968,9 +968,9 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, def _ensure_no_user_ports_on_subnet(self, context, id): alloc = self._subnet_get_user_allocation(context, id) if alloc: - 
LOG.info(_LI("Found port (%(port_id)s, %(ip)s) having IP " - "allocation on subnet " - "%(subnet)s, cannot delete"), + LOG.info("Found port (%(port_id)s, %(ip)s) having IP " + "allocation on subnet " + "%(subnet)s, cannot delete", {'ip': alloc.ip_address, 'port_id': alloc.port_id, 'subnet': id}) diff --git a/neutron/db/dns_db.py b/neutron/db/dns_db.py index 652284500ff..f3f2a3a9998 100644 --- a/neutron/db/dns_db.py +++ b/neutron/db/dns_db.py @@ -18,7 +18,7 @@ from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log as logging -from neutron._i18n import _, _LE +from neutron._i18n import _ from neutron.common import utils from neutron.db import _resource_extend as resource_extend from neutron.extensions import dns @@ -59,8 +59,8 @@ class DNSDbMixin(object): cfg.CONF.external_dns_driver) return self._dns_driver except ImportError: - LOG.exception(_LE("ImportError exception occurred while loading " - "the external DNS service driver")) + LOG.exception("ImportError exception occurred while loading " + "the external DNS service driver") raise dns.ExternalDNSDriverNotFound( driver=cfg.CONF.external_dns_driver) @@ -210,10 +210,10 @@ class DNSDbMixin(object): self.dns_driver.delete_record_set(context, dns_domain, dns_name, records) except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e: - LOG.exception(_LE("Error deleting Floating IP data from external " - "DNS service. Name: '%(name)s'. Domain: " - "'%(domain)s'. IP addresses '%(ips)s'. DNS " - "service driver message '%(message)s'"), + LOG.exception("Error deleting Floating IP data from external " + "DNS service. Name: '%(name)s'. Domain: " + "'%(domain)s'. IP addresses '%(ips)s'. 
DNS " + "service driver message '%(message)s'", {"name": dns_name, "domain": dns_domain, "message": e.msg, @@ -241,10 +241,10 @@ class DNSDbMixin(object): self.dns_driver.create_record_set(context, dns_domain, dns_name, records) except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e: - LOG.exception(_LE("Error publishing floating IP data in external " - "DNS service. Name: '%(name)s'. Domain: " - "'%(domain)s'. DNS service driver message " - "'%(message)s'"), + LOG.exception("Error publishing floating IP data in external " + "DNS service. Name: '%(name)s'. Domain: " + "'%(domain)s'. DNS service driver message " + "'%(message)s'", {"name": dns_name, "domain": dns_domain, "message": e.msg}) diff --git a/neutron/db/dvr_mac_db.py b/neutron/db/dvr_mac_db.py index 17885ede207..5184960c8f5 100644 --- a/neutron/db/dvr_mac_db.py +++ b/neutron/db/dvr_mac_db.py @@ -27,7 +27,7 @@ from oslo_log import helpers as log_helpers from oslo_log import log as logging from sqlalchemy import or_ -from neutron._i18n import _, _LE +from neutron._i18n import _ from neutron.common import utils from neutron.db import api as db_api from neutron.db import models_v2 @@ -120,7 +120,7 @@ class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase): try: return self._create_dvr_mac_address_retry(context, host, base_mac) except exceptions.NeutronDbObjectDuplicateEntry: - LOG.error(_LE("MAC generation error after %s attempts"), + LOG.error("MAC generation error after %s attempts", db_api.MAX_RETRIES) raise ext_dvr.MacAddressGenerationFailure(host=host) @@ -200,8 +200,8 @@ class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase): internal_gateway_ports = self.plugin.get_ports( context, filters=filter) if not internal_gateway_ports: - LOG.error(_LE("Could not retrieve gateway port " - "for subnet %s"), subnet_info) + LOG.error("Could not retrieve gateway port " + "for subnet %s", subnet_info) return {} internal_port = internal_gateway_ports[0] subnet_info['gateway_mac'] = internal_port['mac_address'] diff --git 
a/neutron/db/ipam_backend_mixin.py b/neutron/db/ipam_backend_mixin.py index d97fe200003..e7009da7cb3 100644 --- a/neutron/db/ipam_backend_mixin.py +++ b/neutron/db/ipam_backend_mixin.py @@ -28,7 +28,7 @@ from oslo_log import log as logging from sqlalchemy import and_, or_ from sqlalchemy.orm import exc as orm_exc -from neutron._i18n import _, _LI +from neutron._i18n import _ from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils @@ -72,8 +72,8 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): ip_range_pools.append(netaddr.IPRange(ip_pool['start'], ip_pool['end'])) except netaddr.AddrFormatError: - LOG.info(_LI("Found invalid IP address in pool: " - "%(start)s - %(end)s:"), + LOG.info("Found invalid IP address in pool: " + "%(start)s - %(end)s:", {'start': ip_pool['start'], 'end': ip_pool['end']}) raise n_exc.InvalidAllocationPool(pool=ip_pool) @@ -241,14 +241,14 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): if ((netaddr.IPSet([subnet.cidr]) & new_subnet_ipset) and subnet.cidr != constants.PROVISIONAL_IPV6_PD_PREFIX): # don't give out details of the overlapping subnet - err_msg = (_("Requested subnet with cidr: %(cidr)s for " - "network: %(network_id)s overlaps with another " - "subnet") % + err_msg = ("Requested subnet with cidr: %(cidr)s for " + "network: %(network_id)s overlaps with another " + "subnet" % {'cidr': new_subnet_cidr, 'network_id': network.id}) - LOG.info(_LI("Validation for CIDR: %(new_cidr)s failed - " - "overlaps with subnet %(subnet_id)s " - "(CIDR: %(cidr)s)"), + LOG.info("Validation for CIDR: %(new_cidr)s failed - " + "overlaps with subnet %(subnet_id)s " + "(CIDR: %(cidr)s)", {'new_cidr': new_subnet_cidr, 'subnet_id': subnet.id, 'cidr': subnet.cidr}) @@ -284,12 +284,12 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): end_ip = netaddr.IPAddress(ip_pool.last, ip_pool.version) if (start_ip.version != subnet.version or 
end_ip.version != subnet.version): - LOG.info(_LI("Specified IP addresses do not match " - "the subnet IP version")) + LOG.info("Specified IP addresses do not match " + "the subnet IP version") raise n_exc.InvalidAllocationPool(pool=ip_pool) if start_ip < subnet_first_ip or end_ip > subnet_last_ip: - LOG.info(_LI("Found pool larger than subnet " - "CIDR:%(start)s - %(end)s"), + LOG.info("Found pool larger than subnet " + "CIDR:%(start)s - %(end)s", {'start': start_ip, 'end': end_ip}) raise n_exc.OutOfBoundsAllocationPool( pool=ip_pool, @@ -309,8 +309,8 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): if ip_sets[l_cursor] & ip_sets[r_cursor]: l_range = ip_ranges[l_cursor] r_range = ip_ranges[r_cursor] - LOG.info(_LI("Found overlapping ranges: %(l_range)s and " - "%(r_range)s"), + LOG.info("Found overlapping ranges: %(l_range)s and " + "%(r_range)s", {'l_range': l_range, 'r_range': r_range}) raise n_exc.OverlappingAllocationPools( pool_1=l_range, diff --git a/neutron/db/ipam_pluggable_backend.py b/neutron/db/ipam_pluggable_backend.py index dc2c7050071..b71208b9fb0 100644 --- a/neutron/db/ipam_pluggable_backend.py +++ b/neutron/db/ipam_pluggable_backend.py @@ -24,7 +24,6 @@ from oslo_log import log as logging from oslo_utils import excutils from sqlalchemy import and_ -from neutron._i18n import _LE, _LW from neutron.common import constants as n_const from neutron.common import ipv6_utils from neutron.db import api as db_api @@ -55,7 +54,7 @@ class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin): try: func(*args, **kwargs) except Exception as e: - LOG.warning(_LW("Revert failed with: %s"), e) + LOG.warning("Revert failed with: %s", e) def _ipam_deallocate_ips(self, context, ipam_driver, port, ips, revert_on_fail=True): @@ -92,8 +91,8 @@ class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin): elif not revert_on_fail and ips: addresses = ', '.join(self._get_failed_ips(ips, deallocated)) - LOG.error(_LE("IP deallocation failed on " - 
"external system for %s"), addresses) + LOG.error("IP deallocation failed on " + "external system for %s", addresses) return deallocated def _ipam_allocate_ips(self, context, ipam_driver, port, ips, @@ -146,8 +145,8 @@ class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin): elif not revert_on_fail and ips: addresses = ', '.join(self._get_failed_ips(ips, allocated)) - LOG.error(_LE("IP allocation failed on " - "external system for %s"), addresses) + LOG.error("IP allocation failed on " + "external system for %s", addresses) return allocated diff --git a/neutron/db/l3_agentschedulers_db.py b/neutron/db/l3_agentschedulers_db.py index e3b6167a5cf..e7eaae83a17 100644 --- a/neutron/db/l3_agentschedulers_db.py +++ b/neutron/db/l3_agentschedulers_db.py @@ -22,7 +22,7 @@ from oslo_log import log as logging import oslo_messaging from sqlalchemy import or_ -from neutron._i18n import _, _LI +from neutron._i18n import _ from neutron.agent.common import utils as agent_utils from neutron.common import constants as l_consts from neutron.common import utils as n_utils @@ -66,8 +66,8 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, def add_periodic_l3_agent_status_check(self): if not cfg.CONF.allow_automatic_l3agent_failover: - LOG.info(_LI("Skipping period L3 agent status check because " - "automatic router rescheduling is disabled.")) + LOG.info("Skipping period L3 agent status check because " + "automatic router rescheduling is disabled.") return self.add_agent_status_check_worker( @@ -322,8 +322,8 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, agent = self._get_agent_by_type_and_host( context, constants.AGENT_TYPE_L3, host) if not agentschedulers_db.services_available(agent.admin_state_up): - LOG.info(_LI("Agent has its services disabled. Returning " - "no active routers. Agent: %s"), agent) + LOG.info("Agent has its services disabled. Returning " + "no active routers. 
Agent: %s", agent) return [] scheduled_router_ids = self._get_router_ids_for_agent( context, agent, router_ids) diff --git a/neutron/db/l3_db.py b/neutron/db/l3_db.py index ef3606776ad..82843692e48 100644 --- a/neutron/db/l3_db.py +++ b/neutron/db/l3_db.py @@ -35,7 +35,7 @@ import six from sqlalchemy import orm from sqlalchemy.orm import exc -from neutron._i18n import _, _LE, _LI, _LW +from neutron._i18n import _ from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.common import constants as n_const from neutron.common import ipv6_utils @@ -145,21 +145,21 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase, try: self._fix_or_kill_floating_port(context, port_id) except Exception: - LOG.exception(_LE("Error cleaning up floating IP port: %s"), + LOG.exception("Error cleaning up floating IP port: %s", port_id) def _fix_or_kill_floating_port(self, context, port_id): fip = (context.session.query(l3_models.FloatingIP). filter_by(floating_port_id=port_id).first()) if fip: - LOG.warning(_LW("Found incorrect device_id on floating port " - "%(pid)s, correcting to %(fip)s."), + LOG.warning("Found incorrect device_id on floating port " + "%(pid)s, correcting to %(fip)s.", {'pid': port_id, 'fip': fip.id}) self._core_plugin.update_port( context, port_id, {'port': {'device_id': fip.id}}) else: - LOG.warning(_LW("Found floating IP port %s without floating IP, " - "deleting."), port_id) + LOG.warning("Found floating IP port %s without floating IP, " + "deleting.", port_id) self._core_plugin.delete_port( context, port_id, l3_port_check=False) @@ -1616,8 +1616,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase, if not fixed_ips: # Skip ports without IPs, which can occur if a subnet # attached to a router is deleted - LOG.info(_LI("Skipping port %s as no IP is configure on " - "it"), + LOG.info("Skipping port %s as no IP is configure on " + "it", port['id']) continue yield port diff --git a/neutron/db/l3_dvr_db.py b/neutron/db/l3_dvr_db.py index 
26e631764cb..c80895408c8 100644 --- a/neutron/db/l3_dvr_db.py +++ b/neutron/db/l3_dvr_db.py @@ -29,7 +29,7 @@ from oslo_log import log as logging from oslo_utils import excutils import six -from neutron._i18n import _, _LE, _LI, _LW +from neutron._i18n import _ from neutron.common import constants as l3_const from neutron.common import utils as n_utils from neutron.db import api as db_api @@ -227,8 +227,8 @@ class DVRResourceOperationHandler(object): port_type=const.DEVICE_OWNER_DVR_INTERFACE ) ) - LOG.info(_LI('SNAT interface port list does not exist,' - ' so create one: %s'), port_list) + LOG.info('SNAT interface port list does not exist,' + ' so create one: %s', port_list) for intf in int_ports: if intf.fixed_ips: # Passing the subnet for the port to make sure the IP's @@ -431,8 +431,8 @@ class DVRResourceOperationHandler(object): try: revert() except Exception: - LOG.exception(_LE("Failed to revert change " - "to router port %s."), + LOG.exception("Failed to revert change " + "to router port %s.", port['id']) LOG.debug("CSNAT port updated for IPv6 subnet: %s", updated_port) @@ -544,7 +544,7 @@ class DVRResourceOperationHandler(object): for p in c_snat_ports: if subnet_id is None or not p['fixed_ips']: if not p['fixed_ips']: - LOG.info(_LI("CSNAT port has no IPs: %s"), p) + LOG.info("CSNAT port has no IPs: %s", p) self.l3plugin._core_plugin.delete_port(context, p['id'], l3_port_check=False) @@ -847,8 +847,8 @@ class _DVRAgentInterfaceMixin(object): f_port = self._get_agent_gw_ports_exist_for_network( context, network_id, host, l3_agent_db['id']) if not f_port: - LOG.info(_LI('Agent Gateway port does not exist,' - ' so create one: %s'), f_port) + LOG.info('Agent Gateway port does not exist,' + ' so create one: %s', f_port) port_data = {'tenant_id': '', 'network_id': network_id, 'device_id': l3_agent_db['id'], @@ -1010,8 +1010,8 @@ class L3_NAT_with_dvr_db_mixin(_DVRAgentInterfaceMixin, # using admin context as router may belong to admin tenant router = 
self._get_router(context.elevated(), router_id) except l3.RouterNotFound: - LOG.warning(_LW("Router %s was not found. " - "Skipping agent notification."), + LOG.warning("Router %s was not found. " + "Skipping agent notification.", router_id) return diff --git a/neutron/db/l3_hamode_db.py b/neutron/db/l3_hamode_db.py index d9b134ed15b..983bb12987d 100644 --- a/neutron/db/l3_hamode_db.py +++ b/neutron/db/l3_hamode_db.py @@ -35,7 +35,7 @@ import sqlalchemy as sa from sqlalchemy import exc as sql_exc from sqlalchemy import orm -from neutron._i18n import _, _LE, _LI, _LW +from neutron._i18n import _ from neutron.common import constants as n_const from neutron.common import utils as n_utils from neutron.db import _utils as db_utils @@ -166,8 +166,8 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin, return allocation.vr_id except db_exc.DBDuplicateEntry: - LOG.info(_LI("Attempt %(count)s to allocate a VRID in the " - "network %(network)s for the router %(router)s"), + LOG.info("Attempt %(count)s to allocate a VRID in the " + "network %(network)s for the router %(router)s", {'count': count, 'network': network_id, 'router': router_id}) @@ -255,9 +255,9 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin, max_agents = cfg.CONF.max_l3_agents_per_router if max_agents: if max_agents > num_agents: - LOG.info(_LI("Number of active agents lower than " - "max_l3_agents_per_router. L3 agents " - "available: %s"), num_agents) + LOG.info("Number of active agents lower than " + "max_l3_agents_per_router. 
L3 agents " + "available: %s", num_agents) else: num_agents = max_agents @@ -414,9 +414,9 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin, with excutils.save_and_reraise_exception() as ctx: if isinstance(e, l3_ha.NoVRIDAvailable): ctx.reraise = False - LOG.warning(_LW("No more VRIDs for router: %s"), e) + LOG.warning("No more VRIDs for router: %s", e) else: - LOG.exception(_LE("Failed to schedule HA router %s."), + LOG.exception("Failed to schedule HA router %s.", router_id) router['status'] = self._update_router_db( context, router_id, @@ -502,15 +502,15 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin, LOG.debug( "HA network for tenant %s was already deleted.", tenant_id) except sa.exc.InvalidRequestError: - LOG.info(_LI("HA network %s can not be deleted."), net_id) + LOG.info("HA network %s can not be deleted.", net_id) except n_exc.NetworkInUse: # network is still in use, this is normal so we don't # log anything pass else: - LOG.info(_LI("HA network %(network)s was deleted as " - "no HA routers are present in tenant " - "%(tenant)s."), + LOG.info("HA network %(network)s was deleted as " + "no HA routers are present in tenant " + "%(tenant)s.", {'network': net_id, 'tenant': tenant_id}) @registry.receives(resources.ROUTER, [events.PRECOMMIT_DELETE]) @@ -622,8 +622,8 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin, port = binding.port if not port: # Filter the HA router has no ha port here - LOG.info(_LI("HA router %s is missing HA router port " - "bindings. Skipping it."), + LOG.info("HA router %s is missing HA router port " + "bindings. 
Skipping it.", binding.router_id) routers_dict.pop(binding.router_id) continue diff --git a/neutron/db/metering/metering_rpc.py b/neutron/db/metering/metering_rpc.py index 68d7982fe23..c925e672ab6 100644 --- a/neutron/db/metering/metering_rpc.py +++ b/neutron/db/metering/metering_rpc.py @@ -18,7 +18,6 @@ from neutron_lib.plugins import directory from oslo_log import log as logging import oslo_messaging -from neutron._i18n import _LE from neutron.common import utils LOG = logging.getLogger(__name__) @@ -44,7 +43,7 @@ class MeteringRpcCallbacks(object): else: agents = l3_plugin.get_l3_agents(context, filters={'host': [host]}) if not agents: - LOG.error(_LE('Unable to find agent %s.'), host) + LOG.error('Unable to find agent %s.', host) return routers = l3_plugin.list_routers_on_l3_agent(context, agents[0].id) diff --git a/neutron/db/provisioning_blocks.py b/neutron/db/provisioning_blocks.py index b246ef89486..3b939749d83 100644 --- a/neutron/db/provisioning_blocks.py +++ b/neutron/db/provisioning_blocks.py @@ -17,7 +17,6 @@ from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from oslo_log import log as logging -from neutron._i18n import _LE from neutron.db import api as db_api from neutron.db import models_v2 from neutron.objects import provisioning_blocks as pb_obj @@ -122,7 +121,7 @@ def provisioning_complete(context, object_id, object_type, entity): # this can't be called in a transaction to avoid REPEATABLE READ # tricking us into thinking there are remaining provisioning components if context.session.is_active: - raise RuntimeError(_LE("Must not be called in a transaction")) + raise RuntimeError("Must not be called in a transaction") standard_attr_id = _get_standard_attr_id(context, object_id, object_type) if not standard_attr_id: @@ -162,10 +161,10 @@ def is_object_blocked(context, object_id, object_type): def _get_standard_attr_id(context, object_id, object_type): model = _RESOURCE_TO_MODEL_MAP.get(object_type) if not model: 
- raise RuntimeError(_LE("Could not find model for %s. If you are " - "adding provisioning blocks for a new resource " - "you must call add_model_for_resource during " - "initialization for your type.") % object_type) + raise RuntimeError("Could not find model for %s. If you are " + "adding provisioning blocks for a new resource " + "you must call add_model_for_resource during " + "initialization for your type." % object_type) obj = (context.session.query(model).enable_eagerloads(False). filter_by(id=object_id).first()) if not obj: diff --git a/neutron/db/segments_db.py b/neutron/db/segments_db.py index 706724f6e7a..405869fc516 100644 --- a/neutron/db/segments_db.py +++ b/neutron/db/segments_db.py @@ -16,7 +16,6 @@ from neutron_lib.callbacks import resources from oslo_log import log as logging from oslo_utils import uuidutils -from neutron._i18n import _LI from neutron.db import api as db_api from neutron.db.models import segment as segments_model from neutron.objects import base as base_obj @@ -55,8 +54,8 @@ def add_network_segment(context, network_id, segment, segment_index=0, context=context, segment=netseg_obj) segment['id'] = netseg_obj.id - LOG.info(_LI("Added segment %(id)s of type %(network_type)s for network " - "%(network_id)s"), + LOG.info("Added segment %(id)s of type %(network_type)s for network " + "%(network_id)s", {'id': netseg_obj.id, 'network_type': netseg_obj.network_type, 'network_id': netseg_obj.network_id}) diff --git a/neutron/db/standard_attr.py b/neutron/db/standard_attr.py index 7e20d7759a8..ad89a6474bf 100644 --- a/neutron/db/standard_attr.py +++ b/neutron/db/standard_attr.py @@ -21,7 +21,7 @@ from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext import declarative from sqlalchemy.orm import session as se -from neutron._i18n import _, _LE +from neutron._i18n import _ from neutron.db import sqlalchemytypes @@ -178,9 +178,9 @@ def get_standard_attr_resource_model_map(): for subclass in 
HasStandardAttributes.__subclasses__(): for resource in subclass.get_api_collections(): if resource in rs_map: - raise RuntimeError(_LE("Model %(sub)s tried to register for " - "API resource %(res)s which conflicts " - "with model %(other)s.") % + raise RuntimeError("Model %(sub)s tried to register for " + "API resource %(res)s which conflicts " + "with model %(other)s." % dict(sub=subclass, other=rs_map[resource], res=resource)) rs_map[resource] = subclass @@ -206,8 +206,8 @@ def get_tag_resource_parent_map(): @event.listens_for(se.Session, 'after_bulk_delete') def throw_exception_on_bulk_delete_of_listened_for_objects(delete_context): if hasattr(delete_context.mapper.class_, 'revises_on_change'): - raise RuntimeError(_LE("%s may not be deleted in bulk because it " - "bumps the revision of other resources via " - "SQLAlchemy event handlers, which are not " - "compatible with bulk deletes.") % + raise RuntimeError("%s may not be deleted in bulk because it " + "bumps the revision of other resources via " + "SQLAlchemy event handlers, which are not " + "compatible with bulk deletes." 
% delete_context.mapper.class_) diff --git a/neutron/debug/commands.py b/neutron/debug/commands.py index 7caa353bf2d..6aa350e3f46 100644 --- a/neutron/debug/commands.py +++ b/neutron/debug/commands.py @@ -18,7 +18,7 @@ from neutronclient.common import utils from neutronclient.neutron import v2_0 as client from neutronclient.neutron.v2_0 import port -from neutron._i18n import _, _LI +from neutron._i18n import _ class ProbeCommand(client.NeutronCommand): @@ -84,7 +84,7 @@ class ClearProbe(ProbeCommand): def take_action(self, parsed_args): debug_agent = self.get_debug_agent() cleared_probes_count = debug_agent.clear_probes() - self.log.info(_LI('%d probe(s) deleted'), cleared_probes_count) + self.log.info('%d probe(s) deleted', cleared_probes_count) class ExecProbe(ProbeCommand): diff --git a/neutron/debug/debug_agent.py b/neutron/debug/debug_agent.py index 07f7b182d4d..d4fcaa94cac 100644 --- a/neutron/debug/debug_agent.py +++ b/neutron/debug/debug_agent.py @@ -21,7 +21,6 @@ from neutron_lib.api.definitions import portbindings from neutron_lib import constants from oslo_log import log as logging -from neutron._i18n import _LW from neutron.agent.linux import dhcp from neutron.agent.linux import ip_lib @@ -108,7 +107,7 @@ class NeutronDebugAgent(object): try: ip.netns.delete(namespace) except Exception: - LOG.warning(_LW('Failed to delete namespace %s'), namespace) + LOG.warning('Failed to delete namespace %s', namespace) else: self.driver.unplug(self.driver.get_device_name(port), bridge=bridge) diff --git a/neutron/debug/shell.py b/neutron/debug/shell.py index d60534c994d..fafa93e0dd8 100644 --- a/neutron/debug/shell.py +++ b/neutron/debug/shell.py @@ -19,7 +19,6 @@ from oslo_config import cfg from oslo_utils import importutils from neutron._i18n import _ -from neutron._i18n import _LW from neutron.agent.common import utils from neutron.agent.linux import interface from neutron.conf.agent import common as config @@ -81,9 +80,9 @@ class 
NeutronDebugShell(shell.NeutronShell): self.debug_agent = debug_agent.NeutronDebugAgent(cfg.CONF, client, driver) - self.log.warning(_LW('This tool is deprecated and will be removed ' - 'in the future to be replaced with a more ' - 'powerful troubleshooting toolkit.')) + self.log.warning('This tool is deprecated and will be removed ' + 'in the future to be replaced with a more ' + 'powerful troubleshooting toolkit.') def main(argv=None): diff --git a/neutron/extensions/l3agentscheduler.py b/neutron/extensions/l3agentscheduler.py index db5b98048c3..ef2d9810bba 100644 --- a/neutron/extensions/l3agentscheduler.py +++ b/neutron/extensions/l3agentscheduler.py @@ -24,7 +24,7 @@ from oslo_log import log as logging import six import webob.exc -from neutron._i18n import _, _LE +from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import base from neutron.api.v2 import resource @@ -47,8 +47,8 @@ class RouterSchedulerController(wsgi.Controller): def get_plugin(self): plugin = directory.get_plugin(plugin_constants.L3) if not plugin: - LOG.error(_LE('No plugin for L3 routing registered to handle ' - 'router scheduling')) + LOG.error('No plugin for L3 routing registered to handle ' + 'router scheduling') msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) return plugin @@ -89,8 +89,8 @@ class L3AgentsHostingRouterController(wsgi.Controller): def get_plugin(self): plugin = directory.get_plugin(plugin_constants.L3) if not plugin: - LOG.error(_LE('No plugin for L3 routing registered to handle ' - 'router scheduling')) + LOG.error('No plugin for L3 routing registered to handle ' + 'router scheduling') msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) return plugin diff --git a/neutron/extensions/vlantransparent.py b/neutron/extensions/vlantransparent.py index 36938935fa5..56136064b28 100644 --- a/neutron/extensions/vlantransparent.py +++ b/neutron/extensions/vlantransparent.py @@ -20,7 +20,7 @@ from 
neutron_lib import exceptions from oslo_config import cfg from oslo_log import log as logging -from neutron._i18n import _, _LI +from neutron._i18n import _ LOG = logging.getLogger(__name__) @@ -45,7 +45,7 @@ def disable_extension_by_config(aliases): if not cfg.CONF.vlan_transparent: if 'vlan-transparent' in aliases: aliases.remove('vlan-transparent') - LOG.info(_LI('Disabled vlantransparent extension.')) + LOG.info('Disabled vlantransparent extension.') def get_vlan_transparent(network): diff --git a/neutron/ipam/drivers/neutrondb_ipam/driver.py b/neutron/ipam/drivers/neutrondb_ipam/driver.py index c969c3c305e..02fdb2ab570 100644 --- a/neutron/ipam/drivers/neutrondb_ipam/driver.py +++ b/neutron/ipam/drivers/neutrondb_ipam/driver.py @@ -23,7 +23,7 @@ from oslo_db import exception as db_exc from oslo_log import log from oslo_utils import uuidutils -from neutron._i18n import _, _LE +from neutron._i18n import _ from neutron.ipam import driver as ipam_base from neutron.ipam.drivers.neutrondb_ipam import db_api as ipam_db_api from neutron.ipam import exceptions as ipam_exc @@ -90,9 +90,8 @@ class NeutronDbSubnet(ipam_base.Subnet): ipam_subnet = ipam_db_api.IpamSubnetManager.load_by_neutron_subnet_id( ctx, neutron_subnet_id) if not ipam_subnet: - LOG.error(_LE("IPAM subnet referenced to " - "Neutron subnet %s does not exist"), - neutron_subnet_id) + LOG.error("IPAM subnet referenced to " + "Neutron subnet %s does not exist", neutron_subnet_id) raise n_exc.SubnetNotFound(subnet_id=neutron_subnet_id) pools = [] for pool in ipam_subnet.allocation_pools: @@ -316,9 +315,8 @@ class NeutronDbPool(subnet_alloc.SubnetAllocator): count = ipam_db_api.IpamSubnetManager.delete(self._context, subnet_id) if count < 1: - LOG.error(_LE("IPAM subnet referenced to " - "Neutron subnet %s does not exist"), - subnet_id) + LOG.error("IPAM subnet referenced to " + "Neutron subnet %s does not exist", subnet_id) raise n_exc.SubnetNotFound(subnet_id=subnet_id) def needs_rollback(self): diff --git 
a/neutron/manager.py b/neutron/manager.py index 2ab5fc8330f..a0217d14340 100644 --- a/neutron/manager.py +++ b/neutron/manager.py @@ -25,7 +25,7 @@ from oslo_utils import excutils from osprofiler import profiler import six -from neutron._i18n import _, _LE, _LI +from neutron._i18n import _ from neutron.common import utils from neutron.plugins.common import constants @@ -126,7 +126,7 @@ class NeutronManager(object): # intentionally to allow v2 plugins to be monitored # for performance metrics. plugin_provider = cfg.CONF.core_plugin - LOG.info(_LI("Loading core plugin: %s"), plugin_provider) + LOG.info("Loading core plugin: %s", plugin_provider) # NOTE(armax): keep hold of the actual plugin object plugin = self._get_plugin_instance(CORE_PLUGINS_NAMESPACE, plugin_provider) @@ -159,7 +159,7 @@ class NeutronManager(object): plugin_provider) except ImportError: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Plugin '%s' not found."), plugin_provider) + LOG.error("Plugin '%s' not found.", plugin_provider) def _get_plugin_instance(self, namespace, plugin_provider): plugin_class = self.load_class_for_provider(namespace, plugin_provider) @@ -174,7 +174,7 @@ class NeutronManager(object): if ext_alias in constants.EXT_TO_SERVICE_MAPPING: service_type = constants.EXT_TO_SERVICE_MAPPING[ext_alias] directory.add_plugin(service_type, plugin) - LOG.info(_LI("Service %s is supported by the core plugin"), + LOG.info("Service %s is supported by the core plugin", service_type) def _get_default_service_plugins(self): @@ -194,7 +194,7 @@ class NeutronManager(object): if provider == '': continue - LOG.info(_LI("Loading Plugin: %s"), provider) + LOG.info("Loading Plugin: %s", provider) plugin_inst = self._get_plugin_instance('neutron.service_plugins', provider) diff --git a/neutron/notifiers/nova.py b/neutron/notifiers/nova.py index 196bd316c92..43245f89f19 100644 --- a/neutron/notifiers/nova.py +++ b/neutron/notifiers/nova.py @@ -29,7 +29,6 @@ from oslo_log import log as 
logging from oslo_utils import uuidutils from sqlalchemy.orm import attributes as sql_attr -from neutron._i18n import _LE, _LI, _LW from neutron.notifiers import batch_notifier @@ -162,8 +161,8 @@ class Notifier(object): def _can_notify(self, port): if not port.id: - LOG.warning(_LW("Port ID not set! Nova will not be notified of " - "port status change.")) + LOG.warning("Port ID not set! Nova will not be notified of " + "port status change.") return False # If there is no device_id set there is nothing we can do here. @@ -248,11 +247,11 @@ class Notifier(object): LOG.debug("Nova returned NotFound for event: %s", batched_events) except Exception: - LOG.exception(_LE("Failed to notify nova on events: %s"), + LOG.exception("Failed to notify nova on events: %s", batched_events) else: if not isinstance(response, list): - LOG.error(_LE("Error response returned from nova: %s"), + LOG.error("Error response returned from nova: %s", response) return response_error = False @@ -263,10 +262,10 @@ class Notifier(object): response_error = True continue if code != 200: - LOG.warning(_LW("Nova event: %s returned with failed " - "status"), event) + LOG.warning("Nova event: %s returned with failed " + "status", event) else: - LOG.info(_LI("Nova event response: %s"), event) + LOG.info("Nova event response: %s", event) if response_error: - LOG.error(_LE("Error response returned from nova: %s"), + LOG.error("Error response returned from nova: %s", response) diff --git a/neutron/pecan_wsgi/controllers/resource.py b/neutron/pecan_wsgi/controllers/resource.py index 67204a450a0..87379272293 100644 --- a/neutron/pecan_wsgi/controllers/resource.py +++ b/neutron/pecan_wsgi/controllers/resource.py @@ -16,7 +16,6 @@ from oslo_log import log as logging import pecan from pecan import request -from neutron._i18n import _LW from neutron import manager from neutron.pecan_wsgi.controllers import utils @@ -85,8 +84,8 @@ class ItemController(utils.NeutronPecanController): collection_path) if not 
controller: if collection not in self._member_actions: - LOG.warning(_LW("No controller found for: %s - returning" - "response code 404"), collection) + LOG.warning("No controller found for: %s - returning" + "response code 404", collection) pecan.abort(404) # collection is a member action, so we create a new controller # for it. diff --git a/neutron/pecan_wsgi/controllers/root.py b/neutron/pecan_wsgi/controllers/root.py index e0e0dce37ce..670447444e2 100644 --- a/neutron/pecan_wsgi/controllers/root.py +++ b/neutron/pecan_wsgi/controllers/root.py @@ -20,7 +20,6 @@ import pecan from pecan import request import six.moves.urllib.parse as urlparse -from neutron._i18n import _LW from neutron.api.v2 import attributes from neutron.api.views import versions as versions_view from neutron import manager @@ -120,8 +119,8 @@ class V2Controller(object): controller = manager.NeutronManager.get_controller_for_resource( collection) if not controller: - LOG.warning(_LW("No controller found for: %s - returning response " - "code 404"), collection) + LOG.warning("No controller found for: %s - returning response " + "code 404", collection) pecan.abort(404) # Store resource and collection names in pecan request context so that # hooks can leverage them if necessary. 
The following code uses diff --git a/neutron/pecan_wsgi/hooks/translation.py b/neutron/pecan_wsgi/hooks/translation.py index bda1f07a96e..0379f19ad7e 100644 --- a/neutron/pecan_wsgi/hooks/translation.py +++ b/neutron/pecan_wsgi/hooks/translation.py @@ -17,7 +17,6 @@ import oslo_i18n from oslo_log import log as logging from pecan import hooks -from neutron._i18n import _LE, _LI from neutron.api import api_common from neutron.api.v2 import base as v2base @@ -34,8 +33,8 @@ class ExceptionTranslationHook(hooks.PecanHook): exc = api_common.convert_exception_to_http_exc(e, v2base.FAULT_MAP, language) if hasattr(exc, 'code') and 400 <= exc.code < 500: - LOG.info(_LI('%(action)s failed (client error): %(exc)s'), + LOG.info('%(action)s failed (client error): %(exc)s', {'action': state.request.method, 'exc': exc}) else: - LOG.exception(_LE('%s failed.'), state.request.method) + LOG.exception('%s failed.', state.request.method) return exc diff --git a/neutron/plugins/common/utils.py b/neutron/plugins/common/utils.py index 638dcb3fe45..4eb6f93a252 100644 --- a/neutron/plugins/common/utils.py +++ b/neutron/plugins/common/utils.py @@ -32,7 +32,7 @@ from oslo_utils import encodeutils from oslo_utils import excutils import webob.exc -from neutron._i18n import _, _LE, _LI +from neutron._i18n import _ from neutron.api.v2 import attributes from neutron.common import exceptions as n_exc from neutron.plugins.common import constants as p_const @@ -201,7 +201,7 @@ def delete_port_on_error(core_plugin, context, port_id): except exceptions.PortNotFound: LOG.debug("Port %s not found", port_id) except Exception: - LOG.exception(_LE("Failed to delete port: %s"), port_id) + LOG.exception("Failed to delete port: %s", port_id) @contextlib.contextmanager @@ -214,7 +214,7 @@ def update_port_on_error(core_plugin, context, port_id, revert_value): core_plugin.update_port(context, port_id, {'port': revert_value}) except Exception: - LOG.exception(_LE("Failed to update port: %s"), port_id) + 
LOG.exception("Failed to update port: %s", port_id) def get_interface_name(name, prefix='', max_len=n_const.DEVICE_NAME_MAX_LEN): @@ -240,9 +240,9 @@ def get_interface_name(name, prefix='', max_len=n_const.DEVICE_NAME_MAX_LEN): new_name = ('%(prefix)s%(truncated)s%(hash)s' % {'prefix': prefix, 'truncated': name[0:namelen], 'hash': hashed_name.hexdigest()[0:INTERFACE_HASH_LEN]}) - LOG.info(_LI("The requested interface name %(requested_name)s exceeds the " - "%(limit)d character limitation. It was shortened to " - "%(new_name)s to fit."), + LOG.info("The requested interface name %(requested_name)s exceeds the " + "%(limit)d character limitation. It was shortened to " + "%(new_name)s to fit.", {'requested_name': requested_name, 'limit': max_len, 'new_name': new_name}) return new_name diff --git a/neutron/plugins/ml2/db.py b/neutron/plugins/ml2/db.py index ef676d26dc3..c89c79c6b54 100644 --- a/neutron/plugins/ml2/db.py +++ b/neutron/plugins/ml2/db.py @@ -27,7 +27,7 @@ import six from sqlalchemy import or_ from sqlalchemy.orm import exc -from neutron._i18n import _, _LE +from neutron._i18n import _ from neutron.db import api as db_api from neutron.db.models import securitygroup as sg_models from neutron.db import models_v2 @@ -166,7 +166,7 @@ def get_port(context, port_id): except exc.NoResultFound: return except exc.MultipleResultsFound: - LOG.error(_LE("Multiple ports have port_id starting with %s"), + LOG.error("Multiple ports have port_id starting with %s", port_id) return @@ -251,7 +251,7 @@ def get_port_binding_host(context, port_id): {'port_id': port_id}) return except exc.MultipleResultsFound: - LOG.error(_LE("Multiple ports have port_id starting with %s"), + LOG.error("Multiple ports have port_id starting with %s", port_id) return return query.host @@ -312,7 +312,7 @@ def partial_port_ids_to_full_ids(context, partial_ids): if len(matching) < 1: LOG.info("No ports have port_id starting with %s", partial_id) elif len(matching) > 1: - LOG.error(_LE("Multiple ports 
have port_id starting with %s"), + LOG.error("Multiple ports have port_id starting with %s", partial_id) return result diff --git a/neutron/plugins/ml2/driver_context.py b/neutron/plugins/ml2/driver_context.py index 822dc59af2f..34df36d092c 100644 --- a/neutron/plugins/ml2/driver_context.py +++ b/neutron/plugins/ml2/driver_context.py @@ -20,7 +20,6 @@ from oslo_log import log from oslo_serialization import jsonutils import sqlalchemy -from neutron._i18n import _LW from neutron.db import segments_db from neutron.plugins.ml2 import driver_api as api @@ -245,7 +244,7 @@ class PortContext(MechanismDriverContext, api.PortContext): segment = segments_db.get_segment_by_id(self._plugin_context, segment_id) if not segment: - LOG.warning(_LW("Could not expand segment %s"), segment_id) + LOG.warning("Could not expand segment %s", segment_id) return segment @property diff --git a/neutron/plugins/ml2/drivers/agent/_common_agent.py b/neutron/plugins/ml2/drivers/agent/_common_agent.py index 329a3c74265..dade68564f1 100644 --- a/neutron/plugins/ml2/drivers/agent/_common_agent.py +++ b/neutron/plugins/ml2/drivers/agent/_common_agent.py @@ -31,7 +31,6 @@ from oslo_service import service from oslo_utils import excutils from osprofiler import profiler -from neutron._i18n import _LE, _LI from neutron.agent.l2 import l2_agent_extensions_manager as ext_manager from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as agent_sg_rpc @@ -72,9 +71,9 @@ class CommonAgentLoop(service.Service): def _validate_manager_class(self): if not isinstance(self.mgr, amb.CommonAgentManagerBase): - LOG.error(_LE("Manager class must inherit from " - "CommonAgentManagerBase to ensure CommonAgent " - "works properly.")) + LOG.error("Manager class must inherit from " + "CommonAgentManagerBase to ensure CommonAgent " + "works properly.") sys.exit(1) def start(self): @@ -112,7 +111,7 @@ class CommonAgentLoop(service.Service): self.daemon_loop() def stop(self, graceful=True): - 
LOG.info(_LI("Stopping %s agent."), self.agent_type) + LOG.info("Stopping %s agent.", self.agent_type) if graceful and self.quitting_rpc_timeout: self.set_rpc_timeout(self.quitting_rpc_timeout) super(CommonAgentLoop, self).stop(graceful) @@ -128,22 +127,22 @@ class CommonAgentLoop(service.Service): self.agent_state, True) if agent_status == n_const.AGENT_REVIVED: - LOG.info(_LI('%s Agent has just been revived. ' - 'Doing a full sync.'), + LOG.info('%s Agent has just been revived. ' + 'Doing a full sync.', self.agent_type) self.fullsync = True # we only want to update resource versions on startup self.agent_state.pop('resource_versions', None) self.agent_state.pop('start_flag', None) except Exception: - LOG.exception(_LE("Failed reporting state!")) + LOG.exception("Failed reporting state!") def _validate_rpc_endpoints(self): if not isinstance(self.endpoints[0], amb.CommonAgentManagerRpcCallBackBase): - LOG.error(_LE("RPC Callback class must inherit from " - "CommonAgentManagerRpcCallBackBase to ensure " - "CommonAgent works properly.")) + LOG.error("RPC Callback class must inherit from " + "CommonAgentManagerRpcCallBackBase to ensure " + "CommonAgent works properly.") sys.exit(1) def setup_rpc(self): @@ -153,7 +152,7 @@ class CommonAgentLoop(service.Service): self.context, self.sg_plugin_rpc, defer_refresh_firewall=True) self.agent_id = self.mgr.get_agent_id() - LOG.info(_LI("RPC agent_id: %s"), self.agent_id) + LOG.info("RPC agent_id: %s", self.agent_id) self.topic = topics.AGENT self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) @@ -217,7 +216,7 @@ class CommonAgentLoop(service.Service): devices_details_list = self.plugin_rpc.get_devices_details_list( self.context, devices, self.agent_id, host=cfg.CONF.host) except Exception: - LOG.exception(_LE("Unable to get port details for %s"), devices) + LOG.exception("Unable to get port details for %s", devices) # resync is needed return True @@ -234,7 +233,7 @@ class CommonAgentLoop(service.Service): 
LOG.debug("Port %s added", device) if 'port_id' in device_details: - LOG.info(_LI("Port %(device)s updated. Details: %(details)s"), + LOG.info("Port %(device)s updated. Details: %(details)s", {'device': device, 'details': device_details}) self.mgr.setup_arp_spoofing_protection(device, device_details) @@ -312,7 +311,7 @@ class CommonAgentLoop(service.Service): context=self.context, device_details=device_details) else: - LOG.info(_LI("Device %s not defined on plugin"), device) + LOG.info("Device %s not defined on plugin", device) @contextlib.contextmanager def _ignore_missing_device_exceptions(self, device): @@ -328,7 +327,7 @@ class CommonAgentLoop(service.Service): resync = False self.sg_agent.remove_devices_filter(devices) for device in devices: - LOG.info(_LI("Attachment %s removed"), device) + LOG.info("Attachment %s removed", device) details = None try: details = self.plugin_rpc.update_device_down(self.context, @@ -336,11 +335,11 @@ class CommonAgentLoop(service.Service): self.agent_id, cfg.CONF.host) except Exception: - LOG.exception(_LE("Error occurred while removing port %s"), + LOG.exception("Error occurred while removing port %s", device) resync = True if details and details['exists']: - LOG.info(_LI("Port %s updated."), device) + LOG.info("Port %s updated.", device) else: LOG.debug("Device %s not defined on plugin", device) port_id = self._clean_network_ports(device) @@ -429,7 +428,7 @@ class CommonAgentLoop(service.Service): or device_info.get('removed')) def daemon_loop(self): - LOG.info(_LI("%s Agent RPC Daemon Started!"), self.agent_type) + LOG.info("%s Agent RPC Daemon Started!", self.agent_type) device_info = None sync = True @@ -441,7 +440,7 @@ class CommonAgentLoop(service.Service): self.fullsync = False if sync: - LOG.info(_LI("%s Agent out of sync with plugin!"), + LOG.info("%s Agent out of sync with plugin!", self.agent_type) device_info = self.scan_devices(previous=device_info, sync=sync) @@ -453,7 +452,7 @@ class 
CommonAgentLoop(service.Service): try: sync = self.process_network_devices(device_info) except Exception: - LOG.exception(_LE("Error in agent loop. Devices info: %s"), + LOG.exception("Error in agent loop. Devices info: %s", device_info) sync = True diff --git a/neutron/plugins/ml2/drivers/helpers.py b/neutron/plugins/ml2/drivers/helpers.py index 30cf3f26042..c3f5f1e586c 100644 --- a/neutron/plugins/ml2/drivers/helpers.py +++ b/neutron/plugins/ml2/drivers/helpers.py @@ -21,7 +21,6 @@ from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log -from neutron._i18n import _LE from neutron.common import exceptions as exc from neutron.db import api as db_api from neutron.objects import base as base_obj @@ -43,7 +42,7 @@ class BaseTypeDriver(api.ML2TypeDriver): cfg.CONF.ml2.physical_network_mtus, unique_values=False ) except Exception as e: - LOG.error(_LE("Failed to parse physical_network_mtus: %s"), e) + LOG.error("Failed to parse physical_network_mtus: %s", e) self.physnet_mtus = [] def get_mtu(self, physical_network=None): diff --git a/neutron/plugins/ml2/drivers/l2pop/mech_driver.py b/neutron/plugins/ml2/drivers/l2pop/mech_driver.py index 185324188c8..a4a7d05cf85 100644 --- a/neutron/plugins/ml2/drivers/l2pop/mech_driver.py +++ b/neutron/plugins/ml2/drivers/l2pop/mech_driver.py @@ -22,7 +22,7 @@ from neutron_lib.plugins.ml2 import api from oslo_config import cfg from oslo_log import log as logging -from neutron._i18n import _, _LW +from neutron._i18n import _ from neutron.conf.plugins.ml2.drivers import l2pop as config from neutron.db import api as db_api from neutron.db import l3_hamode_db @@ -264,7 +264,7 @@ class L2populationMechanismDriver(api.MechanismDriver): port_context = context._plugin_context agent = l2pop_db.get_agent_by_host(session, agent_host) if not agent: - LOG.warning(_LW("Unable to retrieve active L2 agent on host %s"), + LOG.warning("Unable to retrieve active L2 agent on host %s", agent_host) return @@ -319,7 
+319,7 @@ class L2populationMechanismDriver(api.MechanismDriver): agent = l2pop_db.get_agent_by_host(session, agent_host) if not agent: - LOG.warning(_LW("Unable to retrieve active L2 agent on host %s"), + LOG.warning("Unable to retrieve active L2 agent on host %s", agent_host) return if not self._validate_segment(segment, port['id'], agent): diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py index f3231c9d462..f83f3fcd59d 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py @@ -19,7 +19,6 @@ from oslo_concurrency import lockutils from oslo_log import log as logging import tenacity -from neutron._i18n import _LI from neutron.agent.linux import ip_lib LOG = logging.getLogger(__name__) @@ -31,8 +30,8 @@ def setup_arp_spoofing_protection(vif, port_details): if not port_details.get('port_security_enabled', True): # clear any previous entries related to this port delete_arp_spoofing_protection([vif]) - LOG.info(_LI("Skipping ARP spoofing rules for port '%s' because " - "it has port security disabled"), vif) + LOG.info("Skipping ARP spoofing rules for port '%s' because " + "it has port security disabled", vif) return if net.is_port_trusted(port_details): # clear any previous entries related to this port @@ -100,7 +99,7 @@ def delete_unreferenced_arp_protection(current_vifs): devname = line.split(SPOOF_CHAIN_PREFIX, 1)[1].split(',')[0] if devname not in current_vifs: to_delete.append(devname) - LOG.info(_LI("Clearing orphaned ARP spoofing entries for devices %s"), + LOG.info("Clearing orphaned ARP spoofing entries for devices %s", to_delete) _delete_arp_spoofing_protection(to_delete, current_rules) diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/common/utils.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/common/utils.py index c8f9d6fa02e..10e8f569fdf 100644 --- 
a/neutron/plugins/ml2/drivers/linuxbridge/agent/common/utils.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/common/utils.py @@ -16,7 +16,6 @@ from neutron_lib import constants as n_const from oslo_log import log -from neutron._i18n import _LW from neutron.plugins.ml2.drivers.linuxbridge.agent.common import constants LOG = log.getLogger(__name__) @@ -25,8 +24,8 @@ LOG = log.getLogger(__name__) def get_tap_device_name(interface_id): """Convert port ID into device name format expected by linux bridge.""" if not interface_id: - LOG.warning(_LW("Invalid Interface ID, will lead to incorrect " - "tap device name")) + LOG.warning("Invalid Interface ID, will lead to incorrect " + "tap device name") tap_device_name = (n_const.TAP_DEVICE_PREFIX + interface_id[:constants.RESOURCE_ID_LENGTH]) return tap_device_name diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/qos_driver.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/qos_driver.py index 4e8849442e8..b038fbebf70 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/qos_driver.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/qos_driver.py @@ -16,7 +16,6 @@ from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log -from neutron._i18n import _LI from neutron.agent.l2.extensions import qos_linux as qos from neutron.agent.linux import iptables_manager from neutron.agent.linux import tc_lib @@ -41,7 +40,7 @@ class QosLinuxbridgeAgentDriver(qos.QosLinuxAgentDriver): const.EGRESS_DIRECTION: "o"} def initialize(self): - LOG.info(_LI("Initializing Linux bridge QoS extension")) + LOG.info("Initializing Linux bridge QoS extension") self.iptables_manager = iptables_manager.IptablesManager(use_ipv6=True) self.tbf_latency = cfg.CONF.QOS.tbf_latency diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py 
b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py index 31e9496113d..1a018e8841c 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py @@ -31,7 +31,6 @@ from oslo_service import service from oslo_utils import excutils from six import moves -from neutron._i18n import _LE, _LI, _LW from neutron.agent.linux import bridge_lib from neutron.agent.linux import ip_lib from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc @@ -85,16 +84,16 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): def validate_interface_mappings(self): for physnet, interface in self.interface_mappings.items(): if not ip_lib.device_exists(interface): - LOG.error(_LE("Interface %(intf)s for physical network %(net)s" - " does not exist. Agent terminated!"), + LOG.error("Interface %(intf)s for physical network %(net)s" + " does not exist. Agent terminated!", {'intf': interface, 'net': physnet}) sys.exit(1) def validate_bridge_mappings(self): for physnet, bridge in self.bridge_mappings.items(): if not ip_lib.device_exists(bridge): - LOG.error(_LE("Bridge %(brq)s for physical network %(net)s" - " does not exist. Agent terminated!"), + LOG.error("Bridge %(brq)s for physical network %(net)s" + " does not exist. 
Agent terminated!", {'brq': bridge, 'net': physnet}) sys.exit(1) @@ -133,10 +132,10 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): if not ip_addr.version == group_net.version: raise ValueError() except (netaddr.core.AddrFormatError, ValueError): - LOG.error(_LE("Invalid VXLAN Group: %(group)s, must be an address " - "or network (in CIDR notation) in a multicast " - "range of the same address family as local_ip: " - "%(ip)s"), + LOG.error("Invalid VXLAN Group: %(group)s, must be an address " + "or network (in CIDR notation) in a multicast " + "range of the same address family as local_ip: " + "%(ip)s", {'group': cfg.CONF.VXLAN.vxlan_group, 'ip': self.local_ip}) sys.exit(1) @@ -145,10 +144,10 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): """Return the device with local_ip on the host.""" device = self.ip.get_device_by_ip(self.local_ip) if not device: - LOG.error(_LE("Tunneling cannot be enabled without the local_ip " - "bound to an interface on the host. Please " - "configure local_ip %s on the host interface to " - "be used for tunneling and restart the agent."), + LOG.error("Tunneling cannot be enabled without the local_ip " + "bound to an interface on the host. 
Please " + "configure local_ip %s on the host interface to " + "be used for tunneling and restart the agent.", self.local_ip) sys.exit(1) return device @@ -161,16 +160,16 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): @staticmethod def get_bridge_name(network_id): if not network_id: - LOG.warning(_LW("Invalid Network ID, will lead to incorrect " - "bridge name")) + LOG.warning("Invalid Network ID, will lead to incorrect " + "bridge name") bridge_name = BRIDGE_NAME_PREFIX + \ network_id[:lconst.RESOURCE_ID_LENGTH] return bridge_name def get_subinterface_name(self, physical_interface, vlan_id): if not vlan_id: - LOG.warning(_LW("Invalid VLAN ID, will lead to incorrect " - "subinterface name")) + LOG.warning("Invalid VLAN ID, will lead to incorrect " + "subinterface name") vlan_postfix = '.%s' % vlan_id # For the vlan subinterface name prefix we use: @@ -206,8 +205,8 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): if 0 <= int(segmentation_id) <= p_const.MAX_VXLAN_VNI: return VXLAN_INTERFACE_PREFIX + str(segmentation_id) else: - LOG.warning(_LW("Invalid Segmentation ID: %s, will lead to " - "incorrect vxlan device name"), segmentation_id) + LOG.warning("Invalid Segmentation ID: %s, will lead to " + "incorrect vxlan device name", segmentation_id) @staticmethod def _match_multicast_range(segmentation_id): @@ -252,8 +251,8 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): """Create a vxlan and bridge unless they already exist.""" interface = self.ensure_vxlan(segmentation_id) if not interface: - LOG.error(_LE("Failed creating vxlan interface for " - "%(segmentation_id)s"), + LOG.error("Failed creating vxlan interface for " + "%(segmentation_id)s", {segmentation_id: segmentation_id}) return bridge_name = self.get_bridge_name(network_id) @@ -304,9 +303,9 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): with excutils.save_and_reraise_exception() as ctxt: if ip_lib.vlan_in_use(vlan_id): ctxt.reraise = False - LOG.error(_LE("Unable to create 
VLAN interface for " - "VLAN ID %s because it is in use by " - "another interface."), vlan_id) + LOG.error("Unable to create VLAN interface for " + "VLAN ID %s because it is in use by " + "another interface.", vlan_id) return int_vlan.disable_ipv6() int_vlan.link.set_up() @@ -341,9 +340,9 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): # to avoid excessive lookups and a possible race condition. if ip_lib.vxlan_in_use(segmentation_id): ctxt.reraise = False - LOG.error(_LE("Unable to create VXLAN interface for " - "VNI %s because it is in use by another " - "interface."), segmentation_id) + LOG.error("Unable to create VXLAN interface for " + "VNI %s because it is in use by another " + "interface.", segmentation_id) return None int_vxlan.disable_ipv6() int_vxlan.link.set_up() @@ -445,8 +444,8 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): bridge_device.addif(interface) except Exception as e: - LOG.error(_LE("Unable to add %(interface)s to %(bridge_name)s" - "! Exception: %(e)s"), + LOG.error("Unable to add %(interface)s to %(bridge_name)s" + "! 
Exception: %(e)s", {'interface': interface, 'bridge_name': bridge_name, 'e': e}) return @@ -458,7 +457,7 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): segmentation_id): if network_type == p_const.TYPE_VXLAN: if self.vxlan_mode == lconst.VXLAN_NONE: - LOG.error(_LE("Unable to add vxlan interface for network %s"), + LOG.error("Unable to add vxlan interface for network %s", network_id) return return self.ensure_vxlan_bridge(network_id, segmentation_id) @@ -467,8 +466,8 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): physical_bridge = self.get_existing_bridge_name(physical_network) physical_interface = self.interface_mappings.get(physical_network) if not physical_bridge and not physical_interface: - LOG.error(_LE("No bridge or interface mappings" - " for physical network %s"), + LOG.error("No bridge or interface mappings" + " for physical network %s", physical_network) return if network_type == p_const.TYPE_FLAT: @@ -479,9 +478,9 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): physical_interface, segmentation_id) else: - LOG.error(_LE("Unknown network_type %(network_type)s for network " - "%(network_id)s."), {network_type: network_type, - network_id: network_id}) + LOG.error("Unknown network_type %(network_type)s for network " + "%(network_id)s.", {'network_type': network_type, + 'network_id': network_id}) def add_tap_interface(self, network_id, network_type, physical_network, segmentation_id, tap_device_name, device_owner, mtu): @@ -664,8 +663,8 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): return False if not ip_lib.iproute_arg_supported( ['bridge', 'fdb'], 'append'): - LOG.warning(_LW('Option "%(option)s" must be supported by command ' - '"%(command)s" to enable %(mode)s mode'), + LOG.warning('Option "%(option)s" must be supported by command ' + '"%(command)s" to enable %(mode)s mode', {'option': 'append', 'command': 'bridge fdb', 'mode': 'VXLAN UCAST'}) @@ -679,7 +678,7 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): 
test_iface = self.ensure_vxlan(seg_id) break else: - LOG.error(_LE('No valid Segmentation ID to perform UCAST test.')) + LOG.error('No valid Segmentation ID to perform UCAST test.') return False try: @@ -694,14 +693,14 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): def vxlan_mcast_supported(self): if not cfg.CONF.VXLAN.vxlan_group: - LOG.warning(_LW('VXLAN muticast group(s) must be provided in ' - 'vxlan_group option to enable VXLAN MCAST mode')) + LOG.warning('VXLAN muticast group(s) must be provided in ' + 'vxlan_group option to enable VXLAN MCAST mode') return False if not ip_lib.iproute_arg_supported( ['ip', 'link', 'add', 'type', 'vxlan'], 'proxy'): - LOG.warning(_LW('Option "%(option)s" must be supported by command ' - '"%(command)s" to enable %(mode)s mode'), + LOG.warning('Option "%(option)s" must be supported by command ' + '"%(command)s" to enable %(mode)s mode', {'option': 'proxy', 'command': 'ip link add type vxlan', 'mode': 'VXLAN MCAST'}) @@ -776,8 +775,8 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): if mac: break else: - LOG.error(_LE("Unable to obtain MAC address for unique ID. " - "Agent terminated!")) + LOG.error("Unable to obtain MAC address for unique ID. " + "Agent terminated!") sys.exit(1) return 'lb%s' % mac.replace(":", "") @@ -844,8 +843,8 @@ class LinuxBridgeRpcCallbacks( if network_id in self.network_map: phynet = self.network_map[network_id].physical_network if phynet and phynet in self.agent.mgr.bridge_mappings: - LOG.info(_LI("Physical network %s is defined in " - "bridge_mappings and cannot be deleted."), + LOG.info("Physical network %s is defined in " + "bridge_mappings and cannot be deleted.", network_id) return @@ -960,19 +959,19 @@ def main(): interface_mappings = helpers.parse_mappings( cfg.CONF.LINUX_BRIDGE.physical_interface_mappings) except ValueError as e: - LOG.error(_LE("Parsing physical_interface_mappings failed: %s. 
" - "Agent terminated!"), e) + LOG.error("Parsing physical_interface_mappings failed: %s. " + "Agent terminated!", e) sys.exit(1) - LOG.info(_LI("Interface mappings: %s"), interface_mappings) + LOG.info("Interface mappings: %s", interface_mappings) try: bridge_mappings = helpers.parse_mappings( cfg.CONF.LINUX_BRIDGE.bridge_mappings) except ValueError as e: - LOG.error(_LE("Parsing bridge_mappings failed: %s. " - "Agent terminated!"), e) + LOG.error("Parsing bridge_mappings failed: %s. " + "Agent terminated!", e) sys.exit(1) - LOG.info(_LI("Bridge mappings: %s"), bridge_mappings) + LOG.info("Bridge mappings: %s", bridge_mappings) manager = LinuxBridgeManager(bridge_mappings, interface_mappings) linuxbridge_capabilities.register() @@ -983,6 +982,6 @@ def main(): constants.AGENT_TYPE_LINUXBRIDGE, LB_AGENT_BINARY) setup_profiler.setup("neutron-linuxbridge-agent", cfg.CONF.host) - LOG.info(_LI("Agent initialized successfully, now running... ")) + LOG.info("Agent initialized successfully, now running... 
") launcher = service.launch(cfg.CONF, agent) launcher.wait() diff --git a/neutron/plugins/ml2/drivers/macvtap/agent/macvtap_neutron_agent.py b/neutron/plugins/ml2/drivers/macvtap/agent/macvtap_neutron_agent.py index 6f2ffc8566e..2acd4b8caf5 100644 --- a/neutron/plugins/ml2/drivers/macvtap/agent/macvtap_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/macvtap/agent/macvtap_neutron_agent.py @@ -24,7 +24,6 @@ from oslo_log import log as logging import oslo_messaging from oslo_service import service -from neutron._i18n import _LE, _LI from neutron.agent.linux import ip_lib from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc from neutron.common import config as common_config @@ -58,7 +57,7 @@ class MacvtapRPCCallBack(sg_rpc.SecurityGroupAgentRpcCallbackMixin, network_id = kwargs.get('network_id') if network_id not in self.network_map: - LOG.error(_LE("Network %s is not available."), network_id) + LOG.error("Network %s is not available.", network_id) return segment = self.network_map.get(network_id) @@ -95,8 +94,8 @@ class MacvtapManager(amb.CommonAgentManagerBase): def validate_interface_mappings(self): for physnet, interface in self.interface_mappings.items(): if not ip_lib.device_exists(interface): - LOG.error(_LE("Interface %(intf)s for physical network " - "%(net)s does not exist. Agent terminated!"), + LOG.error("Interface %(intf)s for physical network " + "%(net)s does not exist. Agent terminated!", {'intf': interface, 'net': physnet}) sys.exit(1) @@ -118,8 +117,8 @@ class MacvtapManager(amb.CommonAgentManagerBase): mac = ip_lib.get_device_mac(devices[0].name) return 'macvtap%s' % mac.replace(":", "") else: - LOG.error(_LE("Unable to obtain MAC address for unique ID. " - "Agent terminated!")) + LOG.error("Unable to obtain MAC address for unique ID. 
" + "Agent terminated!") sys.exit(1) def get_devices_modified_timestamps(self, devices): @@ -175,18 +174,18 @@ class MacvtapManager(amb.CommonAgentManagerBase): def parse_interface_mappings(): if not cfg.CONF.macvtap.physical_interface_mappings: - LOG.error(_LE("No physical_interface_mappings provided, but at least " - "one mapping is required. Agent terminated!")) + LOG.error("No physical_interface_mappings provided, but at least " + "one mapping is required. Agent terminated!") sys.exit(1) try: interface_mappings = helpers.parse_mappings( cfg.CONF.macvtap.physical_interface_mappings) - LOG.info(_LI("Interface mappings: %s"), interface_mappings) + LOG.info("Interface mappings: %s", interface_mappings) return interface_mappings except ValueError as e: - LOG.error(_LE("Parsing physical_interface_mappings failed: %s. " - "Agent terminated!"), e) + LOG.error("Parsing physical_interface_mappings failed: %s. " + "Agent terminated!", e) sys.exit(1) @@ -195,11 +194,11 @@ def validate_firewall_driver(): supported_fw_drivers = ['neutron.agent.firewall.NoopFirewallDriver', 'noop'] if fw_driver not in supported_fw_drivers: - LOG.error(_LE('Unsupported configuration option for "SECURITYGROUP.' - 'firewall_driver"! Only the NoopFirewallDriver is ' - 'supported by macvtap agent, but "%s" is configured. ' - 'Set the firewall_driver to "noop" and start the ' - 'agent again. Agent terminated!'), + LOG.error('Unsupported configuration option for "SECURITYGROUP.' + 'firewall_driver"! Only the NoopFirewallDriver is ' + 'supported by macvtap agent, but "%s" is configured. ' + 'Set the firewall_driver to "noop" and start the ' + 'agent again. Agent terminated!', fw_driver) sys.exit(1) @@ -220,6 +219,6 @@ def main(): quitting_rpc_timeout, constants.AGENT_TYPE_MACVTAP, MACVTAP_AGENT_BINARY) - LOG.info(_LI("Agent initialized successfully, now running... ")) + LOG.info("Agent initialized successfully, now running... 
") launcher = service.launch(cfg.CONF, agent) launcher.wait() diff --git a/neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py b/neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py index 4e6a1a30b86..3b41da01d7f 100644 --- a/neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py +++ b/neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py @@ -14,7 +14,6 @@ # License for the specific language governing permissions and limitations # under the License. -from neutron._i18n import _LE from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron_lib.plugins.ml2 import api @@ -102,15 +101,15 @@ class MacvtapMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): if orig_source != macvtap_src: source_host = context.original[portbindings.HOST_ID] target_host = agent['host'] - LOG.error(_LE("Vif binding denied by mechanism driver. " - "MacVTap source device '%(target_dev)s' on " - "the migration target '%(target_host)s'is " - "not equal to device '%(source_dev)s' on " - "the migration source '%(source_host)s. " - "Make sure that the " - "interface mapping of macvtap " - "agent on both hosts is equal " - "for the physical network '%(physnet)s'!"), + LOG.error("Vif binding denied by mechanism driver. " + "MacVTap source device '%(target_dev)s' on " + "the migration target '%(target_host)s'is " + "not equal to device '%(source_dev)s' on " + "the migration source '%(source_host)s. 
" + "Make sure that the " + "interface mapping of macvtap " + "agent on both hosts is equal " + "for the physical network '%(physnet)s'!", {'source_dev': orig_source, 'target_dev': macvtap_src, 'target_host': target_host, diff --git a/neutron/plugins/ml2/drivers/mech_agent.py b/neutron/plugins/ml2/drivers/mech_agent.py index 7d26b4c56f9..67e54ce5c04 100644 --- a/neutron/plugins/ml2/drivers/mech_agent.py +++ b/neutron/plugins/ml2/drivers/mech_agent.py @@ -22,7 +22,6 @@ from neutron_lib.plugins.ml2 import api from oslo_log import log import six -from neutron._i18n import _LW from neutron.db import provisioning_blocks from neutron.plugins.common import constants as p_constants @@ -110,8 +109,8 @@ class AgentMechanismDriverBase(api.MechanismDriver): LOG.debug("Bound using segment: %s", segment) return else: - LOG.warning(_LW("Refusing to bind port %(pid)s to dead agent: " - "%(agent)s"), + LOG.warning("Refusing to bind port %(pid)s to dead agent: " + "%(agent)s", {'pid': context.current['id'], 'agent': agent}) @abc.abstractmethod diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py index 5d712a777f0..36600ec617a 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py @@ -19,7 +19,7 @@ import re from neutron_lib.utils import helpers from oslo_log import log as logging -from neutron._i18n import _, _LE, _LW +from neutron._i18n import _ from neutron.agent.linux import ip_link_support from neutron.plugins.ml2.drivers.mech_sriov.agent.common \ import exceptions as exc @@ -46,7 +46,7 @@ class PciOsWrapper(object): vf_list = [] dev_path = cls.DEVICE_PATH % dev_name if not os.path.isdir(dev_path): - LOG.error(_LE("Failed to get devices for %s"), dev_name) + LOG.error("Failed to get devices for %s", dev_name) raise exc.InvalidDeviceError(dev_name=dev_name, reason=_("Device not found")) file_list = 
os.listdir(dev_path) @@ -216,7 +216,7 @@ class EmbSwitch(object): def _get_vf_index(self, pci_slot): vf_index = self.pci_slot_map.get(pci_slot) if vf_index is None: - LOG.warning(_LW("Cannot find vf index for pci slot %s"), + LOG.warning("Cannot find vf index for pci slot %s", pci_slot) raise exc.InvalidPciSlotError(pci_slot=pci_slot) return vf_index @@ -410,8 +410,8 @@ class ESwitchManager(object): if embedded_switch: used_device_mac = embedded_switch.get_pci_device(pci_slot) if used_device_mac != device_mac: - LOG.warning(_LW("device pci mismatch: %(device_mac)s " - "- %(pci_slot)s"), + LOG.warning("device pci mismatch: %(device_mac)s " + "- %(pci_slot)s", {"device_mac": device_mac, "pci_slot": pci_slot}) embedded_switch = None return embedded_switch @@ -453,10 +453,10 @@ class ESwitchManager(object): if embedded_switch.get_pci_device(pci_slot) is None: embedded_switch.set_device_rate(pci_slot, rate_type, 0) else: - LOG.warning(_LW("VF with PCI slot %(pci_slot)s is already " - "assigned; skipping reset for '%(rate_type)s' " - "device configuration parameter"), + LOG.warning("VF with PCI slot %(pci_slot)s is already " + "assigned; skipping reset for '%(rate_type)s' " + "device configuration parameter", {'pci_slot': pci_slot, 'rate_type': rate_type}) else: - LOG.error(_LE("PCI slot %(pci_slot)s has no mapping to Embedded " - "Switch; skipping"), {'pci_slot': pci_slot}) + LOG.error("PCI slot %(pci_slot)s has no mapping to Embedded " + "Switch; skipping", {'pci_slot': pci_slot}) diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py index e0be5514a2f..54e3d67010d 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py @@ -14,7 +14,6 @@ from oslo_log import log as logging -from neutron._i18n import _LE, _LI from neutron.agent.l2.extensions 
import qos_linux as qos from neutron.plugins.ml2.drivers.mech_sriov.agent.common import ( exceptions as exc) @@ -58,9 +57,9 @@ class QosSRIOVAgentDriver(qos.QosLinuxAgentDriver): device, pci_slot, max_kbps) except exc.SriovNicError: LOG.exception( - _LE("Failed to set device %s max rate"), device) + "Failed to set device %s max rate", device) else: - LOG.info(_LI("No device with MAC %s defined on agent."), device) + LOG.info("No device with MAC %s defined on agent.", device) # TODO(ihrachys): those handlers are pretty similar, probably could make # use of some code deduplication @@ -87,6 +86,6 @@ class QosSRIOVAgentDriver(qos.QosLinuxAgentDriver): device, pci_slot, min_tx_kbps) except exc.SriovNicError: LOG.exception( - _LE("Failed to set device %s min_tx_rate"), device) + "Failed to set device %s min_tx_rate", device) else: - LOG.info(_LI("No device with MAC %s defined on agent."), device) + LOG.info("No device with MAC %s defined on agent.", device) diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py index 70ebf02a1ba..2b32f40dd4d 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py @@ -17,7 +17,6 @@ import re from oslo_log import log as logging -from neutron._i18n import _LE, _LW from neutron.agent.linux import ip_lib from neutron.plugins.ml2.drivers.mech_sriov.agent.common \ import exceptions as exc @@ -82,7 +81,7 @@ class PciDeviceIPWrapper(ip_lib.IPWrapper): try: out = self._as_root([], "link", ("show", self.dev_name)) except Exception as e: - LOG.exception(_LE("Failed executing ip command")) + LOG.exception("Failed executing ip command") raise exc.IpCommandDeviceError(dev_name=self.dev_name, reason=e) vf_to_mac_mapping = {} @@ -105,7 +104,7 @@ class PciDeviceIPWrapper(ip_lib.IPWrapper): try: out = self._as_root([], "link", ("show", self.dev_name)) except Exception as e: - LOG.exception(_LE("Failed executing 
ip command")) + LOG.exception("Failed executing ip command") raise exc.IpCommandDeviceError(dev_name=self.dev_name, reason=e) vf_lines = self._get_vf_link_show([vf_index], out) @@ -164,7 +163,7 @@ class PciDeviceIPWrapper(ip_lib.IPWrapper): if index in vf_list: vf_lines.append(line) if not vf_lines: - LOG.warning(_LW("Cannot find vfs %(vfs)s in device %(dev_name)s"), + LOG.warning("Cannot find vfs %(vfs)s in device %(dev_name)s", {'vfs': vf_list, 'dev_name': self.dev_name}) return vf_lines @@ -180,8 +179,8 @@ class PciDeviceIPWrapper(ip_lib.IPWrapper): vf_details["MAC"] = pattern_match.group("mac") vf_details["link-state"] = pattern_match.group("state") else: - LOG.warning(_LW("failed to parse vf link show line %(line)s: " - "for %(device)s"), + LOG.warning("failed to parse vf link show line %(line)s: " + "for %(device)s", {'line': vf_line, 'device': self.dev_name}) return vf_details @@ -190,7 +189,7 @@ class PciDeviceIPWrapper(ip_lib.IPWrapper): try: out = cls._execute([], "link", ("show", ), run_as_root=True) except Exception as e: - LOG.error(_LE("Failed executing ip command: %s"), e) + LOG.error("Failed executing ip command: %s", e) raise exc.IpCommandError(reason=e) return out diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py index e737374d9ae..58e2f6686f7 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py @@ -31,7 +31,7 @@ from oslo_service import loopingcall from osprofiler import profiler import six -from neutron._i18n import _, _LE, _LI, _LW +from neutron._i18n import _ from neutron.agent.l2 import l2_agent_extensions_manager as ext_manager from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as agent_sg_rpc @@ -151,7 +151,7 @@ class SriovNicSwitchAgent(object): def _setup_rpc(self): self.agent_id = 'nic-switch-agent.%s' % 
socket.gethostname() - LOG.info(_LI("RPC agent_id: %s"), self.agent_id) + LOG.info("RPC agent_id: %s", self.agent_id) self.topic = topics.AGENT self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) @@ -183,7 +183,7 @@ class SriovNicSwitchAgent(object): self.agent_state.pop('resource_versions', None) self.agent_state.pop('start_flag', None) except Exception: - LOG.exception(_LE("Failed reporting state!")) + LOG.exception("Failed reporting state!") def _create_agent_extension_manager(self, connection): ext_manager.register_opts(self.conf) @@ -241,22 +241,22 @@ class SriovNicSwitchAgent(object): self.eswitch_mgr.set_device_spoofcheck(device, pci_slot, spoofcheck) except Exception: - LOG.warning(_LW("Failed to set spoofcheck for device %s"), + LOG.warning("Failed to set spoofcheck for device %s", device) - LOG.info(_LI("Device %(device)s spoofcheck %(spoofcheck)s"), + LOG.info("Device %(device)s spoofcheck %(spoofcheck)s", {"device": device, "spoofcheck": spoofcheck}) try: self.eswitch_mgr.set_device_state(device, pci_slot, admin_state_up) except exc.IpCommandOperationNotSupportedError: - LOG.warning(_LW("Device %s does not support state change"), + LOG.warning("Device %s does not support state change", device) except exc.SriovNicError: - LOG.warning(_LW("Failed to set device %s state"), device) + LOG.warning("Failed to set device %s state", device) return False else: - LOG.info(_LI("No device with MAC %s defined on agent."), device) + LOG.info("No device with MAC %s defined on agent.", device) return False return True @@ -294,7 +294,7 @@ class SriovNicSwitchAgent(object): LOG.debug("Port with MAC address %s is added", device) if 'port_id' in device_details: - LOG.info(_LI("Port %(device)s updated. Details: %(details)s"), + LOG.info("Port %(device)s updated. 
Details: %(details)s", {'device': device, 'details': device_details}) port_id = device_details['port_id'] profile = device_details['profile'] @@ -312,7 +312,7 @@ class SriovNicSwitchAgent(object): (device, profile.get('pci_slot'))) self.ext_manager.handle_port(self.context, device_details) else: - LOG.info(_LI("Device with MAC %s not defined on plugin"), + LOG.info("Device with MAC %s not defined on plugin", device) self.plugin_rpc.update_device_list(self.context, devices_up, @@ -325,8 +325,8 @@ class SriovNicSwitchAgent(object): resync = False for device in devices: mac, pci_slot = device - LOG.info(_LI("Removing device with MAC address %(mac)s and " - "PCI slot %(pci_slot)s"), + LOG.info("Removing device with MAC address %(mac)s and " + "PCI slot %(pci_slot)s", {'mac': mac, 'pci_slot': pci_slot}) try: port_id = self._clean_network_ports(device) @@ -336,8 +336,8 @@ class SriovNicSwitchAgent(object): 'profile': {'pci_slot': pci_slot}} self.ext_manager.delete_port(self.context, port) else: - LOG.warning(_LW("port_id to device with MAC " - "%s not found"), mac) + LOG.warning("port_id to device with MAC " + "%s not found", mac) dev_details = self.plugin_rpc.update_device_down(self.context, mac, self.agent_id, @@ -350,8 +350,8 @@ class SriovNicSwitchAgent(object): resync = True continue if dev_details['exists']: - LOG.info(_LI("Port with MAC %(mac)s and PCI slot " - "%(pci_slot)s updated."), + LOG.info("Port with MAC %(mac)s and PCI slot " + "%(pci_slot)s updated.", {'mac': mac, 'pci_slot': pci_slot}) else: LOG.debug("Device with MAC %(mac)s and PCI slot " @@ -363,14 +363,14 @@ class SriovNicSwitchAgent(object): sync = True devices = set() - LOG.info(_LI("SRIOV NIC Agent RPC Daemon Started!")) + LOG.info("SRIOV NIC Agent RPC Daemon Started!") while True: start = time.time() LOG.debug("Agent rpc_loop - iteration:%d started", self.iter_num) if sync: - LOG.info(_LI("Agent out of sync with plugin!")) + LOG.info("Agent out of sync with plugin!") devices.clear() sync = False 
device_info = {} @@ -391,7 +391,7 @@ class SriovNicSwitchAgent(object): sync = self.process_network_devices(device_info) devices = device_info['current'] except Exception: - LOG.exception(_LE("Error in agent loop. Devices info: %s"), + LOG.exception("Error in agent loop. Devices info: %s", device_info) sync = True # Restore devices that were removed from this set earlier @@ -452,11 +452,11 @@ def main(): exclude_devices = config_parser.exclude_devices except ValueError: - LOG.exception(_LE("Failed on Agent configuration parse. " - "Agent terminated!")) + LOG.exception("Failed on Agent configuration parse. " + "Agent terminated!") raise SystemExit(1) - LOG.info(_LI("Physical Devices mappings: %s"), device_mappings) - LOG.info(_LI("Exclude Devices: %s"), exclude_devices) + LOG.info("Physical Devices mappings: %s", device_mappings) + LOG.info("Exclude Devices: %s", exclude_devices) polling_interval = cfg.CONF.AGENT.polling_interval try: @@ -464,9 +464,9 @@ def main(): exclude_devices, polling_interval) except exc.SriovNicError: - LOG.exception(_LE("Agent Initialization Failed")) + LOG.exception("Agent Initialization Failed") raise SystemExit(1) # Start everything. setup_profiler.setup("neutron-sriov-nic-agent", cfg.CONF.host) - LOG.info(_LI("Agent initialized successfully, now running... ")) + LOG.info("Agent initialized successfully, now running... 
") agent.daemon_loop() diff --git a/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py b/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py index 443c59a9842..094e4b7e0a3 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py @@ -18,7 +18,6 @@ from neutron_lib import constants from neutron_lib.plugins.ml2 import api from oslo_log import log -from neutron._i18n import _LW from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers import mech_agent from neutron.plugins.ml2.drivers.mech_sriov.mech_driver \ @@ -106,8 +105,7 @@ class SriovNicSwitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): agent): return else: - LOG.warning(_LW("Attempting to bind with dead agent: %s"), - agent) + LOG.warning("Attempting to bind with dead agent: %s", agent) def try_to_bind_segment_for_agent(self, context, segment, agent): vnic_type = context.current.get(portbindings.VNIC_TYPE, diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py index ea94a54cfc8..1385233cd28 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py @@ -26,7 +26,6 @@ from ryu.lib.packet import ether_types from ryu.lib.packet import icmpv6 from ryu.lib.packet import in_proto -from neutron._i18n import _LE from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ @@ -52,7 +51,7 @@ class OVSIntegrationBridge(ovs_bridge.OVSAgentBridge): try: flows = self.dump_flows(constants.CANARY_TABLE) except RuntimeError: - LOG.exception(_LE("Failed to communicate with the switch")) + LOG.exception("Failed to communicate 
with the switch") return constants.OVS_DEAD return constants.OVS_NORMAL if flows else constants.OVS_RESTARTED diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py index e03433ed35b..bf8c98a823f 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py @@ -26,7 +26,7 @@ from ryu.lib import ofctl_string from ryu.ofproto import ofproto_parser import six -from neutron._i18n import _, _LW +from neutron._i18n import _ from neutron.agent.common import ovs_lib LOG = logging.getLogger(__name__) @@ -156,7 +156,7 @@ class OpenFlowSwitchMixin(object): LOG.debug("Reserved cookies for %s: %s", self.br_name, self.reserved_cookies) for c in cookies: - LOG.warning(_LW("Deleting flow with cookie 0x%(cookie)x"), + LOG.warning("Deleting flow with cookie 0x%(cookie)x", {'cookie': c}) self.uninstall_flows(cookie=c, cookie_mask=ovs_lib.UINT64_BITMASK) diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge.py index df9ab36afda..923f1a09774 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge.py @@ -17,7 +17,6 @@ from oslo_log import log as logging from oslo_utils import excutils -from neutron._i18n import _LI from neutron.agent.common import ovs_lib from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \ as ovs_consts @@ -44,7 +43,7 @@ class OVSAgentBridge(ofswitch.OpenFlowSwitchMixin, while True: if self._cached_dpid is None: dpid_str = self.get_datapath_id() - LOG.info(_LI("Bridge %(br_name)s has datapath-ID %(dpid)s"), + LOG.info("Bridge %(br_name)s has datapath-ID %(dpid)s", {"br_name": self.br_name, "dpid": dpid_str}) 
self._cached_dpid = int(dpid_str, 16) try: @@ -59,12 +58,12 @@ class OVSAgentBridge(ofswitch.OpenFlowSwitchMixin, old_dpid_str = format(self._cached_dpid, '0x') new_dpid_str = self.get_datapath_id() if new_dpid_str != old_dpid_str: - LOG.info(_LI("Bridge %(br_name)s changed its " - "datapath-ID from %(old)s to %(new)s"), { - "br_name": self.br_name, - "old": old_dpid_str, - "new": new_dpid_str, - }) + LOG.info("Bridge %(br_name)s changed its " + "datapath-ID from %(old)s to %(new)s", { + "br_name": self.br_name, + "old": old_dpid_str, + "new": new_dpid_str, + }) ctx.reraise = False self._cached_dpid = int(new_dpid_str, 16) diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_ryuapp.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_ryuapp.py index 0bd9f74d575..e21a5c78b56 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_ryuapp.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_ryuapp.py @@ -23,7 +23,6 @@ from ryu.base import app_manager from ryu.lib import hub from ryu.ofproto import ofproto_v1_3 -from neutron._i18n import _LE from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import br_int from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ @@ -41,7 +40,7 @@ def agent_main_wrapper(bridge_classes): ovs_agent.main(bridge_classes) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Agent main thread died of an exception")) + LOG.exception("Agent main thread died of an exception") finally: # The following call terminates Ryu's AppManager.run_apps(), # which is needed for clean shutdown of an agent process. 
diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py index 4cb0a1bf3df..3250731380b 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py @@ -18,8 +18,6 @@ import re from oslo_log import log as logging -from neutron._i18n import _LW - LOG = logging.getLogger(__name__) # Field name mappings (from Ryu to ovs-ofctl) @@ -111,5 +109,5 @@ class OpenFlowSwitchMixin(object): for flow, cookie, table in self._filter_flows(flows): # deleting a stale flow should be rare. # it might deserve some attention - LOG.warning(_LW("Deleting flow %s"), flow) + LOG.warning("Deleting flow %s", flow) self.delete_flows(cookie=cookie + '/-1', table=table) diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py index 7abc73f42bd..c89ec4175d4 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py @@ -22,7 +22,6 @@ import oslo_messaging from oslo_utils import excutils from osprofiler import profiler -from neutron._i18n import _LE, _LI, _LW from neutron.agent.common import ovs_lib from neutron.common import utils as n_utils from neutron.plugins.common import constants as p_const @@ -170,13 +169,13 @@ class OVSDVRNeutronAgent(object): try: self.get_dvr_mac_address_with_retry() except oslo_messaging.RemoteError as e: - LOG.error(_LE('L2 agent could not get DVR MAC address at ' - 'startup due to RPC error. It happens when the ' - 'server does not support this RPC API. Detailed ' - 'message: %s'), e) + LOG.error('L2 agent could not get DVR MAC address at ' + 'startup due to RPC error. It happens when the ' + 'server does not support this RPC API. 
Detailed ' + 'message: %s', e) except oslo_messaging.MessagingTimeout: - LOG.error(_LE('DVR: Failed to obtain a valid local ' - 'DVR MAC address')) + LOG.error('DVR: Failed to obtain a valid local ' + 'DVR MAC address') if not self.in_distributed_mode(): sys.exit(1) @@ -193,9 +192,9 @@ class OVSDVRNeutronAgent(object): with excutils.save_and_reraise_exception() as ctx: if retry_count > 0: ctx.reraise = False - LOG.warning(_LW('L2 agent could not get DVR MAC ' - 'address from server. Retrying. ' - 'Detailed message: %s'), e) + LOG.warning('L2 agent could not get DVR MAC ' + 'address from server. Retrying. ' + 'Detailed message: %s', e) else: LOG.debug("L2 Agent DVR: Received response for " "get_dvr_mac_address_by_host() from " @@ -206,7 +205,7 @@ class OVSDVRNeutronAgent(object): def setup_dvr_flows_on_integ_br(self): '''Setup up initial dvr flows into br-int''' - LOG.info(_LI("L2 Agent operating in DVR Mode with MAC %s"), + LOG.info("L2 Agent operating in DVR Mode with MAC %s", self.dvr_mac_address) # Remove existing flows in integration bridge if self.conf.AGENT.drop_flows_on_start: @@ -361,9 +360,9 @@ class OVSDVRNeutronAgent(object): subnet_info = self.plugin_rpc.get_subnet_for_dvr( self.context, subnet_uuid, fixed_ips=fixed_ips) if not subnet_info: - LOG.warning(_LW("DVR: Unable to retrieve subnet information " - "for subnet_id %s. The subnet or the gateway " - "may have already been deleted"), subnet_uuid) + LOG.warning("DVR: Unable to retrieve subnet information " + "for subnet_id %s. 
The subnet or the gateway " + "may have already been deleted", subnet_uuid) return LOG.debug("get_subnet_for_dvr for subnet %(uuid)s " "returned with %(info)s", @@ -497,13 +496,13 @@ class OVSDVRNeutronAgent(object): subs = list(ovsport.get_subnets()) if subs[0] == fixed_ip['subnet_id']: return - LOG.error(_LE("Centralized-SNAT port %(port)s on subnet " - "%(port_subnet)s already seen on a different " - "subnet %(orig_subnet)s"), { - "port": port.vif_id, - "port_subnet": fixed_ip['subnet_id'], - "orig_subnet": subs[0], - }) + LOG.error("Centralized-SNAT port %(port)s on subnet " + "%(port_subnet)s already seen on a different " + "subnet %(orig_subnet)s", { + "port": port.vif_id, + "port_subnet": fixed_ip['subnet_id'], + "orig_subnet": subs[0], + }) return subnet_uuid = fixed_ip['subnet_id'] ldm = None @@ -514,9 +513,9 @@ class OVSDVRNeutronAgent(object): subnet_info = self.plugin_rpc.get_subnet_for_dvr( self.context, subnet_uuid, fixed_ips=fixed_ips) if not subnet_info: - LOG.warning(_LW("DVR: Unable to retrieve subnet information " - "for subnet_id %s. The subnet or the gateway " - "may have already been deleted"), subnet_uuid) + LOG.warning("DVR: Unable to retrieve subnet information " + "for subnet_id %s. 
The subnet or the gateway " + "may have already been deleted", subnet_uuid) return LOG.debug("get_subnet_for_dvr for subnet %(uuid)s " "returned with %(info)s", @@ -558,8 +557,8 @@ class OVSDVRNeutronAgent(object): if (port.vif_id in self.local_ports and self.local_ports[port.vif_id].ofport != port.ofport): - LOG.info(_LI("DVR: Port %(vif)s changed port number to " - "%(ofport)s, rebinding."), + LOG.info("DVR: Port %(vif)s changed port number to " + "%(ofport)s, rebinding.", {'vif': port.vif_id, 'ofport': port.ofport}) self.unbind_port_from_dvr(port, local_vlan_map) if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py index c35f457b7af..ad20ca7e699 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py @@ -38,7 +38,7 @@ from oslo_utils import netutils from osprofiler import profiler from six import moves -from neutron._i18n import _, _LE, _LI, _LW +from neutron._i18n import _ from neutron.agent.common import ip_lib from neutron.agent.common import ovs_lib from neutron.agent.common import polling @@ -310,8 +310,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, self.agent_state, True) if agent_status == c_const.AGENT_REVIVED: - LOG.info(_LI('Agent has just been revived. ' - 'Doing a full sync.')) + LOG.info('Agent has just been revived. ' + 'Doing a full sync.') self.fullsync = True # we only want to update resource versions on startup @@ -321,7 +321,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, # is complete. 
systemd.notify_once() except Exception: - LOG.exception(_LE("Failed reporting state!")) + LOG.exception("Failed reporting state!") def _restore_local_vlan_map(self): self._local_vlan_hints = {} @@ -474,10 +474,10 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, tunnel_ip = kwargs.get('tunnel_ip') tunnel_type = kwargs.get('tunnel_type') if not tunnel_type: - LOG.error(_LE("No tunnel_type specified, cannot create tunnels")) + LOG.error("No tunnel_type specified, cannot create tunnels") return if tunnel_type not in self.tunnel_types: - LOG.error(_LE("tunnel_type %s not supported by agent"), + LOG.error("tunnel_type %s not supported by agent", tunnel_type) return if tunnel_ip == self.local_ip: @@ -496,14 +496,14 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, return tunnel_ip = kwargs.get('tunnel_ip') if not tunnel_ip: - LOG.error(_LE("No tunnel_ip specified, cannot delete tunnels")) + LOG.error("No tunnel_ip specified, cannot delete tunnels") return tunnel_type = kwargs.get('tunnel_type') if not tunnel_type: - LOG.error(_LE("No tunnel_type specified, cannot delete tunnels")) + LOG.error("No tunnel_type specified, cannot delete tunnels") return if tunnel_type not in self.tunnel_types: - LOG.error(_LE("tunnel_type %s not supported by agent"), + LOG.error("tunnel_type %s not supported by agent", tunnel_type) return ofport = self.tun_br_ofports[tunnel_type].get(tunnel_ip) @@ -600,7 +600,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, elif action == 'remove': br.delete_arp_responder(local_vid, ip) else: - LOG.warning(_LW('Action %s not supported'), action) + LOG.warning('Action %s not supported', action) def _local_vlan_for_flat(self, lvid, physical_network): phys_br = self.phys_brs[physical_network] @@ -646,7 +646,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, lvid = self._local_vlan_hints.pop(net_uuid, None) if lvid is None: if not 
self.available_local_vlans: - LOG.error(_LE("No local VLAN available for net-id=%s"), + LOG.error("No local VLAN available for net-id=%s", net_uuid) return lvid = self.available_local_vlans.pop() @@ -654,8 +654,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, net_uuid, lvid, network_type, physical_network, segmentation_id) - LOG.info(_LI("Assigning %(vlan_id)s as local vlan for " - "net-id=%(net_uuid)s"), + LOG.info("Assigning %(vlan_id)s as local vlan for " + "net-id=%(net_uuid)s", {'vlan_id': lvid, 'net_uuid': net_uuid}) if network_type in constants.TUNNEL_NETWORK_TYPES: @@ -676,17 +676,17 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, network_type=network_type, lvid=lvid, segmentation_id=segmentation_id) else: - LOG.error(_LE("Cannot provision %(network_type)s network for " - "net-id=%(net_uuid)s - tunneling disabled"), + LOG.error("Cannot provision %(network_type)s network for " + "net-id=%(net_uuid)s - tunneling disabled", {'network_type': network_type, 'net_uuid': net_uuid}) elif network_type == p_const.TYPE_FLAT: if physical_network in self.phys_brs: self._local_vlan_for_flat(lvid, physical_network) else: - LOG.error(_LE("Cannot provision flat network for " - "net-id=%(net_uuid)s - no bridge for " - "physical_network %(physical_network)s"), + LOG.error("Cannot provision flat network for " + "net-id=%(net_uuid)s - no bridge for " + "physical_network %(physical_network)s", {'net_uuid': net_uuid, 'physical_network': physical_network}) elif network_type == p_const.TYPE_VLAN: @@ -694,17 +694,17 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, self._local_vlan_for_vlan(lvid, physical_network, segmentation_id) else: - LOG.error(_LE("Cannot provision VLAN network for " - "net-id=%(net_uuid)s - no bridge for " - "physical_network %(physical_network)s"), + LOG.error("Cannot provision VLAN network for " + "net-id=%(net_uuid)s - no bridge for " + "physical_network 
%(physical_network)s", {'net_uuid': net_uuid, 'physical_network': physical_network}) elif network_type == p_const.TYPE_LOCAL: # no flows needed for local networks pass else: - LOG.error(_LE("Cannot provision unknown network type " - "%(network_type)s for net-id=%(net_uuid)s"), + LOG.error("Cannot provision unknown network type " + "%(network_type)s for net-id=%(net_uuid)s", {'network_type': network_type, 'net_uuid': net_uuid}) @@ -719,8 +719,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, LOG.debug("Network %s not used on agent.", net_uuid) return - LOG.info(_LI("Reclaiming vlan = %(vlan_id)s from " - "net-id = %(net_uuid)s"), + LOG.info("Reclaiming vlan = %(vlan_id)s from " + "net-id = %(net_uuid)s", {'vlan_id': lvm.vlan, 'net_uuid': net_uuid}) if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: @@ -764,8 +764,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, # no flows needed for local networks pass else: - LOG.error(_LE("Cannot reclaim unknown network type " - "%(network_type)s for net-id=%(net_uuid)s"), + LOG.error("Cannot reclaim unknown network type " + "%(network_type)s for net-id=%(net_uuid)s", {'network_type': lvm.network_type, 'net_uuid': net_uuid}) @@ -802,7 +802,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, if port.vif_id in self.deleted_ports: LOG.debug("Port %s deleted concurrently", port.vif_id) elif port.vif_id in self.updated_ports: - LOG.error(_LE("Expected port %s not found"), port.vif_id) + LOG.error("Expected port %s not found", port.vif_id) else: LOG.debug("Unable to get config for port %s", port.vif_id) return False @@ -897,18 +897,18 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, failed_devices = (devices_set.get('failed_devices_up') + devices_set.get('failed_devices_down')) if failed_devices: - LOG.error(_LE("Configuration for devices %s failed!"), + LOG.error("Configuration for devices %s failed!", 
failed_devices) - LOG.info(_LI("Configuration for devices up %(up)s and devices " - "down %(down)s completed."), + LOG.info("Configuration for devices up %(up)s and devices " + "down %(down)s completed.", {'up': devices_up, 'down': devices_down}) return set(failed_devices) @staticmethod def setup_arp_spoofing_protection(bridge, vif, port_details): if not port_details.get('port_security_enabled', True): - LOG.info(_LI("Skipping ARP spoofing rules for port '%s' because " - "it has port security disabled"), vif.port_name) + LOG.info("Skipping ARP spoofing rules for port '%s' because " + "it has port security disabled", vif.port_name) bridge.delete_arp_spoofing_protection(port=vif.ofport) bridge.set_allowed_macs_for_port(port=vif.ofport, allow_all=True) return @@ -969,7 +969,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, net_uuid = net_uuid or self.vlan_manager.get_net_uuid(vif_id) except vlanmanager.VifIdNotFound: LOG.info( - _LI('port_unbound(): net_uuid %s not managed by VLAN manager'), + 'port_unbound(): net_uuid %s not managed by VLAN manager', net_uuid) return @@ -1039,7 +1039,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, ancillary_bridges = [] for bridge in ovs_bridges: br = ovs_lib.OVSBridge(bridge) - LOG.info(_LI('Adding %s to list of bridges.'), bridge) + LOG.info('Adding %s to list of bridges.', bridge) ancillary_bridges.append(br) return ancillary_bridges @@ -1070,10 +1070,10 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, self.conf.OVS.int_peer_patch_port) if ovs_lib.INVALID_OFPORT in (self.patch_tun_ofport, self.patch_int_ofport): - LOG.error(_LE("Failed to create OVS patch port. Cannot have " - "tunneling enabled on this agent, since this " - "version of OVS does not support tunnels or patch " - "ports. Agent terminated!")) + LOG.error("Failed to create OVS patch port. 
Cannot have " + "tunneling enabled on this agent, since this " + "version of OVS does not support tunnels or patch " + "ports. Agent terminated!") sys.exit(1) if self.conf.AGENT.drop_flows_on_start: self.tun_br.uninstall_flows(cookie=ovs_lib.COOKIE_ANY) @@ -1101,15 +1101,15 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, ovs = ovs_lib.BaseOVS() ovs_bridges = ovs.get_bridges() for physical_network, bridge in bridge_mappings.items(): - LOG.info(_LI("Mapping physical network %(physical_network)s to " - "bridge %(bridge)s"), + LOG.info("Mapping physical network %(physical_network)s to " + "bridge %(bridge)s", {'physical_network': physical_network, 'bridge': bridge}) # setup physical bridge if bridge not in ovs_bridges: - LOG.error(_LE("Bridge %(bridge)s for physical network " - "%(physical_network)s does not exist. Agent " - "terminated!"), + LOG.error("Bridge %(bridge)s for physical network " + "%(physical_network)s does not exist. Agent " + "terminated!", {'physical_network': physical_network, 'bridge': bridge}) sys.exit(1) @@ -1395,8 +1395,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, and port_tags[port.port_name] != lvm.vlan ): LOG.info( - _LI("Port '%(port_name)s' has lost " - "its vlan tag '%(vlan_tag)d'!"), + "Port '%(port_name)s' has lost " + "its vlan tag '%(vlan_tag)d'!", {'port_name': port.port_name, 'vlan_tag': lvm.vlan} ) @@ -1424,8 +1424,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, # error condition of which operators should be aware port_needs_binding = True if not vif_port.ofport: - LOG.warning(_LW("VIF port: %s has no ofport configured, " - "and might not be able to transmit"), + LOG.warning("VIF port: %s has no ofport configured, " + "and might not be able to transmit", vif_port.vif_id) if vif_port: if admin_state_up: @@ -1434,8 +1434,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, physical_network, segmentation_id, 
fixed_ips, device_owner, ovs_restarted) else: - LOG.info(_LI("VIF port: %s admin state up disabled, " - "putting on the dead VLAN"), vif_port.vif_id) + LOG.info("VIF port: %s admin state up disabled, " + "putting on the dead VLAN", vif_port.vif_id) self.port_dead(vif_port) port_needs_binding = False @@ -1447,13 +1447,13 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, try: if (netaddr.IPAddress(self.local_ip).version != netaddr.IPAddress(remote_ip).version): - LOG.error(_LE("IP version mismatch, cannot create tunnel: " - "local_ip=%(lip)s remote_ip=%(rip)s"), + LOG.error("IP version mismatch, cannot create tunnel: " + "local_ip=%(lip)s remote_ip=%(rip)s", {'lip': self.local_ip, 'rip': remote_ip}) return 0 except Exception: - LOG.error(_LE("Invalid local or remote IP, cannot create tunnel: " - "local_ip=%(lip)s remote_ip=%(rip)s"), + LOG.error("Invalid local or remote IP, cannot create tunnel: " + "local_ip=%(lip)s remote_ip=%(rip)s", {'lip': self.local_ip, 'rip': remote_ip}) return 0 ofport = br.add_tunnel_port(port_name, @@ -1464,7 +1464,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, self.dont_fragment, self.tunnel_csum) if ofport == ovs_lib.INVALID_OFPORT: - LOG.error(_LE("Failed to set-up %(type)s tunnel port to %(ip)s"), + LOG.error("Failed to set-up %(type)s tunnel port to %(ip)s", {'type': tunnel_type, 'ip': remote_ip}) return 0 @@ -1532,13 +1532,13 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, port = vif_by_id.get(device) if not port: # The port disappeared and cannot be processed - LOG.info(_LI("Port %s was not found on the integration bridge " - "and will therefore not be processed"), device) + LOG.info("Port %s was not found on the integration bridge " + "and will therefore not be processed", device) skipped_devices.append(device) continue if 'port_id' in details: - LOG.info(_LI("Port %(device)s updated. 
Details: %(details)s"), + LOG.info("Port %(device)s updated. Details: %(details)s", {'device': device, 'details': details}) details['vif_port'] = port need_binding = self.treat_vif_port(port, details['port_id'], @@ -1557,7 +1557,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, self.ext_manager.handle_port(self.context, details) else: LOG.warning( - _LW("Device %s not defined on plugin or binding failed"), + "Device %s not defined on plugin or binding failed", device) if (port and port.ofport != -1): self.port_dead(port) @@ -1587,14 +1587,13 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, self.agent_id, self.conf.host)) failed_devices |= set(devices_set_up.get('failed_devices_up')) - LOG.info(_LI("Ancillary Ports %(added)s added, failed devices " - "%(failed)s"), {'added': devices, - 'failed': failed_devices}) + LOG.info("Ancillary Ports %(added)s added, failed devices " + "%(failed)s", {'added': devices, 'failed': failed_devices}) return failed_devices def treat_devices_removed(self, devices): self.sg_agent.remove_devices_filter(devices) - LOG.info(_LI("Ports %s removed"), devices) + LOG.info("Ports %s removed", devices) devices_down = self.plugin_rpc.update_device_list(self.context, [], devices, @@ -1608,19 +1607,19 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, return failed_devices def treat_ancillary_devices_removed(self, devices): - LOG.info(_LI("Ancillary ports %s removed"), devices) + LOG.info("Ancillary ports %s removed", devices) devices_down = self.plugin_rpc.update_device_list(self.context, [], devices, self.agent_id, self.conf.host) - LOG.info(_LI("Devices down %s "), devices_down) + LOG.info("Devices down %s ", devices_down) failed_devices = set(devices_down.get('failed_devices_down')) if failed_devices: LOG.debug("Port removal failed for %s", failed_devices) for detail in devices_down.get('devices_down'): if detail['exists']: - LOG.info(_LI("Port %s updated."), 
detail['device']) + LOG.info("Port %s updated.", detail['device']) # Nothing to do regarding local networking else: LOG.debug("Device %s not defined on plugin", detail['device']) @@ -1721,7 +1720,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, iphash = base64.b32encode(sha1.digest()) return iphash[:hashlen].decode().lower() except Exception: - LOG.warning(_LW("Invalid remote IP: %s"), ip_address) + LOG.warning("Invalid remote IP: %s", ip_address) return def tunnel_sync(self): @@ -1783,11 +1782,11 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, # Check for the canary flow status = self.int_br.check_canary_table() if status == constants.OVS_RESTARTED: - LOG.warning(_LW("OVS is restarted. OVSNeutronAgent will reset " - "bridges and recover ports.")) + LOG.warning("OVS is restarted. OVSNeutronAgent will reset " + "bridges and recover ports.") elif status == constants.OVS_DEAD: - LOG.warning(_LW("OVS is dead. OVSNeutronAgent will keep running " - "and checking OVS status periodically.")) + LOG.warning("OVS is dead. 
OVSNeutronAgent will keep running " + "and checking OVS status periodically.") return status def loop_count_and_wait(self, start_time, port_stats): @@ -1826,7 +1825,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, if self.enable_tunneling: bridges.append(self.tun_br) for bridge in bridges: - LOG.info(_LI("Cleaning stale %s flows"), bridge.br_name) + LOG.info("Cleaning stale %s flows", bridge.br_name) bridge.cleanup_flows() def process_port_info(self, start, polling_manager, sync, ovs_restarted, @@ -1839,14 +1838,14 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, # details regarding polling in BasePollingManager subclasses if sync or not (hasattr(polling_manager, 'get_events')): if sync: - LOG.info(_LI("Agent out of sync with plugin!")) + LOG.info("Agent out of sync with plugin!") consecutive_resyncs = consecutive_resyncs + 1 if (consecutive_resyncs >= constants.MAX_DEVICE_RETRIES): - LOG.warning(_LW( + LOG.warning( "Clearing cache of registered ports," - " retries to resync were > %s"), - constants.MAX_DEVICE_RETRIES) + " retries to resync were > %s", + constants.MAX_DEVICE_RETRIES) ports.clear() ancillary_ports.clear() consecutive_resyncs = 0 @@ -1923,9 +1922,9 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, retries = failed_devices_retries_map.get(dev, 0) if retries >= constants.MAX_DEVICE_RETRIES: devices_not_to_retry.add(dev) - LOG.warning(_LW( + LOG.warning( "Device %(dev)s failed for %(times)s times and won't " - "be retried anymore"), { + "be retried anymore", { 'dev': dev, 'times': constants.MAX_DEVICE_RETRIES}) else: new_failed_devices_retries_map[dev] = retries + 1 @@ -1971,7 +1970,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, failed_devices_retries_map = {} while self._check_and_handle_signal(): if self.fullsync: - LOG.info(_LI("rpc_loop doing a full sync.")) + LOG.info("rpc_loop doing a full sync.") sync = True self.fullsync = 
False port_info = {} @@ -2021,8 +2020,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, try: tunnel_sync = self.tunnel_sync() except Exception: - LOG.exception( - _LE("Error while configuring tunnel endpoints")) + LOG.exception("Error while configuring tunnel endpoints") tunnel_sync = True ovs_restarted |= (ovs_status == constants.OVS_RESTARTED) devices_need_retry = (any(failed_devices.values()) or @@ -2098,7 +2096,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, ovs_restarted = False self._dispose_local_vlan_hints() except Exception: - LOG.exception(_LE("Error while processing VIF ports")) + LOG.exception("Error while processing VIF ports") # Put the ports back in self.updated_port self.updated_ports |= updated_ports_copy sync = True @@ -2107,7 +2105,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, def daemon_loop(self): # Start everything. - LOG.info(_LI("Agent initialized successfully, now running... ")) + LOG.info("Agent initialized successfully, now running... 
") signal.signal(signal.SIGTERM, self._handle_sigterm) if hasattr(signal, 'SIGHUP'): signal.signal(signal.SIGHUP, self._handle_sighup) @@ -2121,7 +2119,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, self.catch_sigterm = True if self.quitting_rpc_timeout: LOG.info( - _LI('SIGTERM received, capping RPC timeout by %d seconds.'), + 'SIGTERM received, capping RPC timeout by %d seconds.', self.quitting_rpc_timeout) self.set_rpc_timeout(self.quitting_rpc_timeout) @@ -2130,11 +2128,11 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, def _check_and_handle_signal(self): if self.catch_sigterm: - LOG.info(_LI("Agent caught SIGTERM, quitting daemon loop.")) + LOG.info("Agent caught SIGTERM, quitting daemon loop.") self.run_daemon_loop = False self.catch_sigterm = False if self.catch_sighup: - LOG.info(_LI("Agent caught SIGHUP, resetting.")) + LOG.info("Agent caught SIGHUP, resetting.") self.conf.reload_config_files() config.setup_logging() LOG.debug('Full set of CONF:') @@ -2159,8 +2157,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, def validate_local_ip(local_ip): """Verify if the ip exists on the agent's host.""" if not ip_lib.IPWrapper().get_device_by_ip(local_ip): - LOG.error(_LE("Tunneling can't be enabled with invalid local_ip '%s'." - " IP couldn't be found on this host's interfaces."), + LOG.error("Tunneling can't be enabled with invalid local_ip '%s'." 
+ " IP couldn't be found on this host's interfaces.", local_ip) raise SystemExit(1) @@ -2173,7 +2171,7 @@ def validate_tunnel_config(tunnel_types, local_ip): validate_local_ip(local_ip) for tun in tunnel_types: if tun not in constants.TUNNEL_NETWORK_TYPES: - LOG.error(_LE('Invalid tunnel type specified: %s'), tun) + LOG.error('Invalid tunnel type specified: %s', tun) raise SystemExit(1) @@ -2198,6 +2196,6 @@ def main(bridge_classes): agent = OVSNeutronAgent(bridge_classes, cfg.CONF) capabilities.notify_init_event(n_const.AGENT_TYPE_OVS, agent) except (RuntimeError, ValueError) as e: - LOG.error(_LE("%s Agent terminated!"), e) + LOG.error("%s Agent terminated!", e) sys.exit(1) agent.daemon_loop() diff --git a/neutron/plugins/ml2/drivers/type_flat.py b/neutron/plugins/ml2/drivers/type_flat.py index 7f493b05ac4..856d5034f72 100644 --- a/neutron/plugins/ml2/drivers/type_flat.py +++ b/neutron/plugins/ml2/drivers/type_flat.py @@ -18,7 +18,7 @@ from neutron_lib.plugins.ml2 import api from oslo_config import cfg from oslo_log import log -from neutron._i18n import _, _LI, _LW +from neutron._i18n import _ from neutron.common import exceptions as n_exc from neutron.conf.plugins.ml2.drivers import driver_type from neutron.db import api as db_api @@ -50,19 +50,19 @@ class FlatTypeDriver(helpers.BaseTypeDriver): def _parse_networks(self, entries): self.flat_networks = entries if '*' in self.flat_networks: - LOG.info(_LI("Arbitrary flat physical_network names allowed")) + LOG.info("Arbitrary flat physical_network names allowed") self.flat_networks = None elif not self.flat_networks: - LOG.info(_LI("Flat networks are disabled")) + LOG.info("Flat networks are disabled") else: - LOG.info(_LI("Allowable flat physical_network names: %s"), + LOG.info("Allowable flat physical_network names: %s", self.flat_networks) def get_type(self): return p_const.TYPE_FLAT def initialize(self): - LOG.info(_LI("ML2 FlatTypeDriver initialization complete")) + LOG.info("ML2 FlatTypeDriver initialization 
complete") def is_partial_segment(self, segment): return False @@ -116,8 +116,8 @@ class FlatTypeDriver(helpers.BaseTypeDriver): LOG.debug("Releasing flat network on physical network %s", physical_network) else: - LOG.warning(_LW( - "No flat network found on physical network %s"), + LOG.warning( + "No flat network found on physical network %s", physical_network) def get_mtu(self, physical_network): diff --git a/neutron/plugins/ml2/drivers/type_geneve.py b/neutron/plugins/ml2/drivers/type_geneve.py index c9de7a45a23..621fdf1b099 100644 --- a/neutron/plugins/ml2/drivers/type_geneve.py +++ b/neutron/plugins/ml2/drivers/type_geneve.py @@ -18,7 +18,6 @@ from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log -from neutron._i18n import _LE from neutron.conf.plugins.ml2.drivers import driver_type from neutron.objects.plugins.ml2 import geneveallocation as geneve_obj from neutron.plugins.common import constants as p_const @@ -43,8 +42,7 @@ class GeneveTypeDriver(type_tunnel.EndpointTunnelTypeDriver): try: self._initialize(cfg.CONF.ml2_type_geneve.vni_ranges) except n_exc.NetworkTunnelRangeError: - LOG.error(_LE("Failed to parse vni_ranges. " - "Service terminated!")) + LOG.error("Failed to parse vni_ranges. 
Service terminated!") raise SystemExit() def get_endpoints(self): diff --git a/neutron/plugins/ml2/drivers/type_gre.py b/neutron/plugins/ml2/drivers/type_gre.py index 984d5a609cf..21ae52baff8 100644 --- a/neutron/plugins/ml2/drivers/type_gre.py +++ b/neutron/plugins/ml2/drivers/type_gre.py @@ -17,7 +17,6 @@ from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log -from neutron._i18n import _LE from neutron.conf.plugins.ml2.drivers import driver_type from neutron.objects.plugins.ml2 import greallocation as gre_obj from neutron.plugins.common import constants as p_const @@ -41,8 +40,8 @@ class GreTypeDriver(type_tunnel.EndpointTunnelTypeDriver): try: self._initialize(cfg.CONF.ml2_type_gre.tunnel_id_ranges) except n_exc.NetworkTunnelRangeError: - LOG.exception(_LE("Failed to parse tunnel_id_ranges. " - "Service terminated!")) + LOG.exception("Failed to parse tunnel_id_ranges. " + "Service terminated!") raise SystemExit() def get_endpoints(self): diff --git a/neutron/plugins/ml2/drivers/type_local.py b/neutron/plugins/ml2/drivers/type_local.py index 4a7c4892650..b5d9acb9da3 100644 --- a/neutron/plugins/ml2/drivers/type_local.py +++ b/neutron/plugins/ml2/drivers/type_local.py @@ -17,7 +17,7 @@ from neutron_lib import exceptions as exc from neutron_lib.plugins.ml2 import api from oslo_log import log -from neutron._i18n import _, _LI +from neutron._i18n import _ from neutron.plugins.common import constants as p_const from neutron.plugins.ml2 import driver_api @@ -35,7 +35,7 @@ class LocalTypeDriver(driver_api.ML2TypeDriver): """ def __init__(self): - LOG.info(_LI("ML2 LocalTypeDriver initialization complete")) + LOG.info("ML2 LocalTypeDriver initialization complete") def get_type(self): return p_const.TYPE_LOCAL diff --git a/neutron/plugins/ml2/drivers/type_tunnel.py b/neutron/plugins/ml2/drivers/type_tunnel.py index d4cf1002636..4bf3e858b7c 100644 --- a/neutron/plugins/ml2/drivers/type_tunnel.py +++ 
b/neutron/plugins/ml2/drivers/type_tunnel.py @@ -27,7 +27,7 @@ import six from six import moves from sqlalchemy import or_ -from neutron._i18n import _, _LI, _LW +from neutron._i18n import _ from neutron.common import topics from neutron.db import api as db_api from neutron.objects import base as base_obj @@ -133,7 +133,7 @@ class _TunnelTypeDriverBase(helpers.SegmentTypeDriver): raise exc.NetworkTunnelRangeError(tunnel_range=entry, error=ex) plugin_utils.verify_tunnel_range(tunnel_range, self.get_type()) current_range.append(tunnel_range) - LOG.info(_LI("%(type)s ID ranges: %(range)s"), + LOG.info("%(type)s ID ranges: %(range)s", {'type': self.get_type(), 'range': current_range}) @db_api.retry_db_errors @@ -259,7 +259,7 @@ class TunnelTypeDriver(_TunnelTypeDriverBase): info) if not count: - LOG.warning(_LW("%(type)s tunnel %(id)s not found"), info) + LOG.warning("%(type)s tunnel %(id)s not found", info) def get_allocation(self, session, tunnel_id): return (session.query(self.model). @@ -327,7 +327,7 @@ class ML2TunnelTypeDriver(_TunnelTypeDriverBase): info) if not count: - LOG.warning(_LW("%(type)s tunnel %(id)s not found"), info) + LOG.warning("%(type)s tunnel %(id)s not found", info) @db_api.context_manager.reader def get_allocation(self, context, tunnel_id): @@ -385,7 +385,7 @@ class EndpointTunnelTypeDriver(ML2TunnelTypeDriver): except db_exc.DBDuplicateEntry: endpoint = (session.query(self.endpoint_model). 
filter_by(ip_address=ip).one()) - LOG.warning(_LW("Endpoint with ip %s already exists"), ip) + LOG.warning("Endpoint with ip %s already exists", ip) return endpoint @@ -450,8 +450,8 @@ class TunnelRpcCallbackMixin(object): driver.obj.delete_endpoint(ip_endpoint.ip_address) elif (ip_endpoint and ip_endpoint.host != host): LOG.info( - _LI("Tunnel IP %(ip)s was used by host %(host)s and " - "will be assigned to %(new_host)s"), + "Tunnel IP %(ip)s was used by host %(host)s and " + "will be assigned to %(new_host)s", {'ip': ip_endpoint.ip_address, 'host': ip_endpoint.host, 'new_host': host}) diff --git a/neutron/plugins/ml2/drivers/type_vlan.py b/neutron/plugins/ml2/drivers/type_vlan.py index bd6c3423de5..622854cb588 100644 --- a/neutron/plugins/ml2/drivers/type_vlan.py +++ b/neutron/plugins/ml2/drivers/type_vlan.py @@ -22,7 +22,7 @@ from oslo_config import cfg from oslo_log import log from six import moves -from neutron._i18n import _, _LE, _LI, _LW +from neutron._i18n import _ from neutron.conf.plugins.ml2.drivers import driver_type from neutron.db import api as db_api from neutron.objects.plugins.ml2 import vlanallocation as vlanalloc @@ -55,10 +55,10 @@ class VlanTypeDriver(helpers.SegmentTypeDriver): self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( cfg.CONF.ml2_type_vlan.network_vlan_ranges) except Exception: - LOG.exception(_LE("Failed to parse network_vlan_ranges. " - "Service terminated!")) + LOG.exception("Failed to parse network_vlan_ranges. 
" + "Service terminated!") sys.exit(1) - LOG.info(_LI("Network VLAN ranges: %s"), self.network_vlan_ranges) + LOG.info("Network VLAN ranges: %s", self.network_vlan_ranges) @db_api.retry_db_errors def _sync_vlan_allocations(self): @@ -137,7 +137,7 @@ class VlanTypeDriver(helpers.SegmentTypeDriver): def initialize(self): self._sync_vlan_allocations() - LOG.info(_LI("VlanTypeDriver initialization complete")) + LOG.info("VlanTypeDriver initialization complete") def is_partial_segment(self, segment): return segment.get(api.SEGMENTATION_ID) is None @@ -242,8 +242,8 @@ class VlanTypeDriver(helpers.SegmentTypeDriver): 'physical_network': physical_network}) if not count: - LOG.warning(_LW("No vlan_id %(vlan_id)s found on physical " - "network %(physical_network)s"), + LOG.warning("No vlan_id %(vlan_id)s found on physical " + "network %(physical_network)s", {'vlan_id': vlan_id, 'physical_network': physical_network}) diff --git a/neutron/plugins/ml2/drivers/type_vxlan.py b/neutron/plugins/ml2/drivers/type_vxlan.py index 96934e88eae..5d464246637 100644 --- a/neutron/plugins/ml2/drivers/type_vxlan.py +++ b/neutron/plugins/ml2/drivers/type_vxlan.py @@ -17,7 +17,6 @@ from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log -from neutron._i18n import _LE from neutron.conf.plugins.ml2.drivers import driver_type from neutron.objects.plugins.ml2 import vxlanallocation as vxlan_obj from neutron.plugins.common import constants as p_const @@ -41,8 +40,8 @@ class VxlanTypeDriver(type_tunnel.EndpointTunnelTypeDriver): try: self._initialize(cfg.CONF.ml2_type_vxlan.vni_ranges) except n_exc.NetworkTunnelRangeError: - LOG.exception(_LE("Failed to parse vni_ranges. " - "Service terminated!")) + LOG.exception("Failed to parse vni_ranges. 
" + "Service terminated!") raise SystemExit() def get_endpoints(self): diff --git a/neutron/plugins/ml2/extensions/dns_integration.py b/neutron/plugins/ml2/extensions/dns_integration.py index 2d4e3e56add..c44d7b11685 100644 --- a/neutron/plugins/ml2/extensions/dns_integration.py +++ b/neutron/plugins/ml2/extensions/dns_integration.py @@ -21,7 +21,6 @@ from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import log as logging -from neutron._i18n import _LE, _LI from neutron.db import segments_db from neutron.extensions import dns from neutron.objects import network as net_obj @@ -327,7 +326,7 @@ class DNSExtensionDriver(api.ExtensionDriver): class DNSExtensionDriverML2(DNSExtensionDriver): def initialize(self): - LOG.info(_LI("DNSExtensionDriverML2 initialization complete")) + LOG.info("DNSExtensionDriverML2 initialization complete") def _is_tunnel_tenant_network(self, provider_net): if provider_net['network_type'] == 'geneve': @@ -385,7 +384,7 @@ class DNSDomainPortsExtensionDriver(DNSExtensionDriverML2): return self._supported_extension_aliases def initialize(self): - LOG.info(_LI("DNSDomainPortsExtensionDriver initialization complete")) + LOG.info("DNSDomainPortsExtensionDriver initialization complete") def extend_port_dict(self, session, db_data, response_data): response_data = ( @@ -412,8 +411,8 @@ def _get_dns_driver(): cfg.CONF.external_dns_driver) return DNS_DRIVER except ImportError: - LOG.exception(_LE("ImportError exception occurred while loading " - "the external DNS service driver")) + LOG.exception("ImportError exception occurred while loading " + "the external DNS service driver") raise dns.ExternalDNSDriverNotFound( driver=cfg.CONF.external_dns_driver) @@ -440,9 +439,9 @@ def _send_data_to_external_dns_service(context, dns_driver, dns_domain, try: dns_driver.create_record_set(context, dns_domain, dns_name, records) except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e: - LOG.exception(_LE("Error publishing port 
data in external DNS " - "service. Name: '%(name)s'. Domain: '%(domain)s'. " - "DNS service driver message '%(message)s'"), + LOG.exception("Error publishing port data in external DNS " + "service. Name: '%(name)s'. Domain: '%(domain)s'. " + "DNS service driver message '%(message)s'", {"name": dns_name, "domain": dns_domain, "message": e.msg}) @@ -453,10 +452,10 @@ def _remove_data_from_external_dns_service(context, dns_driver, dns_domain, try: dns_driver.delete_record_set(context, dns_domain, dns_name, records) except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e: - LOG.exception(_LE("Error deleting port data from external DNS " - "service. Name: '%(name)s'. Domain: '%(domain)s'. " - "IP addresses '%(ips)s'. DNS service driver message " - "'%(message)s'"), + LOG.exception("Error deleting port data from external DNS " + "service. Name: '%(name)s'. Domain: '%(domain)s'. " + "IP addresses '%(ips)s'. DNS service driver message " + "'%(message)s'", {"name": dns_name, "domain": dns_domain, "message": e.msg, diff --git a/neutron/plugins/ml2/extensions/port_security.py b/neutron/plugins/ml2/extensions/port_security.py index 953032013e8..6b3f0da63a9 100644 --- a/neutron/plugins/ml2/extensions/port_security.py +++ b/neutron/plugins/ml2/extensions/port_security.py @@ -18,7 +18,6 @@ from neutron_lib.api import validators from neutron_lib.utils import net from oslo_log import log as logging -from neutron._i18n import _LI from neutron.db import common_db_mixin from neutron.db import portsecurity_db_common as ps_db_common from neutron.plugins.ml2 import driver_api as api @@ -32,7 +31,7 @@ class PortSecurityExtensionDriver(api.ExtensionDriver, _supported_extension_alias = 'port-security' def initialize(self): - LOG.info(_LI("PortSecurityExtensionDriver initialization complete")) + LOG.info("PortSecurityExtensionDriver initialization complete") @property def extension_alias(self): diff --git a/neutron/plugins/ml2/managers.py b/neutron/plugins/ml2/managers.py index 
e968c9967ab..0a1ebc6a31e 100644 --- a/neutron/plugins/ml2/managers.py +++ b/neutron/plugins/ml2/managers.py @@ -24,7 +24,7 @@ from oslo_log import log from oslo_utils import excutils import stevedore -from neutron._i18n import _, _LC, _LE, _LI, _LW +from neutron._i18n import _ from neutron.db import api as db_api from neutron.db import segments_db from neutron.extensions import external_net @@ -46,12 +46,12 @@ class TypeManager(stevedore.named.NamedExtensionManager): # Mapping from type name to DriverManager self.drivers = {} - LOG.info(_LI("Configured type driver names: %s"), + LOG.info("Configured type driver names: %s", cfg.CONF.ml2.type_drivers) super(TypeManager, self).__init__('neutron.ml2.type_drivers', cfg.CONF.ml2.type_drivers, invoke_on_load=True) - LOG.info(_LI("Loaded type driver names: %s"), self.names()) + LOG.info("Loaded type driver names: %s", self.names()) self._register_types() self._check_tenant_network_types(cfg.CONF.ml2.tenant_network_types) self._check_external_network_type(cfg.CONF.ml2.external_network_type) @@ -60,15 +60,15 @@ class TypeManager(stevedore.named.NamedExtensionManager): for ext in self: network_type = ext.obj.get_type() if network_type in self.drivers: - LOG.error(_LE("Type driver '%(new_driver)s' ignored because" - " type driver '%(old_driver)s' is already" - " registered for type '%(type)s'"), + LOG.error("Type driver '%(new_driver)s' ignored because" + " type driver '%(old_driver)s' is already" + " registered for type '%(type)s'", {'new_driver': ext.name, 'old_driver': self.drivers[network_type].name, 'type': network_type}) else: self.drivers[network_type] = ext - LOG.info(_LI("Registered types: %s"), self.drivers.keys()) + LOG.info("Registered types: %s", self.drivers.keys()) def _check_tenant_network_types(self, types): self.tenant_network_types = [] @@ -76,15 +76,15 @@ class TypeManager(stevedore.named.NamedExtensionManager): if network_type in self.drivers: self.tenant_network_types.append(network_type) else: - 
LOG.error(_LE("No type driver for tenant network_type: %s. " - "Service terminated!"), network_type) + LOG.error("No type driver for tenant network_type: %s. " + "Service terminated!", network_type) raise SystemExit(1) - LOG.info(_LI("Tenant network_types: %s"), self.tenant_network_types) + LOG.info("Tenant network_types: %s", self.tenant_network_types) def _check_external_network_type(self, ext_network_type): if ext_network_type and ext_network_type not in self.drivers: - LOG.error(_LE("No type driver for external network_type: %s. " - "Service terminated!"), ext_network_type) + LOG.error("No type driver for external network_type: %s. " + "Service terminated!", ext_network_type) raise SystemExit(1) def _process_provider_segment(self, segment): @@ -181,7 +181,7 @@ class TypeManager(stevedore.named.NamedExtensionManager): def initialize(self): for network_type, driver in self.drivers.items(): - LOG.info(_LI("Initializing driver for type '%s'"), network_type) + LOG.info("Initializing driver for type '%s'", network_type) driver.obj.initialize() def _add_network_segment(self, context, network_id, segment, @@ -293,8 +293,8 @@ class TypeManager(stevedore.named.NamedExtensionManager): else: driver.obj.release_segment(context, segment) else: - LOG.error(_LE("Failed to release segment '%s' because " - "network type is not supported."), segment) + LOG.error("Failed to release segment '%s' because " + "network type is not supported.", segment) def allocate_dynamic_segment(self, context, network_id, segment): """Allocate a dynamic segment using a partial or full segment dict.""" @@ -328,8 +328,8 @@ class TypeManager(stevedore.named.NamedExtensionManager): driver.obj.release_segment(context, segment) segments_db.delete_network_segment(context, segment_id) else: - LOG.error(_LE("Failed to release segment '%s' because " - "network type is not supported."), segment) + LOG.error("Failed to release segment '%s' because " + "network type is not supported.", segment) else: 
LOG.debug("No segment found with id %(segment_id)s", segment_id) @@ -344,7 +344,7 @@ class MechanismManager(stevedore.named.NamedExtensionManager): # the order in which the drivers are called. self.ordered_mech_drivers = [] - LOG.info(_LI("Configured mechanism driver names: %s"), + LOG.info("Configured mechanism driver names: %s", cfg.CONF.ml2.mechanism_drivers) super(MechanismManager, self).__init__( 'neutron.ml2.mechanism_drivers', @@ -354,12 +354,12 @@ class MechanismManager(stevedore.named.NamedExtensionManager): on_missing_entrypoints_callback=self._driver_not_found, on_load_failure_callback=self._driver_not_loaded ) - LOG.info(_LI("Loaded mechanism driver names: %s"), self.names()) + LOG.info("Loaded mechanism driver names: %s", self.names()) self._register_mechanisms() self.host_filtering_supported = self.is_host_filtering_supported() if not self.host_filtering_supported: - LOG.info(_LI("No mechanism drivers provide segment reachability " - "information for agent scheduling.")) + LOG.info("No mechanism drivers provide segment reachability " + "information for agent scheduling.") def _driver_not_found(self, names): msg = (_("The following mechanism drivers were not found: %s") @@ -368,8 +368,8 @@ class MechanismManager(stevedore.named.NamedExtensionManager): raise SystemExit(msg) def _driver_not_loaded(self, manager, entrypoint, exception): - LOG.critical(_LC("The '%(entrypoint)s' entrypoint could not be" - " loaded for the following reason: '%(reason)s'."), + LOG.critical("The '%(entrypoint)s' entrypoint could not be" + " loaded for the following reason: '%(reason)s'.", {'entrypoint': entrypoint, 'reason': exception}) raise SystemExit(str(exception)) @@ -383,12 +383,12 @@ class MechanismManager(stevedore.named.NamedExtensionManager): for ext in self: self.mech_drivers[ext.name] = ext self.ordered_mech_drivers.append(ext) - LOG.info(_LI("Registered mechanism drivers: %s"), + LOG.info("Registered mechanism drivers: %s", [driver.name for driver in 
self.ordered_mech_drivers]) def initialize(self): for driver in self.ordered_mech_drivers: - LOG.info(_LI("Initializing mechanism driver '%s'"), driver.name) + LOG.info("Initializing mechanism driver '%s'", driver.name) driver.obj.initialize() def _check_vlan_transparency(self, context): @@ -432,7 +432,7 @@ class MechanismManager(stevedore.named.NamedExtensionManager): {'name': driver.name, 'method': method_name}, exc_info=e) LOG.exception( - _LE("Mechanism driver '%(name)s' failed in %(method)s"), + "Mechanism driver '%(name)s' failed in %(method)s", {'name': driver.name, 'method': method_name} ) errors.append(e) @@ -746,9 +746,9 @@ class MechanismManager(stevedore.named.NamedExtensionManager): if not self._bind_port_level(context, 0, context.network.network_segments): binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED - LOG.error(_LE("Failed to bind port %(port)s on host %(host)s " - "for vnic_type %(vnic_type)s using segments " - "%(segments)s"), + LOG.error("Failed to bind port %(port)s on host %(host)s " + "for vnic_type %(vnic_type)s using segments " + "%(segments)s", {'port': context.current['id'], 'host': context.host, 'vnic_type': binding.vnic_type, @@ -765,8 +765,8 @@ class MechanismManager(stevedore.named.NamedExtensionManager): 'segments': segments_to_bind}) if level == MAX_BINDING_LEVELS: - LOG.error(_LE("Exceeded maximum binding levels attempting to bind " - "port %(port)s on host %(host)s"), + LOG.error("Exceeded maximum binding levels attempting to bind " + "port %(port)s on host %(host)s", {'port': context.current['id'], 'host': context.host}) return False @@ -793,8 +793,8 @@ class MechanismManager(stevedore.named.NamedExtensionManager): next_segments): return True else: - LOG.warning(_LW("Failed to bind port %(port)s on " - "host %(host)s at level %(lvl)s"), + LOG.warning("Failed to bind port %(port)s on " + "host %(host)s at level %(lvl)s", {'port': context.current['id'], 'host': context.host, 'lvl': level + 1}) @@ -813,8 +813,8 @@ class 
MechanismManager(stevedore.named.NamedExtensionManager): 'binding_levels': context.binding_levels}) return True except Exception: - LOG.exception(_LE("Mechanism driver %s failed in " - "bind_port"), + LOG.exception("Mechanism driver %s failed in " + "bind_port", driver.name) def is_host_filtering_supported(self): @@ -873,13 +873,13 @@ class ExtensionManager(stevedore.named.NamedExtensionManager): # the order in which the drivers are called. self.ordered_ext_drivers = [] - LOG.info(_LI("Configured extension driver names: %s"), + LOG.info("Configured extension driver names: %s", cfg.CONF.ml2.extension_drivers) super(ExtensionManager, self).__init__('neutron.ml2.extension_drivers', cfg.CONF.ml2.extension_drivers, invoke_on_load=True, name_order=True) - LOG.info(_LI("Loaded extension driver names: %s"), self.names()) + LOG.info("Loaded extension driver names: %s", self.names()) self._register_drivers() def _register_drivers(self): @@ -890,13 +890,13 @@ class ExtensionManager(stevedore.named.NamedExtensionManager): """ for ext in self: self.ordered_ext_drivers.append(ext) - LOG.info(_LI("Registered extension drivers: %s"), + LOG.info("Registered extension drivers: %s", [driver.name for driver in self.ordered_ext_drivers]) def initialize(self): # Initialize each driver in the list. 
for driver in self.ordered_ext_drivers: - LOG.info(_LI("Initializing extension driver '%s'"), driver.name) + LOG.info("Initializing extension driver '%s'", driver.name) driver.obj.initialize() def extension_aliases(self): @@ -907,7 +907,7 @@ class ExtensionManager(stevedore.named.NamedExtensionManager): if not alias: continue exts.append(alias) - LOG.info(_LI("Got %(alias)s extension from driver '%(drv)s'"), + LOG.info("Got %(alias)s extension from driver '%(drv)s'", {'alias': alias, 'drv': driver.name}) return exts @@ -918,8 +918,8 @@ class ExtensionManager(stevedore.named.NamedExtensionManager): getattr(driver.obj, method_name)(plugin_context, data, result) except Exception: with excutils.save_and_reraise_exception(): - LOG.info(_LI("Extension driver '%(name)s' failed in " - "%(method)s"), + LOG.info("Extension driver '%(name)s' failed in " + "%(method)s", {'name': driver.name, 'method': method_name}) def process_create_network(self, plugin_context, data, result): @@ -957,8 +957,8 @@ class ExtensionManager(stevedore.named.NamedExtensionManager): try: getattr(driver.obj, method_name)(session, base_model, result) except Exception: - LOG.exception(_LE("Extension driver '%(name)s' failed in " - "%(method)s"), + LOG.exception("Extension driver '%(name)s' failed in " + "%(method)s", {'name': driver.name, 'method': method_name}) raise ml2_exc.ExtensionDriverError(driver=driver.name) diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index 21e84d0075d..138a469ae0f 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -42,7 +42,7 @@ from oslo_utils import uuidutils import sqlalchemy from sqlalchemy.orm import exc as sa_exc -from neutron._i18n import _, _LE, _LI, _LW +from neutron._i18n import _ from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.rpc.handlers import dhcp_rpc @@ -194,7 +194,7 @@ class 
Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, self.add_agent_status_check_worker(self.agent_health_check) self.add_workers(self.mechanism_manager.get_workers()) self._verify_service_plugins_requirements() - LOG.info(_LI("Modular L2 Plugin initialization complete")) + LOG.info("Modular L2 Plugin initialization complete") def _setup_rpc(self): """Initialize components to support agent communication.""" @@ -380,7 +380,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, # multiple attempts shouldn't happen very often so we log each # attempt after the 1st. - LOG.info(_LI("Attempt %(count)s to bind port %(port)s"), + LOG.info("Attempt %(count)s to bind port %(port)s", {'count': count, 'port': context.current['id']}) bind_context, need_notify, try_again = self._attempt_binding( @@ -403,8 +403,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, self._notify_port_updated(context) return context - LOG.error(_LE("Failed to commit binding results for %(port)s " - "after %(max)s tries"), + LOG.error("Failed to commit binding results for %(port)s " + "after %(max)s tries", {'port': context.current['id'], 'max': MAX_BIND_TRIES}) return context @@ -598,8 +598,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, try: return jsonutils.loads(binding.vif_details) except Exception: - LOG.error(_LE("Serialized vif_details DB value '%(value)s' " - "for port %(port)s is invalid"), + LOG.error("Serialized vif_details DB value '%(value)s' " + "for port %(port)s is invalid", {'value': binding.vif_details, 'port': binding.port_id}) return {} @@ -609,8 +609,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, try: return jsonutils.loads(binding.profile) except Exception: - LOG.error(_LE("Serialized profile DB value '%(value)s' for " - "port %(port)s is invalid"), + LOG.error("Serialized profile DB value '%(value)s' for " + "port %(port)s is invalid", {'value': binding.profile, 'port': binding.port_id}) return {} @@ -675,10 +675,10 @@ class 
Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, try: delete_op(context, obj['result']['id']) except KeyError: - LOG.exception(_LE("Could not find %s to delete."), + LOG.exception("Could not find %s to delete.", resource) except Exception: - LOG.exception(_LE("Could not delete %(res)s %(id)s."), + LOG.exception("Could not delete %(res)s %(id)s.", {'res': resource, 'id': obj['result']['id']}) @@ -702,8 +702,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, except Exception as e: with excutils.save_and_reraise_exception(): utils.attach_exc_details( - e, _LE("An exception occurred while creating " - "the %(resource)s:%(item)s"), + e, ("An exception occurred while creating " + "the %(resource)s:%(item)s"), {'resource': resource, 'item': item}) postcommit_op = getattr(self, '_after_create_%s' % resource) @@ -713,10 +713,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, except Exception: with excutils.save_and_reraise_exception(): resource_ids = [res['result']['id'] for res in objects] - LOG.exception(_LE("ML2 _after_create_%(res)s " - "failed for %(res)s: " - "'%(failed_id)s'. Deleting " - "%(res)ss %(resource_ids)s"), + LOG.exception("ML2 _after_create_%(res)s " + "failed for %(res)s: " + "'%(failed_id)s'. Deleting " + "%(res)ss %(resource_ids)s", {'res': resource, 'failed_id': obj['result']['id'], 'resource_ids': ', '.join(resource_ids)}) @@ -747,10 +747,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, # trigger the exception here. 
if segment_type and s['segmentation_id']: LOG.warning( - _LW("Failed to determine MTU for segment " - "%(segment_type)s:%(segment_id)s; network " - "%(network_id)s MTU calculation may be not " - "accurate"), + "Failed to determine MTU for segment " + "%(segment_type)s:%(segment_id)s; network " + "%(network_id)s MTU calculation may be not " + "accurate", { 'segment_type': segment_type, 'segment_id': s['segmentation_id'], @@ -840,8 +840,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, self.mechanism_manager.create_network_postcommit(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): - LOG.error(_LE("mechanism_manager.create_network_postcommit " - "failed, deleting network '%s'"), result['id']) + LOG.error("mechanism_manager.create_network_postcommit " + "failed, deleting network '%s'", result['id']) self.delete_network(context, result['id']) return result @@ -1007,8 +1007,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, # TODO(apech) - One or more mechanism driver failed to # delete the network. Ideally we'd notify the caller of # the fact that an error occurred. 
- LOG.error(_LE("mechanism_manager.delete_network_postcommit" - " failed")) + LOG.error("mechanism_manager.delete_network_postcommit" + " failed") self.notifier.network_delete(context, network['id']) def _before_create_subnet(self, context, subnet): @@ -1054,8 +1054,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, self.mechanism_manager.create_subnet_postcommit(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): - LOG.error(_LE("mechanism_manager.create_subnet_postcommit " - "failed, deleting subnet '%s'"), result['id']) + LOG.error("mechanism_manager.create_subnet_postcommit " + "failed, deleting subnet '%s'", result['id']) self.delete_subnet(context, result['id']) return result @@ -1118,7 +1118,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, # TODO(apech) - One or more mechanism driver failed to # delete the subnet. Ideally we'd notify the caller of # the fact that an error occurred. - LOG.error(_LE("mechanism_manager.delete_subnet_postcommit failed")) + LOG.error("mechanism_manager.delete_subnet_postcommit failed") # TODO(yalei) - will be simplified after security group and address pair be # converted to ext driver too. 
@@ -1216,15 +1216,15 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, self.mechanism_manager.create_port_postcommit(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): - LOG.error(_LE("mechanism_manager.create_port_postcommit " - "failed, deleting port '%s'"), result['id']) + LOG.error("mechanism_manager.create_port_postcommit " + "failed, deleting port '%s'", result['id']) self.delete_port(context, result['id'], l3_port_check=False) try: bound_context = self._bind_port_if_needed(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): - LOG.error(_LE("_bind_port_if_needed " - "failed, deleting port '%s'"), result['id']) + LOG.error("_bind_port_if_needed " + "failed, deleting port '%s'", result['id']) self.delete_port(context, result['id'], l3_port_check=False) return bound_context.current @@ -1394,8 +1394,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, self.mechanism_manager.update_port_postcommit( mech_context) except ml2_exc.MechanismDriverError: - LOG.error(_LE("mechanism_manager.update_port_postcommit " - "failed for port %s"), id) + LOG.error("mechanism_manager.update_port_postcommit " + "failed for port %s", id) self.check_and_notify_security_group_member_changed( context, original_port, updated_port) @@ -1445,7 +1445,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, host_set = validators.is_attr_set(host) if not host_set: - LOG.error(_LE("No Host supplied to bind DVR Port %s"), id) + LOG.error("No Host supplied to bind DVR Port %s", id) return binding = db.get_distributed_port_binding_by_host(context, @@ -1560,8 +1560,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, # TODO(apech) - One or more mechanism driver failed to # delete the port. Ideally we'd notify the caller of the # fact that an error occurred. 
- LOG.error(_LE("mechanism_manager.delete_port_postcommit failed for" - " port %s"), port['id']) + LOG.error("mechanism_manager.delete_port_postcommit failed for" + " port %s", port['id']) self.notifier.port_delete(context, port['id']) @utils.transaction_guard @@ -1578,11 +1578,11 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, filter(models_v2.Port.id.startswith(port_id)). one()) except sa_exc.NoResultFound: - LOG.info(_LI("No ports have port_id starting with %s"), + LOG.info("No ports have port_id starting with %s", port_id) return except sa_exc.MultipleResultsFound: - LOG.error(_LE("Multiple ports have port_id starting with %s"), + LOG.error("Multiple ports have port_id starting with %s", port_id) return port = self._make_port_dict(port_db) @@ -1595,7 +1595,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, binding = db.get_distributed_port_binding_by_host( plugin_context, port['id'], host) if not binding: - LOG.error(_LE("Binding info for DVR port %s not found"), + LOG.error("Binding info for DVR port %s not found", port_id) return None levels = db.get_binding_levels(plugin_context, @@ -1609,8 +1609,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, # It's not an error condition. 
binding = port_db.port_binding if not binding: - LOG.info(_LI("Binding info for port %s was not found, " - "it might have been deleted already."), + LOG.info("Binding info for port %s was not found, " + "it might have been deleted already.", port_id) return levels = db.get_binding_levels(plugin_context, port_db.id, @@ -1653,8 +1653,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, binding = port_db.port_binding bindlevelhost_match = binding.host if binding else None if not binding: - LOG.info(_LI("Binding info for port %s was not found, " - "it might have been deleted already."), + LOG.info("Binding info for port %s was not found, " + "it might have been deleted already.", port_id) result[dev_id] = None continue @@ -1754,7 +1754,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, with db_api.context_manager.writer.using(context): port = db.get_port(context, port_id) if not port: - LOG.warning(_LW("Port %s not found during update"), + LOG.warning("Port %s not found during update", port_id) return original_port = self._make_port_dict(port) @@ -1888,9 +1888,9 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, event, context, network_id) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): - LOG.error(_LE("mechanism_manager error occurred when " - "handle event %(event)s for segment " - "'%(segment)s'"), + LOG.error("mechanism_manager error occurred when " + "handle event %(event)s for segment " + "'%(segment)s'", {'event': event, 'segment': segment['id']}) def _notify_mechanism_driver_for_segment_change(self, event, diff --git a/neutron/plugins/ml2/rpc.py b/neutron/plugins/ml2/rpc.py index cd27928a632..198851d1012 100644 --- a/neutron/plugins/ml2/rpc.py +++ b/neutron/plugins/ml2/rpc.py @@ -24,7 +24,6 @@ from oslo_log import log import oslo_messaging from sqlalchemy.orm import exc -from neutron._i18n import _LE, _LW from neutron.api.rpc.handlers import dvr_rpc from neutron.api.rpc.handlers import securitygroups_rpc as 
sg_rpc from neutron.common import rpc as n_rpc @@ -116,9 +115,9 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin): port = port_context.current if not segment: - LOG.warning(_LW("Device %(device)s requested by agent " - "%(agent_id)s on network %(network_id)s not " - "bound, vif_type: %(vif_type)s"), + LOG.warning("Device %(device)s requested by agent " + "%(agent_id)s on network %(network_id)s not " + "bound, vif_type: %(vif_type)s", {'device': device, 'agent_id': agent_id, 'network_id': port['network_id'], @@ -187,7 +186,7 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin): device=device, port_context=bound_contexts[device])) except Exception: - LOG.exception(_LE("Failed to get details for device %s"), + LOG.exception("Failed to get details for device %s", device) failed_devices.append(device) new_status_map = {ctxt.current['id']: self._get_new_status(host, ctxt) @@ -346,7 +345,7 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin): **kwargs) except Exception: failed_devices_up.append(device) - LOG.error(_LE("Failed to update device %s up"), device) + LOG.error("Failed to update device %s up", device) else: devices_up.append(device) @@ -360,7 +359,7 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin): **kwargs) except Exception: failed_devices_down.append(device) - LOG.error(_LE("Failed to update device %s down"), device) + LOG.error("Failed to update device %s down", device) else: devices_down.append(dev) diff --git a/neutron/policy.py b/neutron/policy.py index 6dcae218b86..51e9c3ef2cd 100644 --- a/neutron/policy.py +++ b/neutron/policy.py @@ -27,7 +27,7 @@ from oslo_policy import policy from oslo_utils import excutils import six -from neutron._i18n import _, _LE, _LW +from neutron._i18n import _ from neutron.api.v2 import attributes from neutron.common import cache_utils as cache from neutron.common import constants as const @@ -116,8 +116,7 @@ def _build_subattr_match_rule(attr_name, attr, action, target): validate = attr['validate'] 
key = [k for k in validate.keys() if k.startswith('type:dict')] if not key: - LOG.warning(_LW("Unable to find data type descriptor " - "for attribute %s"), + LOG.warning("Unable to find data type descriptor for attribute %s", attr_name) return data = validate[key[0]] @@ -235,7 +234,7 @@ class OwnerCheck(policy.Check): raise db_exc.RetryRequest(e) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Policy check error while calling %s!'), f) + LOG.exception('Policy check error while calling %s!', f) return data[field] def __call__(self, target, creds, enforcer): diff --git a/neutron/privileged/agent/linux/netlink_lib.py b/neutron/privileged/agent/linux/netlink_lib.py index 307930b7ae8..df0c22e181e 100644 --- a/neutron/privileged/agent/linux/netlink_lib.py +++ b/neutron/privileged/agent/linux/netlink_lib.py @@ -42,7 +42,7 @@ import re from neutron_lib import constants from oslo_log import log as logging -from neutron._i18n import _, _LW +from neutron._i18n import _ from neutron.common import exceptions from neutron import privileged from neutron.privileged.agent.linux import netlink_constants as nl_constants @@ -167,7 +167,7 @@ class ConntrackManager(object): result = nfct.nfct_query(self.conntrack_handler, query_type, query_data) if result == nl_constants.NFCT_CB_FAILURE: - LOG.warning(_LW("Netlink query failed")) + LOG.warning("Netlink query failed") def _convert_text_to_binary(self, source, addr_family): dest = ctypes.create_string_buffer( diff --git a/neutron/quota/__init__.py b/neutron/quota/__init__.py index 7a535d312d6..3ca015f202c 100644 --- a/neutron/quota/__init__.py +++ b/neutron/quota/__init__.py @@ -24,7 +24,7 @@ from oslo_utils import importutils import six import webob -from neutron._i18n import _, _LI, _LW +from neutron._i18n import _ from neutron.common import exceptions from neutron.conf import quota from neutron.db.quota import api as quota_api @@ -173,18 +173,18 @@ class QuotaEngine(object): QUOTA_DB_MODULE not in 
sys.modules): # If quotas table is not loaded, force config quota driver. _driver_class = QUOTA_CONF_DRIVER - LOG.info(_LI("ConfDriver is used as quota_driver because the " - "loaded plugin does not support 'quotas' table.")) + LOG.info("ConfDriver is used as quota_driver because the " + "loaded plugin does not support 'quotas' table.") if isinstance(_driver_class, six.string_types): _driver_class = importutils.import_object(_driver_class) if isinstance(_driver_class, ConfDriver): versionutils.report_deprecated_feature( - LOG, _LW("The quota driver neutron.quota.ConfDriver is " - "deprecated as of Liberty. " - "neutron.db.quota.driver.DbQuotaDriver should " - "be used in its place")) + LOG, ("The quota driver neutron.quota.ConfDriver is " + "deprecated as of Liberty. " + "neutron.db.quota.driver.DbQuotaDriver should " + "be used in its place")) self._driver = _driver_class - LOG.info(_LI('Loaded quota_driver: %s.'), _driver_class) + LOG.info('Loaded quota_driver: %s.', _driver_class) return self._driver def count(self, context, resource_name, *args, **kwargs): diff --git a/neutron/quota/resource.py b/neutron/quota/resource.py index d80a052fb49..456a556d2b1 100644 --- a/neutron/quota/resource.py +++ b/neutron/quota/resource.py @@ -18,7 +18,6 @@ from oslo_utils import excutils from sqlalchemy import exc as sql_exc from sqlalchemy.orm import session as se -from neutron._i18n import _LE, _LW from neutron.db import api as db_api from neutron.db.quota import api as quota_api @@ -199,8 +198,8 @@ class TrackedResource(BaseResource): tenant_id = target['tenant_id'] except AttributeError: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Model class %s does not have a tenant_id " - "attribute"), target) + LOG.error("Model class %s does not have a tenant_id " + "attribute", target) self._dirty_tenants.add(tenant_id) # Retry the operation if a duplicate entry exception is raised. 
This @@ -300,10 +299,10 @@ class TrackedResource(BaseResource): def _except_bulk_delete(self, delete_context): if delete_context.mapper.class_ == self._model_class: - raise RuntimeError(_LE("%s may not be deleted in bulk because " - "it is tracked by the quota engine via " - "SQLAlchemy event handlers, which are not " - "compatible with bulk deletes.") % + raise RuntimeError("%s may not be deleted in bulk because " + "it is tracked by the quota engine via " + "SQLAlchemy event handlers, which are not " + "compatible with bulk deletes." % self._model_class) def register_events(self): @@ -321,5 +320,5 @@ class TrackedResource(BaseResource): db_api.sqla_remove(se.Session, 'after_bulk_delete', self._except_bulk_delete) except sql_exc.InvalidRequestError: - LOG.warning(_LW("No sqlalchemy event for resource %s found"), + LOG.warning("No sqlalchemy event for resource %s found", self.name) diff --git a/neutron/quota/resource_registry.py b/neutron/quota/resource_registry.py index 145ba8daf07..9a666545d87 100644 --- a/neutron/quota/resource_registry.py +++ b/neutron/quota/resource_registry.py @@ -14,7 +14,7 @@ from oslo_config import cfg from oslo_log import log import six -from neutron._i18n import _, _LI, _LW +from neutron._i18n import _ from neutron.db import api as db_api from neutron.quota import resource @@ -172,14 +172,14 @@ class ResourceRegistry(object): if (not cfg.CONF.QUOTAS.track_quota_usage or resource_name not in self._tracked_resource_mappings): - LOG.info(_LI("Creating instance of CountableResource for " - "resource:%s"), resource_name) + LOG.info("Creating instance of CountableResource for " + "resource:%s", resource_name) return resource.CountableResource( resource_name, resource._count_resource, 'quota_%s' % resource_name) else: - LOG.info(_LI("Creating instance of TrackedResource for " - "resource:%s"), resource_name) + LOG.info("Creating instance of TrackedResource for " + "resource:%s", resource_name) return resource.TrackedResource( resource_name, 
self._tracked_resource_mappings[resource_name], @@ -223,7 +223,7 @@ class ResourceRegistry(object): def register_resource(self, resource): if resource.name in self._resources: - LOG.warning(_LW('%s is already registered'), resource.name) + LOG.warning('%s is already registered', resource.name) if resource.name in self._tracked_resource_mappings: resource.register_events() self._resources[resource.name] = resource diff --git a/neutron/scheduler/dhcp_agent_scheduler.py b/neutron/scheduler/dhcp_agent_scheduler.py index 78ab21754d7..166993d2623 100644 --- a/neutron/scheduler/dhcp_agent_scheduler.py +++ b/neutron/scheduler/dhcp_agent_scheduler.py @@ -23,7 +23,6 @@ from oslo_db import exception as db_exc from oslo_log import log as logging from sqlalchemy import sql -from neutron._i18n import _LI, _LW from neutron.agent.common import utils as agent_utils from neutron.db import api as db_api from neutron.db.models import agent as agent_model @@ -74,8 +73,7 @@ class AutoScheduler(object): for dhcp_agent in dhcp_agents: if agent_utils.is_agent_down( dhcp_agent.heartbeat_timestamp): - LOG.warning(_LW('DHCP agent %s is not active'), - dhcp_agent.id) + LOG.warning('DHCP agent %s is not active', dhcp_agent.id) continue for net_id, is_routed_network in net_ids.items(): agents = plugin.get_dhcp_agents_hosting_networks( @@ -194,7 +192,7 @@ class DhcpFilter(base_resource_filter.BaseResourceFilter): except db_exc.DBDuplicateEntry: # it's totally ok, someone just did our job! 
bound_agents.remove(agent) - LOG.info(_LI('Agent %s already present'), agent_id) + LOG.info('Agent %s already present', agent_id) LOG.debug('Network %(network_id)s is scheduled to be ' 'hosted by DHCP agent %(agent_id)s', {'network_id': network_id, @@ -256,7 +254,7 @@ class DhcpFilter(base_resource_filter.BaseResourceFilter): active_dhcp_agents = plugin.get_agents_db( context, filters=filters) if not active_dhcp_agents: - LOG.warning(_LW('No more DHCP agents')) + LOG.warning('No more DHCP agents') return [] return active_dhcp_agents diff --git a/neutron/scheduler/l3_agent_scheduler.py b/neutron/scheduler/l3_agent_scheduler.py index c7ef191bf7f..d30a2c03ea2 100644 --- a/neutron/scheduler/l3_agent_scheduler.py +++ b/neutron/scheduler/l3_agent_scheduler.py @@ -26,7 +26,6 @@ from oslo_log import log as logging from oslo_log import versionutils import six -from neutron._i18n import _LW from neutron.common import utils from neutron.db import api as db_api from neutron.db import l3_hamode_db @@ -94,9 +93,9 @@ class L3Scheduler(object): if router_ids is not None: versionutils.report_deprecated_feature( LOG, - _LW('Passing router_ids has no effect on L3 agent ' - 'scheduling. This is deprecated and will be ' - 'removed in the Queens release.')) + 'Passing router_ids has no effect on L3 agent ' + 'scheduling. 
This is deprecated and will be ' + 'removed in the Queens release.') l3_agent = plugin.get_enabled_agent_on_host( context, lib_const.AGENT_TYPE_L3, host) @@ -144,13 +143,13 @@ class L3Scheduler(object): active_l3_agents = plugin.get_l3_agents(context, active=True) if not active_l3_agents: - LOG.warning(_LW('No active L3 agents')) + LOG.warning('No active L3 agents') return [] candidates = plugin.get_l3_agent_candidates(context, sync_router, active_l3_agents) if not candidates: - LOG.warning(_LW('No L3 agents can host the router %s'), + LOG.warning('No L3 agents can host the router %s', sync_router['id']) return candidates diff --git a/neutron/server/rpc_eventlet.py b/neutron/server/rpc_eventlet.py index 7278a0fd1ab..93af4d48f13 100644 --- a/neutron/server/rpc_eventlet.py +++ b/neutron/server/rpc_eventlet.py @@ -20,7 +20,6 @@ from oslo_log import log -from neutron._i18n import _LI from neutron import manager from neutron import service @@ -28,13 +27,13 @@ LOG = log.getLogger(__name__) def eventlet_rpc_server(): - LOG.info(_LI("Eventlet based AMQP RPC server starting...")) + LOG.info("Eventlet based AMQP RPC server starting...") try: manager.init() rpc_workers_launcher = service.start_all_workers() except NotImplementedError: - LOG.info(_LI("RPC was already started in parent process by " - "plugin.")) + LOG.info("RPC was already started in parent process by " + "plugin.") else: rpc_workers_launcher.wait() diff --git a/neutron/server/wsgi_eventlet.py b/neutron/server/wsgi_eventlet.py index e72796b250a..afd58bcca69 100644 --- a/neutron/server/wsgi_eventlet.py +++ b/neutron/server/wsgi_eventlet.py @@ -15,7 +15,6 @@ import eventlet from oslo_log import log -from neutron._i18n import _LI from neutron import service LOG = log.getLogger(__name__) @@ -41,7 +40,7 @@ def start_api_and_rpc_workers(neutron_api): pool.waitall() except NotImplementedError: - LOG.info(_LI("RPC was already started in parent process by " - "plugin.")) + LOG.info("RPC was already started in parent 
process by " + "plugin.") neutron_api.wait() diff --git a/neutron/service.py b/neutron/service.py index 47db6a8440a..29d47ae2f41 100644 --- a/neutron/service.py +++ b/neutron/service.py @@ -32,7 +32,6 @@ from oslo_service import service as common_service from oslo_utils import excutils from oslo_utils import importutils -from neutron._i18n import _LE, _LI from neutron.common import config from neutron.common import profiler from neutron.common import rpc as n_rpc @@ -87,8 +86,8 @@ def serve_wsgi(cls): service.start() except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Unrecoverable error: please check log ' - 'for details.')) + LOG.exception('Unrecoverable error: please check log ' + 'for details.') registry.notify(resources.PROCESS, events.BEFORE_SPAWN, service) return service @@ -120,7 +119,7 @@ class RpcWorker(neutron_worker.BaseWorker): try: self._wait() except Exception: - LOG.exception(_LE('done with wait')) + LOG.exception('done with wait') raise def _wait(self): @@ -163,8 +162,8 @@ def _get_rpc_workers(): if not plugin.rpc_workers_supported(): LOG.debug("Active plugin doesn't implement start_rpc_listeners") if 0 < cfg.CONF.rpc_workers: - LOG.error(_LE("'rpc_workers = %d' ignored because " - "start_rpc_listeners is not implemented."), + LOG.error("'rpc_workers = %d' ignored because " + "start_rpc_listeners is not implemented.", cfg.CONF.rpc_workers) raise NotImplementedError() @@ -256,8 +255,8 @@ def _start_workers(workers): return worker_launcher except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Unrecoverable error: please check log for ' - 'details.')) + LOG.exception('Unrecoverable error: please check log for ' + 'details.') def start_all_workers(): @@ -289,7 +288,7 @@ def _get_api_workers(): def _run_wsgi(app_name): app = config.load_paste_app(app_name) if not app: - LOG.error(_LE('No known API applications configured.')) + LOG.error('No known API applications configured.') return return 
run_wsgi_app(app) @@ -298,7 +297,7 @@ def run_wsgi_app(app): server = wsgi.Server("Neutron") server.start(app, cfg.CONF.bind_port, cfg.CONF.bind_host, workers=_get_api_workers()) - LOG.info(_LI("Neutron service started, listening on %(host)s:%(port)s"), + LOG.info("Neutron service started, listening on %(host)s:%(port)s", {'host': cfg.CONF.bind_host, 'port': cfg.CONF.bind_port}) return server @@ -399,7 +398,7 @@ class Service(n_rpc.Service): try: x.stop() except Exception: - LOG.exception(_LE("Exception occurs when timer stops")) + LOG.exception("Exception occurs when timer stops") self.timers = [] def wait(self): @@ -408,7 +407,7 @@ class Service(n_rpc.Service): try: x.wait() except Exception: - LOG.exception(_LE("Exception occurs when waiting for timer")) + LOG.exception("Exception occurs when waiting for timer") def reset(self): config.reset_service() diff --git a/neutron/services/auto_allocate/db.py b/neutron/services/auto_allocate/db.py index d8f6fdf97b8..2e1aeed4fd2 100644 --- a/neutron/services/auto_allocate/db.py +++ b/neutron/services/auto_allocate/db.py @@ -23,7 +23,7 @@ from neutron_lib.plugins import constants from neutron_lib.plugins import directory from oslo_log import log as logging -from neutron._i18n import _, _LE +from neutron._i18n import _ from neutron.common import exceptions as c_exc from neutron.db import _resource_extend as resource_extend from neutron.db import _utils as db_utils @@ -170,8 +170,8 @@ class AutoAllocatedTopologyMixin(common_db_mixin.CommonDbMixin): except exceptions.UnknownProvisioningError as e: # Clean partially provisioned topologies, and reraise the # error. If it can be retried, so be it. - LOG.error(_LE("Unknown error while provisioning topology for " - "tenant %(tenant_id)s. Reason: %(reason)s"), + LOG.error("Unknown error while provisioning topology for " + "tenant %(tenant_id)s. 
Reason: %(reason)s", {'tenant_id': tenant_id, 'reason': e}) self._cleanup( context, network_id=e.network_id, @@ -228,14 +228,14 @@ class AutoAllocatedTopologyMixin(common_db_mixin.CommonDbMixin): context, is_default=True) if not default_external_networks: - LOG.error(_LE("Unable to find default external network " - "for deployment, please create/assign one to " - "allow auto-allocation to work correctly.")) + LOG.error("Unable to find default external network " + "for deployment, please create/assign one to " + "allow auto-allocation to work correctly.") raise exceptions.AutoAllocationFailure( reason=_("No default router:external network")) if len(default_external_networks) > 1: - LOG.error(_LE("Multiple external default networks detected. " - "Network %s is true 'default'."), + LOG.error("Multiple external default networks detected. " + "Network %s is true 'default'.", default_external_networks[0]['network_id']) return default_external_networks[0].network_id @@ -249,7 +249,7 @@ class AutoAllocatedTopologyMixin(common_db_mixin.CommonDbMixin): s for s in default_subnet_pools if s ] if not available_pools: - LOG.error(_LE("No default pools available")) + LOG.error("No default pools available") raise n_exc.NotFound() return available_pools @@ -280,9 +280,9 @@ class AutoAllocatedTopologyMixin(common_db_mixin.CommonDbMixin): return subnets except (c_exc.SubnetAllocationError, ValueError, n_exc.BadRequest, n_exc.NotFound) as e: - LOG.error(_LE("Unable to auto allocate topology for tenant " - "%(tenant_id)s due to missing or unmet " - "requirements. Reason: %(reason)s"), + LOG.error("Unable to auto allocate topology for tenant " + "%(tenant_id)s due to missing or unmet " + "requirements. 
Reason: %(reason)s", {'tenant_id': tenant_id, 'reason': e}) if network: self._cleanup(context, network['id']) @@ -312,9 +312,9 @@ class AutoAllocatedTopologyMixin(common_db_mixin.CommonDbMixin): attached_subnets.append(subnet) return router except n_exc.BadRequest as e: - LOG.error(_LE("Unable to auto allocate topology for tenant " - "%(tenant_id)s because of router errors. " - "Reason: %(reason)s"), + LOG.error("Unable to auto allocate topology for tenant " + "%(tenant_id)s because of router errors. " + "Reason: %(reason)s", {'tenant_id': tenant_id, 'reason': e}) router_id = router['id'] if router else None self._cleanup(context, diff --git a/neutron/services/l3_router/l3_router_plugin.py b/neutron/services/l3_router/l3_router_plugin.py index 287284c40f1..7be653a9795 100644 --- a/neutron/services/l3_router/l3_router_plugin.py +++ b/neutron/services/l3_router/l3_router_plugin.py @@ -21,7 +21,6 @@ from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import importutils -from neutron._i18n import _LI from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.api.rpc.handlers import l3_rpc from neutron.common import rpc as n_rpc @@ -47,7 +46,7 @@ LOG = logging.getLogger(__name__) def disable_dvr_extension_by_config(aliases): if not cfg.CONF.enable_dvr: - LOG.info(_LI('Disabled DVR extension.')) + LOG.info('Disabled DVR extension.') if 'dvr' in aliases: aliases.remove('dvr') diff --git a/neutron/services/metering/agents/metering_agent.py b/neutron/services/metering/agents/metering_agent.py index c3456eb3478..af4e2215538 100644 --- a/neutron/services/metering/agents/metering_agent.py +++ b/neutron/services/metering/agents/metering_agent.py @@ -24,8 +24,7 @@ from oslo_service import periodic_task from oslo_service import service from oslo_utils import timeutils -from neutron.services.metering.drivers import utils as driverutils -from neutron._i18n import _, _LE, _LI, _LW +from neutron._i18n import _ from 
neutron.agent import rpc as agent_rpc from neutron.common import config as common_config from neutron.common import constants as n_const @@ -36,6 +35,7 @@ from neutron.conf.agent import common as config from neutron.conf.services import metering_agent from neutron import manager from neutron import service as neutron_service +from neutron.services.metering.drivers import utils as driverutils LOG = logging.getLogger(__name__) @@ -59,7 +59,7 @@ class MeteringPluginRpc(object): return cctxt.call(context, 'get_sync_data_metering', host=self.host) except Exception: - LOG.exception(_LE("Failed synchronizing routers")) + LOG.exception("Failed synchronizing routers") class MeteringAgent(MeteringPluginRpc, manager.Manager): @@ -83,7 +83,7 @@ class MeteringAgent(MeteringPluginRpc, manager.Manager): def _load_drivers(self): """Loads plugin-driver from configuration.""" - LOG.info(_LI("Loading Metering driver %s"), self.conf.driver) + LOG.info("Loading Metering driver %s", self.conf.driver) if not self.conf.driver: raise SystemExit(_('A metering driver must be specified')) self.metering_driver = driverutils.load_metering_driver(self, @@ -165,11 +165,11 @@ class MeteringAgent(MeteringPluginRpc, manager.Manager): try: return getattr(self.metering_driver, func_name)(context, meterings) except AttributeError: - LOG.exception(_LE("Driver %(driver)s does not implement %(func)s"), + LOG.exception("Driver %(driver)s does not implement %(func)s", {'driver': self.conf.driver, 'func': func_name}) except RuntimeError: - LOG.exception(_LE("Driver %(driver)s:%(func)s runtime error"), + LOG.exception("Driver %(driver)s:%(func)s runtime error", {'driver': self.conf.driver, 'func': func_name}) @@ -274,15 +274,15 @@ class MeteringAgentWithStateReport(MeteringAgent): self.use_call = False except AttributeError: # This means the server does not support report_state - LOG.warning(_LW("Neutron server does not support state report. 
" - "State report for this agent will be disabled.")) + LOG.warning("Neutron server does not support state report. " + "State report for this agent will be disabled.") self.heartbeat.stop() return except Exception: - LOG.exception(_LE("Failed reporting state!")) + LOG.exception("Failed reporting state!") def agent_updated(self, context, payload): - LOG.info(_LI("agent_updated by server side %s!"), payload) + LOG.info("agent_updated by server side %s!", payload) def main(): diff --git a/neutron/services/metering/drivers/iptables/iptables_driver.py b/neutron/services/metering/drivers/iptables/iptables_driver.py index 76dc23ff945..7f99c3403bc 100644 --- a/neutron/services/metering/drivers/iptables/iptables_driver.py +++ b/neutron/services/metering/drivers/iptables/iptables_driver.py @@ -17,7 +17,7 @@ from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import importutils -from neutron._i18n import _, _LE, _LI +from neutron._i18n import _ from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import namespaces from neutron.agent.linux import interface @@ -111,8 +111,7 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): if not self.conf.interface_driver: raise SystemExit(_('An interface driver must be specified')) - LOG.info(_LI("Loading interface driver %s"), - self.conf.interface_driver) + LOG.info("Loading interface driver %s", self.conf.interface_driver) self.driver = importutils.import_object(self.conf.interface_driver, self.conf) @@ -419,8 +418,8 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): chain_acc = rm.iptables_manager.get_traffic_counters( chain, wrap=False, zero=True) except RuntimeError: - LOG.exception(_LE('Failed to get traffic counters, ' - 'router: %s'), router) + LOG.exception('Failed to get traffic counters, ' + 'router: %s', router) routers_to_reconfigure.add(router['id']) continue diff --git a/neutron/services/metering/drivers/utils.py 
b/neutron/services/metering/drivers/utils.py index cdf6c933027..b5406574093 100644 --- a/neutron/services/metering/drivers/utils.py +++ b/neutron/services/metering/drivers/utils.py @@ -15,7 +15,6 @@ from oslo_log import log as logging -from neutron._i18n import _LE from neutron.common import utils as utils @@ -37,6 +36,5 @@ def load_metering_driver(plugin, conf): METERING_NAMESPACE, conf.driver) return loaded_class(plugin, conf) except ImportError: - LOG.error(_LE("Error loading metering driver '%s'"), - conf.driver) + LOG.error("Error loading metering driver '%s'", conf.driver) raise SystemExit(1) diff --git a/neutron/services/provider_configuration.py b/neutron/services/provider_configuration.py index ad4321ae2ef..97b42006eca 100644 --- a/neutron/services/provider_configuration.py +++ b/neutron/services/provider_configuration.py @@ -24,7 +24,7 @@ from oslo_log import log as logging from oslo_log import versionutils import stevedore -from neutron._i18n import _, _LW +from neutron._i18n import _ from neutron.conf.services import provider_configuration as prov_config from neutron.db import _utils as db_utils @@ -120,9 +120,9 @@ class NeutronModule(object): if providers: versionutils.report_deprecated_feature( LOG, - _LW('Implicit loading of service providers from ' - 'neutron_*.conf files is deprecated and will be ' - 'removed in Ocata release.')) + 'Implicit loading of service providers from ' + 'neutron_*.conf files is deprecated and will be ' + 'removed in Ocata release.') return providers @@ -148,10 +148,10 @@ def get_provider_driver_class(driver, namespace=SERVICE_PROVIDERS): return driver new_driver = "%s.%s" % (driver_manager.__module__, driver_manager.__name__) - LOG.warning(_LW( + LOG.warning( "The configured driver %(driver)s has been moved, automatically " "using %(new_driver)s instead. 
Please update your config files, " - "as this automatic fixup will be removed in a future release."), + "as this automatic fixup will be removed in a future release.", {'driver': driver, 'new_driver': new_driver}) return new_driver diff --git a/neutron/services/qos/drivers/manager.py b/neutron/services/qos/drivers/manager.py index 9474f4ea572..db676e2a15f 100644 --- a/neutron/services/qos/drivers/manager.py +++ b/neutron/services/qos/drivers/manager.py @@ -15,7 +15,6 @@ from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from oslo_log import log as logging -from neutron._i18n import _LW from neutron.api.rpc.callbacks import events as rpc_events from neutron.api.rpc.callbacks.producer import registry as rpc_registry from neutron.api.rpc.callbacks import resources @@ -53,10 +52,9 @@ class QosServiceDriverManager(object): def _get_qos_policy_cb(resource, policy_id, **kwargs): context = kwargs.get('context') if context is None: - LOG.warning(_LW( - 'Received %(resource)s %(policy_id)s without context'), - {'resource': resource, 'policy_id': policy_id} - ) + LOG.warning( + 'Received %(resource)s %(policy_id)s without context', + {'resource': resource, 'policy_id': policy_id}) return policy = policy_object.QosPolicy.get_object(context, id=policy_id) diff --git a/neutron/services/revisions/revision_plugin.py b/neutron/services/revisions/revision_plugin.py index d321abfe43b..9115cf5ac9c 100644 --- a/neutron/services/revisions/revision_plugin.py +++ b/neutron/services/revisions/revision_plugin.py @@ -18,7 +18,7 @@ from sqlalchemy.orm import exc from sqlalchemy.orm import session as se import webob.exc -from neutron._i18n import _, _LW +from neutron._i18n import _ from neutron.db import _resource_extend as resource_extend from neutron.db import api as db_api from neutron.db import standard_attr @@ -62,8 +62,8 @@ class RevisionPlugin(service_base.ServicePluginBase): try: related_obj = self._find_related_obj(session, obj, revises_col) if not 
related_obj: - LOG.warning(_LW("Could not find related %(col)s for " - "resource %(obj)s to bump revision."), + LOG.warning("Could not find related %(col)s for " + "resource %(obj)s to bump revision.", {'obj': obj, 'col': revises_col}) continue # if related object revises others, bump those as well diff --git a/neutron/services/segments/plugin.py b/neutron/services/segments/plugin.py index e6465c4ab01..99871f93743 100644 --- a/neutron/services/segments/plugin.py +++ b/neutron/services/segments/plugin.py @@ -29,7 +29,7 @@ from novaclient import exceptions as nova_exc from oslo_config import cfg from oslo_log import log -from neutron._i18n import _, _LE, _LI +from neutron._i18n import _ from neutron.common import exceptions as n_exc from neutron.db import _resource_extend as resource_extend from neutron.db import api as db_api @@ -209,8 +209,8 @@ class NovaSegmentNotifier(object): except n_exc.PlacementInventoryUpdateConflict: LOG.debug('Re-trying to update Nova IPv4 inventory for ' 'routed network segment: %s', event.segment_id) - LOG.error(_LE('Failed to update Nova IPv4 inventory for routed ' - 'network segment: %s'), event.segment_id) + LOG.error('Failed to update Nova IPv4 inventory for routed ' + 'network segment: %s', event.segment_id) def _create_nova_inventory(self, segment_id, total, reserved, segment_host_mappings): @@ -326,16 +326,16 @@ class NovaSegmentNotifier(object): try: aggregate_id = self._get_aggregate_id(segment_id) except n_exc.PlacementAggregateNotFound: - LOG.info(_LI('When adding host %(host)s, aggregate not found ' - 'for routed network segment %(segment_id)s'), + LOG.info('When adding host %(host)s, aggregate not found ' + 'for routed network segment %(segment_id)s', {'host': event.host, 'segment_id': segment_id}) continue try: self.n_client.aggregates.add_host(aggregate_id, event.host) except nova_exc.Conflict: - LOG.info(_LI('Host %(host)s already exists in aggregate for ' - 'routed network segment %(segment_id)s'), + LOG.info('Host 
%(host)s already exists in aggregate for ' + 'routed network segment %(segment_id)s', {'host': event.host, 'segment_id': segment_id}) @registry.receives(resources.PORT, diff --git a/neutron/services/service_base.py b/neutron/services/service_base.py index 21b34ad54ff..cefd4deb96b 100644 --- a/neutron/services/service_base.py +++ b/neutron/services/service_base.py @@ -17,7 +17,6 @@ from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils -from neutron._i18n import _LE, _LI from neutron.db import servicetype_db as sdb from neutron.services import provider_configuration as pconf @@ -36,7 +35,7 @@ def load_drivers(service_type, plugin): filters={'service_type': [service_type]}) ) if not providers: - msg = (_LE("No providers specified for '%s' service, exiting") % + msg = ("No providers specified for '%s' service, exiting" % service_type) LOG.error(msg) raise SystemExit(1) @@ -53,8 +52,8 @@ def load_drivers(service_type, plugin): 'service_type': service_type}) except ImportError: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error loading provider '%(provider)s' for " - "service %(service_type)s"), + LOG.exception("Error loading provider '%(provider)s' for " + "service %(service_type)s", {'provider': provider['driver'], 'service_type': service_type}) @@ -64,7 +63,7 @@ def load_drivers(service_type, plugin): None, service_type) default_provider = provider['name'] except pconf.DefaultServiceProviderNotFound: - LOG.info(_LI("Default provider is not specified for service type %s"), + LOG.info("Default provider is not specified for service type %s", service_type) return drivers, default_provider diff --git a/neutron/services/timestamp/timestamp_db.py b/neutron/services/timestamp/timestamp_db.py index d1602939807..d1dd417d15c 100644 --- a/neutron/services/timestamp/timestamp_db.py +++ b/neutron/services/timestamp/timestamp_db.py @@ -16,7 +16,6 @@ from neutron_lib import exceptions as n_exc from oslo_utils import 
timeutils from sqlalchemy.orm import session as se -from neutron._i18n import _LW from neutron.db import _model_query as model_query from neutron.db import _resource_extend as resource_extend from neutron.db import api as db_api @@ -39,8 +38,8 @@ def _change_since_result_filter_hook(query, filters): try: changed_since_string = timeutils.parse_isotime(data) except Exception: - msg = _LW("The input %s must be in the " - "following format: YYYY-MM-DDTHH:MM:SSZ") % CHANGED_SINCE + msg = ("The input %s must be in the " + "following format: YYYY-MM-DDTHH:MM:SSZ") % CHANGED_SINCE raise n_exc.InvalidInput(error_message=msg) changed_since = (timeutils. normalize_time(changed_since_string)) diff --git a/neutron/services/trunk/drivers/linuxbridge/agent/driver.py b/neutron/services/trunk/drivers/linuxbridge/agent/driver.py index 0eb2d2044fc..1fb765239a6 100644 --- a/neutron/services/trunk/drivers/linuxbridge/agent/driver.py +++ b/neutron/services/trunk/drivers/linuxbridge/agent/driver.py @@ -17,7 +17,6 @@ from neutron_lib.callbacks import resources as local_resources from oslo_log import log as logging import oslo_messaging -from neutron._i18n import _LE from neutron.api.rpc.callbacks import events from neutron.api.rpc.handlers import resources_rpc from neutron.services.trunk import constants as t_const @@ -113,8 +112,7 @@ class LinuxBridgeTrunkDriver(trunk_rpc.TrunkSkeleton): LOG.debug("Trunk %s removed during wiring", trunk.port_id) return # something broke - LOG.exception(_LE("Failure setting up subports for %s"), - trunk.port_id) + LOG.exception("Failure setting up subports for %s", trunk.port_id) self._tapi.set_trunk_status(context, trunk, t_const.DEGRADED_STATUS) diff --git a/neutron/services/trunk/drivers/linuxbridge/agent/trunk_plumber.py b/neutron/services/trunk/drivers/linuxbridge/agent/trunk_plumber.py index 75ace4a2cbf..90bb94a7c8f 100644 --- a/neutron/services/trunk/drivers/linuxbridge/agent/trunk_plumber.py +++ 
b/neutron/services/trunk/drivers/linuxbridge/agent/trunk_plumber.py @@ -16,7 +16,6 @@ from oslo_concurrency import lockutils from oslo_log import log as logging from oslo_utils import excutils -from neutron._i18n import _LW from neutron.agent.linux import ip_lib from neutron.common import utils from neutron.plugins.ml2.drivers.linuxbridge.agent.common import utils as lutil @@ -141,7 +140,7 @@ class _InterfaceInfo(object): name_section = line.split(': ')[1] except IndexError: name_section = None - LOG.warning(_LW("Bad interface line: %s"), line) + LOG.warning("Bad interface line: %s", line) if not name_section or '@' not in name_section: self.devname = name_section self.parent_devname = self.vlan_tag = None diff --git a/neutron/services/trunk/drivers/openvswitch/agent/driver.py b/neutron/services/trunk/drivers/openvswitch/agent/driver.py index c8e668a18f7..2b9d518136f 100644 --- a/neutron/services/trunk/drivers/openvswitch/agent/driver.py +++ b/neutron/services/trunk/drivers/openvswitch/agent/driver.py @@ -15,7 +15,6 @@ from oslo_config import cfg from oslo_log import log as logging import oslo_messaging -from neutron._i18n import _LE, _LW from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources @@ -56,7 +55,7 @@ class OVSTrunkSkeleton(agent.TrunkSkeleton): if self.ovsdb_handler.manages_this_trunk(trunk_id): if event_type not in (events.CREATED, events.DELETED): - LOG.error(_LE("Unknown or unimplemented event %s"), event_type) + LOG.error("Unknown or unimplemented event %s", event_type) return ctx = self.ovsdb_handler.context @@ -71,9 +70,9 @@ class OVSTrunkSkeleton(agent.TrunkSkeleton): trunk_id, subport_ids) self.ovsdb_handler.report_trunk_status(ctx, trunk_id, status) except oslo_messaging.MessagingException as e: - LOG.error(_LE( + LOG.error( "Error on event %(event)s for subports " - "%(subports)s: %(err)s"), + "%(subports)s: %(err)s", {'event': event_type, 'subports': 
subports, 'err': e}) @local_registry.receives(constants.TRUNK, [local_events.BEFORE_CREATE]) @@ -87,9 +86,9 @@ class OVSTrunkSkeleton(agent.TrunkSkeleton): # only if the trunk is indeed associated with ports that have security # groups and QoS rules, though this would be a lot more work. if "iptables_hybrid" in cfg.CONF.SECURITYGROUP.firewall_driver: - LOG.warning(_LW( + LOG.warning( "Firewall driver iptables_hybrid is not compatible with " - "trunk ports. Trunk %(trunk_id)s may be insecure."), + "trunk ports. Trunk %(trunk_id)s may be insecure.", {'trunk_id': kwargs['trunk'].id}) diff --git a/neutron/services/trunk/drivers/openvswitch/agent/ovsdb_handler.py b/neutron/services/trunk/drivers/openvswitch/agent/ovsdb_handler.py index a0d0861825f..5d3738e405c 100644 --- a/neutron/services/trunk/drivers/openvswitch/agent/ovsdb_handler.py +++ b/neutron/services/trunk/drivers/openvswitch/agent/ovsdb_handler.py @@ -25,7 +25,7 @@ from oslo_log import log as logging import oslo_messaging from oslo_serialization import jsonutils -from neutron._i18n import _, _LE +from neutron._i18n import _ from neutron.agent.common import ovs_lib from neutron.api.rpc.handlers import resources_rpc from neutron.common import utils as common_utils @@ -85,8 +85,8 @@ def bridge_has_port(bridge, is_port_predicate): try: ifaces = bridge.get_iface_name_list() except RuntimeError as e: - LOG.error(_LE("Cannot obtain interface list for bridge %(bridge)s: " - "%(err)s"), + LOG.error("Cannot obtain interface list for bridge %(bridge)s: " + "%(err)s", {'bridge': bridge.br_name, 'err': e}) return False @@ -183,13 +183,13 @@ class OVSDBHandler(object): try: self._wire_trunk(bridge, self._get_parent_port(bridge), rewire) except oslo_messaging.MessagingException as e: - LOG.error(_LE("Got messaging error while processing trunk bridge " - "%(bridge_name)s: %(err)s"), + LOG.error("Got messaging error while processing trunk bridge " + "%(bridge_name)s: %(err)s", {'bridge_name': bridge.br_name, 'err': e}) except 
exceptions.ParentPortNotFound as e: - LOG.error(_LE("Failed to get parent port for bridge " - "%(bridge_name)s: %(err)s"), + LOG.error("Failed to get parent port for bridge " + "%(bridge_name)s: %(err)s", {'bridge_name': bridge.br_name, 'err': e}) @@ -217,7 +217,7 @@ class OVSDBHandler(object): self.unwire_subports_for_trunk(trunk_id, subport_ids) self.trunk_manager.remove_trunk(trunk_id, parent_port_id) except tman.TrunkManagerError as te: - LOG.error(_LE("Removing trunk %(trunk_id)s failed: %(err)s"), + LOG.error("Removing trunk %(trunk_id)s failed: %(err)s", {'trunk_id': port['external_ids']['trunk_id'], 'err': te}) else: @@ -241,8 +241,8 @@ class OVSDBHandler(object): for port in ports if is_subport(port['name']) ] except (RuntimeError, tman.TrunkManagerError) as e: - LOG.error(_LE("Failed to get subports for bridge %(bridge)s: " - "%(err)s"), {'bridge': bridge.br_name, 'err': e}) + LOG.error("Failed to get subports for bridge %(bridge)s: " + "%(err)s", {'bridge': bridge.br_name, 'err': e}) return [] def wire_subports_for_trunk(self, context, trunk_id, subports, @@ -262,9 +262,9 @@ class OVSDBHandler(object): subports_mac[subport.port_id], subport.segmentation_id) except tman.TrunkManagerError as te: - LOG.error(_LE("Failed to add subport with port ID " - "%(subport_port_id)s to trunk with ID " - "%(trunk_id)s: %(err)s"), + LOG.error("Failed to add subport with port ID " + "%(subport_port_id)s to trunk with ID " + "%(trunk_id)s: %(err)s", {'subport_port_id': subport.port_id, 'trunk_id': trunk_id, 'err': te}) @@ -275,8 +275,8 @@ class OVSDBHandler(object): self._update_trunk_metadata( trunk_bridge, parent_port, trunk_id, subport_ids) except (RuntimeError, exceptions.ParentPortNotFound) as e: - LOG.error(_LE("Failed to store metadata for trunk %(trunk_id)s: " - "%(reason)s"), {'trunk_id': trunk_id, 'reason': e}) + LOG.error("Failed to store metadata for trunk %(trunk_id)s: " + "%(reason)s", {'trunk_id': trunk_id, 'reason': e}) # NOTE(status_police): Trunk bridge 
has stale metadata now, it # might cause troubles during deletion. Signal a DEGRADED status; # if the user undo/redo the operation things may go back to @@ -294,8 +294,8 @@ class OVSDBHandler(object): self.trunk_manager.remove_sub_port(trunk_id, subport_id) ids.append(subport_id) except tman.TrunkManagerError as te: - LOG.error(_LE("Removing subport %(subport_id)s from trunk " - "%(trunk_id)s failed: %(err)s"), + LOG.error("Removing subport %(subport_id)s from trunk " + "%(trunk_id)s failed: %(err)s", {'subport_id': subport_id, 'trunk_id': trunk_id, 'err': te}) @@ -310,8 +310,8 @@ class OVSDBHandler(object): # might cause troubles during deletion. Signal a DEGRADED status; # if the user undo/redo the operation things may go back to # normal. - LOG.error(_LE("Failed to store metadata for trunk %(trunk_id)s: " - "%(reason)s"), {'trunk_id': trunk_id, 'reason': e}) + LOG.error("Failed to store metadata for trunk %(trunk_id)s: " + "%(reason)s", {'trunk_id': trunk_id, 'reason': e}) return constants.DEGRADED_STATUS except exceptions.ParentPortNotFound as e: # If a user deletes/migrates a VM and remove subports from a trunk @@ -356,11 +356,11 @@ class OVSDBHandler(object): self.trunk_manager.get_port_uuid_from_external_ids(port)) trunk = self.trunk_rpc.get_trunk_details(ctx, parent_port_id) except tman.TrunkManagerError as te: - LOG.error(_LE("Can't obtain parent port ID from port %s"), + LOG.error("Can't obtain parent port ID from port %s", port['name']) return except resources_rpc.ResourceNotFound: - LOG.error(_LE("Port %s has no trunk associated."), parent_port_id) + LOG.error("Port %s has no trunk associated.", parent_port_id) return try: @@ -371,7 +371,7 @@ class OVSDBHandler(object): trunk.id, trunk.port_id, port['external_ids'].get('attached-mac')) except tman.TrunkManagerError as te: - LOG.error(_LE("Failed to create trunk %(trunk_id)s: %(err)s"), + LOG.error("Failed to create trunk %(trunk_id)s: %(err)s", {'trunk_id': trunk.id, 'err': te}) # NOTE(status_police): 
Trunk couldn't be created so it ends in @@ -476,8 +476,8 @@ class OVSDBHandler(object): return True except common_utils.WaitTimeout: LOG.error( - _LE('No port present on trunk bridge %(br_name)s ' - 'in %(timeout)d seconds.'), + 'No port present on trunk bridge %(br_name)s ' + 'in %(timeout)d seconds.', {'br_name': bridge.br_name, 'timeout': self.timeout}) return False diff --git a/neutron/services/trunk/drivers/openvswitch/agent/trunk_manager.py b/neutron/services/trunk/drivers/openvswitch/agent/trunk_manager.py index ce59442dd62..5cb5706dcf1 100644 --- a/neutron/services/trunk/drivers/openvswitch/agent/trunk_manager.py +++ b/neutron/services/trunk/drivers/openvswitch/agent/trunk_manager.py @@ -14,7 +14,7 @@ from neutron_lib import constants from neutron_lib import exceptions from oslo_log import log as logging -from neutron._i18n import _, _LE +from neutron._i18n import _ from neutron.agent.common import ovs_lib from neutron.services.trunk.drivers.openvswitch.agent import exceptions as exc from neutron.services.trunk.drivers.openvswitch import utils @@ -248,8 +248,8 @@ class TrunkManager(object): LOG.debug("Deleted bridge '%s' and patch peers '%s'.", trunk_bridge.br_name, patch_peers) except RuntimeError as e: - LOG.error(_LE("Could not delete '%(peers)s' associated to " - "trunk bridge %(name)s. Reason: %(reason)s."), + LOG.error("Could not delete '%(peers)s' associated to " + "trunk bridge %(name)s. 
Reason: %(reason)s.", {'peers': patch_peers, 'name': trunk_bridge.br_name, 'reason': e}) diff --git a/neutron/services/trunk/rpc/server.py b/neutron/services/trunk/rpc/server.py index efa85454ef5..d4297f8ae3b 100644 --- a/neutron/services/trunk/rpc/server.py +++ b/neutron/services/trunk/rpc/server.py @@ -20,7 +20,6 @@ from oslo_log import helpers as log_helpers from oslo_log import log as logging import oslo_messaging -from neutron._i18n import _LE from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks.producer import registry from neutron.api.rpc.callbacks import resources @@ -130,7 +129,7 @@ class TrunkSkeleton(object): # of the port data structure. updated_ports.append(updated_port) except trunk_exc.SubPortBindingError as e: - LOG.error(_LE("Failed to bind subport: %s"), e) + LOG.error("Failed to bind subport: %s", e) # NOTE(status_police) The subport binding has failed in a # manner in which we cannot proceed and the user must take @@ -138,8 +137,8 @@ class TrunkSkeleton(object): trunk.update(status=trunk_consts.ERROR_STATUS) return [] except Exception as e: - msg = _LE("Failed to bind subport port %(port)s on trunk " - "%(trunk)s: %(exc)s") + msg = ("Failed to bind subport port %(port)s on trunk " + "%(trunk)s: %(exc)s") LOG.error(msg, {'port': port_id, 'trunk': trunk.id, 'exc': e}) if len(port_ids) != len(updated_ports): diff --git a/neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py b/neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py index cbee90a0f0c..e7d3ba41fc0 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py +++ b/neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py @@ -16,7 +16,6 @@ from neutron_lib.plugins.ml2 import api from oslo_log import log -from neutron._i18n import _ LOG = log.getLogger(__name__) @@ -31,9 +30,9 @@ class LoggerMechanismDriver(api.MechanismDriver): pass def _log_network_call(self, method_name, context): - LOG.info(_("%(method)s called with network settings 
%(current)s " - "(original settings %(original)s) and " - "network segments %(segments)s"), + LOG.info("%(method)s called with network settings %(current)s " + "(original settings %(original)s) and " + "network segments %(segments)s", {'method': method_name, 'current': context.current, 'original': context.original, @@ -62,8 +61,8 @@ class LoggerMechanismDriver(api.MechanismDriver): return True def _log_subnet_call(self, method_name, context): - LOG.info(_("%(method)s called with subnet settings %(current)s " - "(original settings %(original)s)"), + LOG.info("%(method)s called with subnet settings %(current)s " + "(original settings %(original)s)", {'method': method_name, 'current': context.current, 'original': context.original}) @@ -88,18 +87,18 @@ class LoggerMechanismDriver(api.MechanismDriver): def _log_port_call(self, method_name, context): network_context = context.network - LOG.info(_("%(method)s called with port settings %(current)s " - "(original settings %(original)s) " - "host %(host)s " - "(original host %(original_host)s) " - "vif type %(vif_type)s " - "(original vif type %(original_vif_type)s) " - "vif details %(vif_details)s " - "(original vif details %(original_vif_details)s) " - "binding levels %(levels)s " - "(original binding levels %(original_levels)s) " - "on network %(network)s " - "with segments to bind %(segments_to_bind)s"), + LOG.info("%(method)s called with port settings %(current)s " + "(original settings %(original)s) " + "host %(host)s " + "(original host %(original_host)s) " + "vif type %(vif_type)s " + "(original vif type %(original_vif_type)s) " + "vif details %(vif_details)s " + "(original vif details %(original_vif_details)s) " + "binding levels %(levels)s " + "(original binding levels %(original_levels)s) " + "on network %(network)s " + "with segments to bind %(segments_to_bind)s", {'method': method_name, 'current': context.current, 'original': context.original, @@ -137,7 +136,7 @@ class LoggerMechanismDriver(api.MechanismDriver): 
def filter_hosts_with_segment_access( self, context, segments, candidate_hosts, agent_getter): - LOG.info(_("filter_hosts_with_segment_access called with segments " - "%(segments)s, candidate hosts %(hosts)s "), + LOG.info("filter_hosts_with_segment_access called with segments " + "%(segments)s, candidate hosts %(hosts)s ", {'segments': segments, 'hosts': candidate_hosts}) return set() diff --git a/neutron/wsgi.py b/neutron/wsgi.py index b309584e144..a17ff5eccfc 100644 --- a/neutron/wsgi.py +++ b/neutron/wsgi.py @@ -39,7 +39,7 @@ import six import webob.dec import webob.exc -from neutron._i18n import _, _LE, _LI +from neutron._i18n import _ from neutron.common import config from neutron.common import exceptions as n_exc from neutron.conf import wsgi as wsgi_config @@ -129,7 +129,7 @@ class Server(object): family = info[0] bind_addr = info[-1] except Exception: - LOG.exception(_LE("Unable to listen on %(host)s:%(port)s"), + LOG.exception("Unable to listen on %(host)s:%(port)s", {'host': host, 'port': port}) sys.exit(1) @@ -583,27 +583,27 @@ class Resource(Application): def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" - LOG.info(_LI("%(method)s %(url)s"), + LOG.info("%(method)s %(url)s", {"method": request.method, "url": request.url}) try: action, args, accept = self.deserializer.deserialize(request) except exception.InvalidContentType: msg = _("Unsupported Content-Type") - LOG.exception(_LE("InvalidContentType: %s"), msg) + LOG.exception("InvalidContentType: %s", msg) return Fault(webob.exc.HTTPBadRequest(explanation=msg)) except n_exc.MalformedRequestBody: msg = _("Malformed request body") - LOG.exception(_LE("MalformedRequestBody: %s"), msg) + LOG.exception("MalformedRequestBody: %s", msg) return Fault(webob.exc.HTTPBadRequest(explanation=msg)) try: action_result = self.dispatch(request, action, args) except webob.exc.HTTPException as ex: - LOG.info(_LI("HTTP exception thrown: %s"), ex) + LOG.info("HTTP exception 
thrown: %s", ex) action_result = Fault(ex, self._fault_body_function) except Exception: - LOG.exception(_LE("Internal error")) + LOG.exception("Internal error") # Do not include the traceback to avoid returning it to clients. action_result = Fault(webob.exc.HTTPServerError(), self._fault_body_function) @@ -616,10 +616,10 @@ class Resource(Application): response = action_result try: - LOG.info(_LI("%(url)s returned with HTTP %(status)d"), + LOG.info("%(url)s returned with HTTP %(status)d", dict(url=request.url, status=response.status_int)) except AttributeError as e: - LOG.info(_LI("%(url)s returned a fault: %(exception)s"), + LOG.info("%(url)s returned a fault: %(exception)s", dict(url=request.url, exception=e)) return response @@ -633,7 +633,7 @@ class Resource(Application): # an argument whose name is 'request' return controller_method(request=request, **action_args) except TypeError: - LOG.exception(_LE('Invalid request')) + LOG.exception('Invalid request') return Fault(webob.exc.HTTPBadRequest()) diff --git a/tox.ini b/tox.ini index 7298e32fc74..3d5589d040f 100644 --- a/tox.ini +++ b/tox.ini @@ -136,9 +136,7 @@ commands = sphinx-build -W -b html doc/source doc/build/html # TODO(ihrachys) figure out what to do with N534 and N536 # N534 Untranslated exception message # N536 Use assertIsNone rather than assertEqual to check for None values -# TODO(ihrachys) reenable N537 when new neutron-lib release is available -# N537 Log messages should not be translated -ignore = E125,E126,E128,E129,E265,H404,H405,N530,N534,N536,N537 +ignore = E125,E126,E128,E129,E265,H404,H405,N530,N534,N536 # H106: Don't put vim configuration in source files # H203: Use assertIs(Not)None to check for None # H904: Delay string interpolations at logging calls