Make code follow log translation guideline

Since Pike, log messages should no longer be translated. This patch
removes the calls to the i18n markers _LC, _LI, _LE and _LW from
logging calls throughout the code. The log-level translator
definitions in neutron._i18n are removed as well. The patch also
drops the log translation check from the ignore directive in tox.ini.

Change-Id: If9aa76fcf121c0e61a7c08088006c5873faee56e
(cherry picked from commit 7322bd6efb)
parent 2f98022fa5
commit 2f4d7aa96f
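The diff below applies one mechanical pattern over and over: the log-level
translation marker is dropped and the message string is handed straight to
the oslo.log logger. A minimal sketch of the before/after shape, reusing the
message from the agent-extensions hunk below (the surrounding helper function
is hypothetical, for illustration only):

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    def report_loaded_extensions(names):
        # Before this patch (Ocata and earlier), the message was wrapped
        # in a marker imported from neutron._i18n:
        #     from neutron._i18n import _LI
        #     LOG.info(_LI("Loaded agent extensions: %s"), names)
        # From Pike on the string is passed directly and is not translated:
        LOG.info("Loaded agent extensions: %s", names)

User-facing strings (exceptions and the like) keep using the primary "_"
translator where it is still needed; only the _LI/_LW/_LE/_LC log markers
go away.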
@@ -27,16 +27,6 @@ _C = _translators.contextual_form
# The plural translation function using the name "_P"
_P = _translators.plural_form

-# Translators for log levels.
-#
-# The abbreviated names are meant to reflect the usual use of a short
-# name like '_'. The "L" is for "log" and the other letter comes from
-# the level.
-_LI = _translators.log_info
-_LW = _translators.log_warning
-_LE = _translators.log_error
-_LC = _translators.log_critical
-

def get_available_languages():
return oslo_i18n.get_available_languages(DOMAIN)
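This first hunk trims neutron/_i18n.py itself. A rough sketch of what the
module looks like after the removal, assuming the standard oslo.i18n
boilerplate (DOMAIN and the TranslatorFactory) that sits above the changed
block but is not shown in the hunk:

    import oslo_i18n

    DOMAIN = "neutron"
    _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)

    # The primary translation function using the name "_"
    _ = _translators.primary

    # The contextual translation function using the name "_C"
    _C = _translators.contextual_form

    # The plural translation function using the name "_P"
    _P = _translators.plural_form

    # The log-level markers _LI/_LW/_LE/_LC are gone; log messages are
    # emitted untranslated from Pike on.

    def get_available_languages():
        return oslo_i18n.get_available_languages(DOMAIN)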
@@ -13,7 +13,6 @@
from oslo_log import log
import stevedore

-from neutron._i18n import _LI
from neutron.conf.agent import agent_extensions_manager as agent_ext_mgr_config

LOG = log.getLogger(__name__)
@@ -28,7 +27,7 @@ class AgentExtensionsManager(stevedore.named.NamedExtensionManager):
super(AgentExtensionsManager, self).__init__(
namespace, conf.agent.extensions,
invoke_on_load=True, name_order=True)
-LOG.info(_LI("Loaded agent extensions: %s"), self.names())
+LOG.info("Loaded agent extensions: %s", self.names())

def initialize(self, connection, driver_type, agent_api=None):
"""Initialize enabled agent extensions.
@@ -44,7 +43,7 @@ class AgentExtensionsManager(stevedore.named.NamedExtensionManager):
"""
# Initialize each agent extension in the list.
for extension in self:
-LOG.info(_LI("Initializing agent extension '%s'"), extension.name)
+LOG.info("Initializing agent extension '%s'", extension.name)
# If the agent has provided an agent_api object, this object will
# be passed to all interested extensions. This object must be
# consumed by each such extension before the extension's
@@ -27,7 +27,7 @@ from oslo_log import log as logging
import six
import tenacity

-from neutron._i18n import _, _LE, _LI, _LW
+from neutron._i18n import _
from neutron.agent.common import ip_lib
from neutron.agent.common import utils
from neutron.agent.ovsdb import api as ovsdb_api
@@ -298,8 +298,8 @@ class OVSBridge(BaseOVS):
"in 1 second. Attempt: %s/10", i)
time.sleep(1)
continue
-LOG.error(_LE("Unable to execute %(cmd)s. Exception: "
-"%(exception)s"),
+LOG.error("Unable to execute %(cmd)s. Exception: "
+"%(exception)s",
{'cmd': full_args, 'exception': e})
break

@@ -320,7 +320,7 @@ class OVSBridge(BaseOVS):
try:
ofport = self._get_port_val(port_name, "ofport")
except tenacity.RetryError:
-LOG.exception(_LE("Timed out retrieving ofport on port %s."),
+LOG.exception("Timed out retrieving ofport on port %s.",
port_name)
return ofport

@@ -330,7 +330,7 @@ class OVSBridge(BaseOVS):
try:
port_external_ids = self._get_port_val(port_name, "external_ids")
except tenacity.RetryError:
-LOG.exception(_LE("Timed out retrieving external_ids on port %s."),
+LOG.exception("Timed out retrieving external_ids on port %s.",
port_name)
return port_external_ids

@@ -526,10 +526,10 @@ class OVSBridge(BaseOVS):
if_exists=True)
for result in results:
if result['ofport'] == UNASSIGNED_OFPORT:
-LOG.warning(_LW("Found not yet ready openvswitch port: %s"),
+LOG.warning("Found not yet ready openvswitch port: %s",
result['name'])
elif result['ofport'] == INVALID_OFPORT:
-LOG.warning(_LW("Found failed openvswitch port: %s"),
+LOG.warning("Found failed openvswitch port: %s",
result['name'])
elif 'attached-mac' in result['external_ids']:
port_id = self.portid_from_external_ids(result['external_ids'])
@@ -569,8 +569,8 @@ class OVSBridge(BaseOVS):
for port_id in port_ids:
result[port_id] = None
if port_id not in by_id:
-LOG.info(_LI("Port %(port_id)s not present in bridge "
-"%(br_name)s"),
+LOG.info("Port %(port_id)s not present in bridge "
+"%(br_name)s",
{'port_id': port_id, 'br_name': self.br_name})
continue
pinfo = by_id[port_id]
@@ -584,8 +584,8 @@ class OVSBridge(BaseOVS):
@staticmethod
def _check_ofport(port_id, port_info):
if port_info['ofport'] in [UNASSIGNED_OFPORT, INVALID_OFPORT]:
-LOG.warning(_LW("ofport: %(ofport)s for VIF: %(vif)s "
-"is not a positive integer"),
+LOG.warning("ofport: %(ofport)s for VIF: %(vif)s "
+"is not a positive integer",
{'ofport': port_info['ofport'], 'vif': port_id})
return False
return True
@@ -602,7 +602,7 @@ class OVSBridge(BaseOVS):
continue
mac = port['external_ids'].get('attached-mac')
return VifPort(port['name'], port['ofport'], port_id, mac, self)
-LOG.info(_LI("Port %(port_id)s not present in bridge %(br_name)s"),
+LOG.info("Port %(port_id)s not present in bridge %(br_name)s",
{'port_id': port_id, 'br_name': self.br_name})

def delete_ports(self, all_ports=False):
@@ -837,7 +837,7 @@ class DeferredOVSBridge(object):
if exc_type is None:
self.apply_flows()
else:
-LOG.exception(_LE("OVS flows could not be applied on bridge %s"),
+LOG.exception("OVS flows could not be applied on bridge %s",
self.br.br_name)

@@ -19,7 +19,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils

-from neutron._i18n import _LE
from neutron.common import utils as neutron_utils
from neutron.conf.agent import common as config
from neutron.conf.agent.database import agents_db
@@ -53,7 +52,7 @@ def load_interface_driver(conf):
INTERFACE_NAMESPACE, conf.interface_driver)
return loaded_class(conf)
except ImportError:
-LOG.error(_LE("Error loading interface driver '%s'"),
+LOG.error("Error loading interface driver '%s'",
conf.interface_driver)
raise SystemExit(1)

@@ -29,7 +29,7 @@ from oslo_utils import fileutils
from oslo_utils import importutils
import six

-from neutron._i18n import _, _LE, _LI, _LW
+from neutron._i18n import _
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.metadata import driver as metadata_driver
@@ -120,7 +120,7 @@ class DhcpAgent(manager.Manager):

def after_start(self):
self.run()
-LOG.info(_LI("DHCP agent started"))
+LOG.info("DHCP agent started")

def run(self):
"""Activate the DHCP agent."""
@@ -164,7 +164,7 @@ class DhcpAgent(manager.Manager):
or isinstance(e, exceptions.NetworkNotFound)):
LOG.debug("Network %s has been deleted.", network.id)
else:
-LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.'),
+LOG.exception('Unable to %(action)s dhcp for %(net_id)s.',
{'net_id': network.id, 'action': action})

def schedule_resync(self, reason, network_id=None):
@@ -179,21 +179,21 @@ class DhcpAgent(manager.Manager):
or 'None' is one of the networks, sync all of the networks.
"""
only_nets = set([] if (not networks or None in networks) else networks)
-LOG.info(_LI('Synchronizing state'))
+LOG.info('Synchronizing state')
pool = eventlet.GreenPool(self.conf.num_sync_threads)
known_network_ids = set(self.cache.get_network_ids())

try:
active_networks = self.plugin_rpc.get_active_networks_info()
-LOG.info(_LI('All active networks have been fetched through RPC.'))
+LOG.info('All active networks have been fetched through RPC.')
active_network_ids = set(network.id for network in active_networks)
for deleted_id in known_network_ids - active_network_ids:
try:
self.disable_dhcp_helper(deleted_id)
except Exception as e:
self.schedule_resync(e, deleted_id)
-LOG.exception(_LE('Unable to sync network state on '
-'deleted network %s'), deleted_id)
+LOG.exception('Unable to sync network state on '
+'deleted network %s', deleted_id)

for network in active_networks:
if (not only_nets or # specifically resync all
@@ -204,7 +204,7 @@ class DhcpAgent(manager.Manager):
# we notify all ports in case some were created while the agent
# was down
self.dhcp_ready_ports |= set(self.cache.get_port_ids(only_nets))
-LOG.info(_LI('Synchronizing state complete'))
+LOG.info('Synchronizing state complete')

except Exception as e:
if only_nets:
@@ -212,7 +212,7 @@ class DhcpAgent(manager.Manager):
self.schedule_resync(e, network_id)
else:
self.schedule_resync(e)
-LOG.exception(_LE('Unable to sync network state.'))
+LOG.exception('Unable to sync network state.')

def _dhcp_ready_ports_loop(self):
"""Notifies the server of any ports that had reservations setup."""
@@ -226,12 +226,12 @@ class DhcpAgent(manager.Manager):
self.plugin_rpc.dhcp_ready_on_ports(ports_to_send)
continue
except oslo_messaging.MessagingTimeout:
-LOG.error(_LE("Timeout notifying server of ports ready. "
-"Retrying..."))
+LOG.error("Timeout notifying server of ports ready. "
+"Retrying...")
except Exception:
-LOG.exception(_LE("Failure notifying DHCP server of "
-"ready DHCP ports. Will retry on next "
-"iteration."))
+LOG.exception("Failure notifying DHCP server of "
+"ready DHCP ports. Will retry on next "
+"iteration.")
self.dhcp_ready_ports |= ports_to_send

def start_ready_ports_loop(self):
@@ -267,7 +267,7 @@ class DhcpAgent(manager.Manager):
return network
except Exception as e:
self.schedule_resync(e, network_id)
-LOG.exception(_LE('Network %s info call failed.'), network_id)
+LOG.exception('Network %s info call failed.', network_id)

def enable_dhcp_helper(self, network_id):
"""Enable DHCP for a network that meets enabling criteria."""
@@ -279,12 +279,12 @@ class DhcpAgent(manager.Manager):
def safe_configure_dhcp_for_network(self, network):
try:
network_id = network.get('id')
-LOG.info(_LI('Starting network %s dhcp configuration'), network_id)
+LOG.info('Starting network %s dhcp configuration', network_id)
self.configure_dhcp_for_network(network)
-LOG.info(_LI('Finished network %s dhcp configuration'), network_id)
+LOG.info('Finished network %s dhcp configuration', network_id)
except (exceptions.NetworkNotFound, RuntimeError):
-LOG.warning(_LW('Network %s may have been deleted and '
-'its resources may have already been disposed.'),
+LOG.warning('Network %s may have been deleted and '
+'its resources may have already been disposed.',
network.id)

def configure_dhcp_for_network(self, network):
@@ -411,7 +411,7 @@ class DhcpAgent(manager.Manager):
network = self.cache.get_network_by_id(updated_port.network_id)
if not network:
return
-LOG.info(_LI("Trigger reload_allocations for port %s"),
+LOG.info("Trigger reload_allocations for port %s",
updated_port)
driver_action = 'reload_allocations'
if self._is_port_on_this_agent(updated_port):
@@ -498,10 +498,10 @@ class DhcpAgent(manager.Manager):
if router_ports:
# Multiple router ports should not be allowed
if len(router_ports) > 1:
-LOG.warning(_LW("%(port_num)d router ports found on the "
-"metadata access network. Only the port "
-"%(port_id)s, for router %(router_id)s "
-"will be considered"),
+LOG.warning("%(port_num)d router ports found on the "
+"metadata access network. Only the port "
+"%(port_id)s, for router %(router_id)s "
+"will be considered",
{'port_num': len(router_ports),
'port_id': router_ports[0].id,
'router_id': router_ports[0].device_id})
@@ -733,18 +733,18 @@ class DhcpAgentWithStateReport(DhcpAgent):
agent_status = self.state_rpc.report_state(
ctx, self.agent_state, True)
if agent_status == n_const.AGENT_REVIVED:
-LOG.info(_LI("Agent has just been revived. "
-"Scheduling full sync"))
+LOG.info("Agent has just been revived. "
+"Scheduling full sync")
self.schedule_resync("Agent has just been revived")
except AttributeError:
# This means the server does not support report_state
-LOG.warning(_LW("Neutron server does not support state report. "
-"State report for this agent will be disabled."))
+LOG.warning("Neutron server does not support state report. "
+"State report for this agent will be disabled.")
self.heartbeat.stop()
self.run()
return
except Exception:
-LOG.exception(_LE("Failed reporting state!"))
+LOG.exception("Failed reporting state!")
return
if self.agent_state.pop('start_flag', None):
self.run()
@@ -753,7 +753,7 @@ class DhcpAgentWithStateReport(DhcpAgent):
"""Handle the agent_updated notification event."""
self.schedule_resync(_("Agent updated: %(payload)s") %
{"payload": payload})
-LOG.info(_LI("agent_updated by server side %s!"), payload)
+LOG.info("agent_updated by server side %s!", payload)

def after_start(self):
-LOG.info(_LI("DHCP agent started"))
+LOG.info("DHCP agent started")
@@ -20,7 +20,6 @@ from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_log import log as logging

-from neutron._i18n import _LE, _LW
from neutron.agent.l2 import l2_agent_extension
from neutron.agent.linux import bridge_lib
from neutron.conf.agent import l2_ext_fdb_population
@@ -73,9 +72,9 @@ class FdbPopulationAgentExtension(
try:
_stdout = bridge_lib.FdbInterface.show(device)
except RuntimeError as e:
-LOG.warning(_LW(
+LOG.warning(
'Unable to find FDB Interface %(device)s. '
-'Exception: %(e)s'), {'device': device, 'e': e})
+'Exception: %(e)s', {'device': device, 'e': e})
continue
self.device_to_macs[device] = _stdout.split()[::3]

@@ -94,10 +93,10 @@ class FdbPopulationAgentExtension(
try:
bridge_lib.FdbInterface.add(mac, device)
except RuntimeError as e:
-LOG.warning(_LW(
+LOG.warning(
'Unable to add mac %(mac)s '
'to FDB Interface %(device)s. '
-'Exception: %(e)s'),
+'Exception: %(e)s',
{'mac': mac, 'device': device, 'e': e})
return
self.device_to_macs[device].append(mac)
@@ -105,19 +104,19 @@ class FdbPopulationAgentExtension(
def delete_port(self, devices, port_id):
mac = self.portid_to_mac.get(port_id)
if mac is None:
-LOG.warning(_LW('Port Id %(port_id)s does not have a rule for '
-'devices %(devices)s in FDB table'),
-{'port_id': port_id, 'devices': devices})
+LOG.warning('Port Id %(port_id)s does not have a rule for '
+'devices %(devices)s in FDB table',
+{'port_id': port_id, 'devices': devices})
return
for device in devices:
if mac in self.device_to_macs[device]:
try:
bridge_lib.FdbInterface.delete(mac, device)
except RuntimeError as e:
-LOG.warning(_LW(
+LOG.warning(
'Unable to delete mac %(mac)s '
'from FDB Interface %(device)s. '
-'Exception: %(e)s'),
+'Exception: %(e)s',
{'mac': mac, 'device': device, 'e': e})
return
self.device_to_macs[device].remove(mac)
@@ -129,17 +128,17 @@ class FdbPopulationAgentExtension(
valid_driver_types = (linux_bridge_constants.EXTENSION_DRIVER_TYPE,
ovs_constants.EXTENSION_DRIVER_TYPE)
if driver_type not in valid_driver_types:
-LOG.error(_LE('FDB extension is only supported for OVS and '
-'linux bridge agent, currently uses '
-'%(driver_type)s'), {'driver_type': driver_type})
+LOG.error('FDB extension is only supported for OVS and '
+'linux bridge agent, currently uses '
+'%(driver_type)s', {'driver_type': driver_type})
sys.exit(1)

self.device_mappings = helpers.parse_mappings(
cfg.CONF.FDB.shared_physical_device_mappings, unique_keys=False)
devices = self._get_devices()
if not devices:
-LOG.error(_LE('Invalid configuration provided for FDB extension: '
-'no physical devices'))
+LOG.error('Invalid configuration provided for FDB extension: '
+'no physical devices')
sys.exit(1)
self.fdb_tracker = self.FdbTableTracker(devices)

@@ -20,7 +20,6 @@ from oslo_concurrency import lockutils
from oslo_log import log as logging
import six

-from neutron._i18n import _LW, _LI
from neutron.agent.l2 import l2_agent_extension
from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import events
@@ -107,8 +106,8 @@ class QosAgentDriver(object):
if rule_type in self.SUPPORTED_RULES:
yield rule
else:
-LOG.warning(_LW('Unsupported QoS rule type for %(rule_id)s: '
-'%(rule_type)s; skipping'),
+LOG.warning('Unsupported QoS rule type for %(rule_id)s: '
+'%(rule_type)s; skipping',
{'rule_id': rule.id, 'rule_type': rule_type})

def _handle_rule_delete(self, port, rule_type, ingress=False):
@@ -261,9 +260,9 @@ class QosAgentExtension(l2_agent_extension.L2AgentExtension):
qos_policy = self.resource_rpc.pull(
context, resources.QOS_POLICY, qos_policy_id)
if qos_policy is None:
-LOG.info(_LI("QoS policy %(qos_policy_id)s applied to port "
-"%(port_id)s is not available on server, "
-"it has been deleted. Skipping."),
+LOG.info("QoS policy %(qos_policy_id)s applied to port "
+"%(port_id)s is not available on server, "
+"it has been deleted. Skipping.",
{'qos_policy_id': qos_policy_id, 'port_id': port_id})
self._process_reset_port(port)
else:
@@ -12,7 +12,6 @@

from oslo_log import log

-from neutron._i18n import _LE
from neutron.agent import agent_extensions_manager as agent_ext_manager
from neutron.conf.agent import agent_extensions_manager as agent_ext_mgr_config

@@ -43,8 +42,8 @@ class L2AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
extension.obj.handle_port(context, data)
else:
LOG.error(
-_LE("Agent Extension '%(name)s' does not "
-"implement method handle_port"),
+"Agent Extension '%(name)s' does not "
+"implement method handle_port",
{'name': extension.name}
)

@@ -55,7 +54,7 @@ class L2AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
extension.obj.delete_port(context, data)
else:
LOG.error(
-_LE("Agent Extension '%(name)s' does not "
-"implement method delete_port"),
+"Agent Extension '%(name)s' does not "
+"implement method delete_port",
{'name': extension.name}
)
@@ -30,7 +30,7 @@ from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import timeutils

-from neutron._i18n import _, _LE, _LI, _LW
+from neutron._i18n import _
from neutron.agent.common import utils as common_utils
from neutron.agent.l3 import dvr
from neutron.agent.l3 import dvr_edge_ha_router
@@ -216,20 +216,20 @@ class L3NATAgent(ha.AgentMixin,
self.neutron_service_plugins = (
self.plugin_rpc.get_service_plugin_list(self.context))
except oslo_messaging.RemoteError as e:
-LOG.warning(_LW('l3-agent cannot check service plugins '
-'enabled at the neutron server when '
-'startup due to RPC error. It happens '
-'when the server does not support this '
-'RPC API. If the error is '
-'UnsupportedVersion you can ignore this '
-'warning. Detail message: %s'), e)
+LOG.warning('l3-agent cannot check service plugins '
+'enabled at the neutron server when '
+'startup due to RPC error. It happens '
+'when the server does not support this '
+'RPC API. If the error is '
+'UnsupportedVersion you can ignore this '
+'warning. Detail message: %s', e)
self.neutron_service_plugins = None
except oslo_messaging.MessagingTimeout as e:
-LOG.warning(_LW('l3-agent cannot contact neutron server '
-'to retrieve service plugins enabled. '
-'Check connectivity to neutron server. '
-'Retrying... '
-'Detailed message: %(msg)s.'), {'msg': e})
+LOG.warning('l3-agent cannot contact neutron server '
+'to retrieve service plugins enabled. '
+'Check connectivity to neutron server. '
+'Retrying... '
+'Detailed message: %(msg)s.', {'msg': e})
continue
break

@@ -263,15 +263,15 @@ class L3NATAgent(ha.AgentMixin,
The actual values are not verified for correctness.
"""
if not self.conf.interface_driver:
-msg = _LE('An interface driver must be specified')
+msg = 'An interface driver must be specified'
LOG.error(msg)
raise SystemExit(1)

if self.conf.ipv6_gateway:
# ipv6_gateway configured. Check for valid v6 link-local address.
try:
-msg = _LE("%s used in config as ipv6_gateway is not a valid "
-"IPv6 link-local address."),
+msg = ("%s used in config as ipv6_gateway is not a valid "
+"IPv6 link-local address.")
ip_addr = netaddr.IPAddress(self.conf.ipv6_gateway)
if ip_addr.version != 6 or not ip_addr.is_link_local():
LOG.error(msg, self.conf.ipv6_gateway)
@@ -352,13 +352,13 @@ class L3NATAgent(ha.AgentMixin,
except Exception:
with excutils.save_and_reraise_exception():
del self.router_info[router_id]
-LOG.exception(_LE('Error while initializing router %s'),
+LOG.exception('Error while initializing router %s',
router_id)
self.namespaces_manager.ensure_router_cleanup(router_id)
try:
ri.delete()
except Exception:
-LOG.exception(_LE('Error while deleting router %s'),
+LOG.exception('Error while deleting router %s',
router_id)

def _safe_router_removed(self, router_id):
@@ -368,7 +368,7 @@ class L3NATAgent(ha.AgentMixin,
self._router_removed(router_id)
self.l3_ext_manager.delete_router(self.context, router_id)
except Exception:
-LOG.exception(_LE('Error while deleting router %s'), router_id)
+LOG.exception('Error while deleting router %s', router_id)
return False
else:
return True
@@ -376,8 +376,8 @@ class L3NATAgent(ha.AgentMixin,
def _router_removed(self, router_id):
ri = self.router_info.get(router_id)
if ri is None:
-LOG.warning(_LW("Info for router %s was not found. "
-"Performing router cleanup"), router_id)
+LOG.warning("Info for router %s was not found. "
+"Performing router cleanup", router_id)
self.namespaces_manager.ensure_router_cleanup(router_id)
return

@@ -432,7 +432,7 @@ class L3NATAgent(ha.AgentMixin,
def _process_router_if_compatible(self, router):
if (self.conf.external_network_bridge and
not ip_lib.device_exists(self.conf.external_network_bridge)):
-LOG.error(_LE("The external network bridge '%s' does not exist"),
+LOG.error("The external network bridge '%s' does not exist",
self.conf.external_network_bridge)
return

@@ -494,7 +494,7 @@ class L3NATAgent(ha.AgentMixin,
routers = self.plugin_rpc.get_routers(self.context,
[update.id])
except Exception:
-msg = _LE("Failed to fetch router information for '%s'")
+msg = "Failed to fetch router information for '%s'"
LOG.exception(msg, update.id)
self._resync_router(update)
continue
@@ -521,12 +521,12 @@ class L3NATAgent(ha.AgentMixin,
log_verbose_exc(e.msg, router)
# Was the router previously handled by this agent?
if router['id'] in self.router_info:
-LOG.error(_LE("Removing incompatible router '%s'"),
+LOG.error("Removing incompatible router '%s'",
router['id'])
self._safe_router_removed(router['id'])
except Exception:
log_verbose_exc(
-_LE("Failed to process compatible router: %s") % update.id,
+"Failed to process compatible router: %s" % update.id,
router)
self._resync_router(update)
continue
@@ -612,20 +612,20 @@ class L3NATAgent(ha.AgentMixin,
self.sync_routers_chunk_size = max(
self.sync_routers_chunk_size / 2,
SYNC_ROUTERS_MIN_CHUNK_SIZE)
-LOG.error(_LE('Server failed to return info for routers in '
-'required time, decreasing chunk size to: %s'),
+LOG.error('Server failed to return info for routers in '
+'required time, decreasing chunk size to: %s',
self.sync_routers_chunk_size)
else:
-LOG.error(_LE('Server failed to return info for routers in '
-'required time even with min chunk size: %s. '
-'It might be under very high load or '
-'just inoperable'),
+LOG.error('Server failed to return info for routers in '
+'required time even with min chunk size: %s. '
+'It might be under very high load or '
+'just inoperable',
self.sync_routers_chunk_size)
raise
except oslo_messaging.MessagingException:
failed_routers = chunk or router_ids
-LOG.exception(_LE("Failed synchronizing routers '%s' "
-"due to RPC error"), failed_routers)
+LOG.exception("Failed synchronizing routers '%s' "
+"due to RPC error", failed_routers)
raise n_exc.AbortSyncRouters()

self.fullsync = False
@@ -659,7 +659,7 @@ class L3NATAgent(ha.AgentMixin,
# can have L3NATAgentWithStateReport as its base class instead of
# L3NATAgent.
eventlet.spawn_n(self._process_routers_loop)
-LOG.info(_LI("L3 agent started"))
+LOG.info("L3 agent started")

def create_pd_router_update(self):
router_id = None
@@ -721,22 +721,22 @@ class L3NATAgentWithStateReport(L3NATAgent):
self.agent_state,
True)
if agent_status == l3_constants.AGENT_REVIVED:
-LOG.info(_LI('Agent has just been revived. '
-'Doing a full sync.'))
+LOG.info('Agent has just been revived. '
+'Doing a full sync.')
self.fullsync = True
self.agent_state.pop('start_flag', None)
except AttributeError:
# This means the server does not support report_state
-LOG.warning(_LW("Neutron server does not support state report. "
-"State report for this agent will be disabled."))
+LOG.warning("Neutron server does not support state report. "
+"State report for this agent will be disabled.")
self.heartbeat.stop()
return
except Exception:
-LOG.exception(_LE("Failed reporting state!"))
+LOG.exception("Failed reporting state!")

def after_start(self):
eventlet.spawn_n(self._process_routers_loop)
-LOG.info(_LI("L3 agent started"))
+LOG.info("L3 agent started")
# Do the report state before we do the first full sync.
self._report_state()

@@ -745,4 +745,4 @@ class L3NATAgentWithStateReport(L3NATAgent):
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
self.fullsync = True
-LOG.info(_LI("agent_updated by server side %s!"), payload)
+LOG.info("agent_updated by server side %s!", payload)
@@ -15,7 +15,6 @@
from neutron_lib import constants as lib_constants
from oslo_log import log as logging

-from neutron._i18n import _LE
from neutron.agent.l3 import dvr_local_router
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import router_info as router
@@ -211,8 +210,8 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
super(DvrEdgeRouter, self)._update_routing_table(
operation, route, namespace=ns_name)
else:
-LOG.error(_LE("The SNAT namespace %s does not exist for "
-"the router."), ns_name)
+LOG.error("The SNAT namespace %s does not exist for "
+"the router.", ns_name)
super(DvrEdgeRouter, self).update_routing_table(operation, route)

def delete(self):
@@ -20,7 +20,7 @@ from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_utils import excutils

-from neutron._i18n import _, _LE, _LW
+from neutron._i18n import _
from neutron.agent.l3 import fip_rule_priority_allocator as frpa
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.l3 import namespaces
@@ -117,8 +117,8 @@ class FipNamespace(namespaces.Namespace):
yield
except Exception:
with excutils.save_and_reraise_exception():
-LOG.error(_LE('DVR: FIP namespace config failure '
-'for interface %s'), interface_name)
+LOG.error('DVR: FIP namespace config failure '
+'for interface %s', interface_name)

def create_or_update_gateway_port(self, agent_gateway_port):
interface_name = self.get_ext_device_name(agent_gateway_port['id'])
@@ -147,8 +147,8 @@ class FipNamespace(namespaces.Namespace):
with excutils.save_and_reraise_exception():
self.unsubscribe(agent_gateway_port['network_id'])
self.delete()
-LOG.exception(_LE('DVR: Gateway update in '
-'FIP namespace failed'))
+LOG.exception('DVR: Gateway update in '
+'FIP namespace failed')

def _create_gateway_port(self, ex_gw_port, interface_name):
"""Create namespace, request port creationg from Plugin,
@@ -296,8 +296,8 @@ class FipNamespace(namespaces.Namespace):
with excutils.save_and_reraise_exception():
self.unsubscribe(self.agent_gateway_port['network_id'])
self.agent_gateway_port = None
-LOG.exception(_LE('DVR: Gateway setup in FIP namespace '
-'failed'))
+LOG.exception('DVR: Gateway setup in FIP namespace '
+'failed')

# Now add the filter match rule for the table.
ip_rule = ip_lib.IPRule(namespace=self.get_name())
@@ -328,10 +328,10 @@ class FipNamespace(namespaces.Namespace):
# throw exceptions. Unsubscribe this external network so that
# the next call will trigger the interface to be plugged.
if not ipd.exists():
-LOG.warning(_LW('DVR: FIP gateway port with interface '
-'name: %(device)s does not exist in the given '
-'namespace: %(ns)s'), {'device': interface_name,
-'ns': ns_name})
+LOG.warning('DVR: FIP gateway port with interface '
+'name: %(device)s does not exist in the given '
+'namespace: %(ns)s', {'device': interface_name,
+'ns': ns_name})
msg = _('DVR: Gateway update route in FIP namespace failed, retry '
'should be attempted on next call')
raise n_exc.FloatingIpSetupException(msg)
@@ -22,7 +22,6 @@ from oslo_log import log as logging
from oslo_utils import excutils
import six

-from neutron._i18n import _LE, _LW
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_router_base
from neutron.agent.linux import ip_lib
@@ -239,16 +238,16 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
return True
else:
if operation == 'add':
-LOG.warning(_LW("Device %s does not exist so ARP entry "
-"cannot be updated, will cache "
-"information to be applied later "
-"when the device exists"),
+LOG.warning("Device %s does not exist so ARP entry "
+"cannot be updated, will cache "
+"information to be applied later "
+"when the device exists",
device)
self._cache_arp_entry(ip, mac, subnet_id, operation)
return False
except Exception:
with excutils.save_and_reraise_exception():
-LOG.exception(_LE("DVR: Failed updating arp entry"))
+LOG.exception("DVR: Failed updating arp entry")

def _set_subnet_arp_info(self, subnet_id):
"""Set ARP info retrieved from Plugin for existing ports."""
@@ -356,10 +355,10 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
priority=snat_idx)
except Exception:
if is_add:
-exc = _LE('DVR: error adding redirection logic')
+exc = 'DVR: error adding redirection logic'
else:
-exc = _LE('DVR: snat remove failed to clear the rule '
-'and device')
+exc = ('DVR: snat remove failed to clear the rule '
+'and device')
LOG.exception(exc)

def _snat_redirect_add(self, gateway, sn_port, sn_int):
@@ -12,7 +12,6 @@

from oslo_log import log as logging

-from neutron._i18n import _LE
from neutron.agent.l3 import router_info as router
from neutron.common import constants as l3_constants

@@ -47,8 +46,8 @@ class DvrRouterBase(router.RouterInfo):
if ip['subnet_id'] in subnet_ids:
return p

-LOG.error(_LE('DVR: SNAT port not found in the list '
-'%(snat_list)s for the given router '
-'internal port %(int_p)s'), {
-'snat_list': snat_ports,
-'int_p': int_port})
+LOG.error('DVR: SNAT port not found in the list '
+'%(snat_list)s for the given router '
+'internal port %(int_p)s', {
+'snat_list': snat_ports,
+'int_p': int_port})
@@ -20,7 +20,6 @@ from oslo_log import log as logging
from oslo_utils import fileutils
import webob

-from neutron._i18n import _LI
from neutron.agent.linux import utils as agent_utils
from neutron.common import constants
from neutron.notifiers import batch_notifier
@@ -88,8 +87,8 @@ class AgentMixin(object):
try:
return self.router_info[router_id]
except KeyError:
-LOG.info(_LI('Router %s is not managed by this agent. It was '
-'possibly deleted concurrently.'), router_id)
+LOG.info('Router %s is not managed by this agent. It was '
+'possibly deleted concurrently.', router_id)

def check_ha_state_for_router(self, router_id, current_state):
ri = self._get_router_info(router_id)
@@ -110,7 +109,7 @@ class AgentMixin(object):
return self.conf.ha_vrrp_advert_int

def enqueue_state_change(self, router_id, state):
-LOG.info(_LI('Router %(router_id)s transitioned to %(state)s'),
+LOG.info('Router %(router_id)s transitioned to %(state)s',
{'router_id': router_id,
'state': state})

@@ -21,7 +21,6 @@ from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_consts
from oslo_log import log as logging

-from neutron._i18n import _, _LE
from neutron.agent.l3 import namespaces
from neutron.agent.l3 import router_info as router
from neutron.agent.linux import external_process
@@ -93,7 +92,7 @@ class HaRouter(router.RouterInfo):
with open(ha_state_path, 'w') as f:
f.write(new_state)
except (OSError, IOError):
-LOG.error(_LE('Error while writing HA state for %s'),
+LOG.error('Error while writing HA state for %s',
self.router_id)

@property
@@ -112,8 +111,8 @@ class HaRouter(router.RouterInfo):
def initialize(self, process_monitor):
ha_port = self.router.get(n_consts.HA_INTERFACE_KEY)
if not ha_port:
-msg = _("Unable to process HA router %s without "
-"HA port") % self.router_id
+msg = ("Unable to process HA router %s without HA port" %
+self.router_id)
LOG.exception(msg)
raise Exception(msg)
super(HaRouter, self).initialize(process_monitor)
@@ -16,7 +16,6 @@ import os

from oslo_log import log as logging

-from neutron._i18n import _LW

LOG = logging.getLogger(__name__)

@@ -55,8 +54,8 @@ class ItemAllocator(object):
self.remembered[key] = self.ItemClass(saved_value)
except ValueError:
read_error = True
-LOG.warning(_LW("Invalid line in %(file)s, "
-"ignoring: %(line)s"),
+LOG.warning("Invalid line in %(file)s, "
+"ignoring: %(line)s",
{'file': state_file, 'line': line})

self.pool.difference_update(self.remembered.values())
@@ -21,7 +21,7 @@ import netaddr
from oslo_config import cfg
from oslo_log import log as logging

-from neutron._i18n import _, _LE
+from neutron._i18n import _
from neutron.agent.l3 import ha
from neutron.agent.linux import daemon
from neutron.agent.linux import ip_lib
@@ -86,8 +86,8 @@ class MonitorDaemon(daemon.Daemon):
# Remove this code once new keepalived versions are available.
self.send_garp(event)
except Exception:
-LOG.exception(_LE(
-'Failed to process or handle event for line %s'), iterable)
+LOG.exception('Failed to process or handle event for line %s',
+iterable)

def write_state_change(self, state):
with open(os.path.join(
@@ -15,7 +15,6 @@

from oslo_log import log

-from neutron._i18n import _LE
from neutron.agent import agent_extensions_manager as agent_ext_manager
from neutron.conf.agent import agent_extensions_manager as agent_ext_mgr_config

@@ -43,8 +42,8 @@ class L3AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
extension.obj.add_router(context, data)
else:
LOG.error(
-_LE("Agent Extension '%(name)s' does not "
-"implement method add_router"),
+"Agent Extension '%(name)s' does not "
+"implement method add_router",
{'name': extension.name}
)

@@ -55,8 +54,8 @@ class L3AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
extension.obj.update_router(context, data)
else:
LOG.error(
-_LE("Agent Extension '%(name)s' does not "
-"implement method update_router"),
+"Agent Extension '%(name)s' does not "
+"implement method update_router",
{'name': extension.name}
)

@@ -67,7 +66,7 @@ class L3AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
extension.obj.delete_router(context, data)
else:
LOG.error(
-_LE("Agent Extension '%(name)s' does not "
-"implement method delete_router"),
+"Agent Extension '%(name)s' does not "
+"implement method delete_router",
{'name': extension.name}
)
@@ -12,7 +12,6 @@

from oslo_log import log as logging

-from neutron._i18n import _LE
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import namespaces
@@ -119,8 +118,8 @@ class NamespaceManager(object):
namespaces = root_ip.get_namespaces()
return set(ns for ns in namespaces if self.is_managed(ns))
except RuntimeError:
-LOG.exception(_LE('RuntimeError in obtaining namespace list for '
-'namespace cleanup.'))
+LOG.exception('RuntimeError in obtaining namespace list for '
+'namespace cleanup.')
return set()

def ensure_router_cleanup(self, router_id):
@@ -144,4 +143,4 @@ class NamespaceManager(object):
self.process_monitor, ns_id, self.agent_conf, ns.name)
ns.delete()
except RuntimeError:
-LOG.exception(_LE('Failed to destroy stale namespace %s'), ns)
+LOG.exception('Failed to destroy stale namespace %s', ns)
@@ -18,7 +18,6 @@ import functools
from oslo_log import log as logging
from oslo_utils import excutils

-from neutron._i18n import _LE, _LW
from neutron.agent.linux import ip_lib

LOG = logging.getLogger(__name__)
@@ -65,8 +64,8 @@ def check_ns_existence(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if not self.exists():
-LOG.warning(_LW('Namespace %(name)s does not exist. Skipping '
-'%(func)s'),
+LOG.warning('Namespace %(name)s does not exist. Skipping '
+'%(func)s',
{'name': self.name, 'func': f.__name__})
return
try:
@@ -111,7 +110,7 @@ class Namespace(object):
try:
self.ip_wrapper_root.netns.delete(self.name)
except RuntimeError:
-msg = _LE('Failed trying to delete namespace: %s')
+msg = 'Failed trying to delete namespace: %s'
LOG.exception(msg, self.name)

def exists(self):
@@ -19,7 +19,7 @@ from neutron_lib import constants as lib_constants
from neutron_lib.utils import helpers
from oslo_log import log as logging

-from neutron._i18n import _, _LE, _LW
+from neutron._i18n import _
from neutron.agent.l3 import namespaces
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
@@ -298,8 +298,8 @@ class RouterInfo(object):
except RuntimeError:
# any exception occurred here should cause the floating IP
# to be set in error state
-LOG.warning(_LW("Unable to configure IP address for "
-"floating IP: %s"), fip['id'])
+LOG.warning("Unable to configure IP address for "
+"floating IP: %s", fip['id'])

def add_floating_ip(self, fip, interface_name, device):
raise NotImplementedError()
@@ -876,7 +876,7 @@ class RouterInfo(object):

except n_exc.FloatingIpSetupException:
# All floating IPs must be put in error state
-LOG.exception(_LE("Failed to process floating IPs."))
+LOG.exception("Failed to process floating IPs.")
fip_statuses = self.put_fips_in_error_state()
finally:
self.update_fip_statuses(fip_statuses)
@@ -902,7 +902,7 @@ class RouterInfo(object):
except (n_exc.FloatingIpSetupException,
n_exc.IpTablesApplyException):
# All floating IPs must be put in error state
-LOG.exception(_LE("Failed to process floating IPs."))
+LOG.exception("Failed to process floating IPs.")
fip_statuses = self.put_fips_in_error_state()
finally:
self.update_fip_statuses(fip_statuses)
@@ -1096,8 +1096,8 @@ class RouterInfo(object):
self.agent.pd.sync_router(self.router['id'])
self._process_external_on_delete()
else:
-LOG.warning(_LW("Can't gracefully delete the router %s: "
-"no router namespace found."), self.router['id'])
+LOG.warning("Can't gracefully delete the router %s: "
+"no router namespace found.", self.router['id'])

@common_utils.exception_logger()
def process(self):
@@ -20,7 +20,7 @@ import eventlet.queue
from neutron_lib.utils import helpers
from oslo_log import log as logging

-from neutron._i18n import _, _LE
+from neutron._i18n import _
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import utils as common_utils
@@ -182,7 +182,7 @@ class AsyncProcess(object):
# root and need to be killed via the same helper.
utils.kill_process(pid, kill_signal, self.run_as_root)
except Exception:
-LOG.exception(_LE('An error occurred while killing [%s].'),
+LOG.exception('An error occurred while killing [%s].',
self.cmd)
return False