Remove log translation and i18n

Log messages are no longer translated, so this change removes all use of
the _LE, _LI, and _LW translation markers, drops the now-unused
oslo_messaging._i18n module and the oslo.i18n requirement, and updates
the hacking checks accordingly. This simplifies logging and avoids
confusion in new contributions.
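
The transformation is mechanical. A minimal before/after sketch (the
logger setup and the sample error value are illustrative only; the
message strings are taken from the hunks below):

    import logging

    LOG = logging.getLogger(__name__)

    # Before (removed by this change): messages were wrapped in
    # level-specific markers imported from oslo_messaging._i18n, e.g.
    #   from oslo_messaging._i18n import _LW
    #   LOG.warning(_LW("Process forked after connection established!"))

    # After: the plain string is passed directly; arguments are still
    # passed separately so %-style interpolation stays lazy.
    LOG.warning("Process forked after connection established!")
    LOG.error("Socket failure: %s", "sample error value")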

Change-Id: I9ddb6595fc52e46ed8844e39d2fa71029c90b65c
Closes-Bug: #1674567
Hervé Beraud 2019-04-11 10:47:24 +02:00
parent 433d34ed41
commit 4f385720d7
18 changed files with 140 additions and 268 deletions


@ -46,7 +46,6 @@ from oslo_messaging._drivers.amqp1_driver.addressing import keyify
from oslo_messaging._drivers.amqp1_driver.addressing import SERVICE_NOTIFY
from oslo_messaging._drivers.amqp1_driver.addressing import SERVICE_RPC
from oslo_messaging._drivers.amqp1_driver import eventloop
from oslo_messaging._i18n import _LE, _LI, _LW
from oslo_messaging import exceptions
from oslo_messaging.target import Target
from oslo_messaging import transport
@ -311,7 +310,7 @@ class MessageDispositionTask(Task):
self._disposition(self._released)
except Exception as e:
# there's really nothing we can do about a failed disposition.
LOG.exception(_LE("Message acknowledgment failed: %s"), e)
LOG.exception("Message acknowledgment failed: %s", e)
class Sender(pyngus.SenderEventHandler):
@ -418,8 +417,8 @@ class Sender(pyngus.SenderEventHandler):
def sender_remote_closed(self, sender_link, pn_condition):
# The remote has initiated a close. This could happen when the message
# bus is shutting down, or it detected an error
LOG.warning(_LW("sender %(addr)s failed due to remote initiated close:"
" condition=%(cond)s"),
LOG.warning("Sender %(addr)s failed due to remote initiated close:"
" condition=%(cond)s",
{'addr': self._address, 'cond': pn_condition})
self._link.close()
# sender_closed() will be called once the link completes closing
@ -429,7 +428,7 @@ class Sender(pyngus.SenderEventHandler):
def sender_failed(self, sender_link, error):
"""Protocol error occurred."""
LOG.warning(_LW("sender %(addr)s failed error=%(error)s"),
LOG.warning("Sender %(addr)s failed error=%(error)s",
{'addr': self._address, 'error': error})
self._handle_sender_closed(str(error))
@ -639,13 +638,13 @@ class Replies(pyngus.ReceiverEventHandler):
receiver link has initiated closing the connection.
"""
if pn_condition:
LOG.error(_LE("Reply subscription closed by peer: %s"),
LOG.error("Reply subscription closed by peer: %s",
pn_condition)
receiver.close()
def receiver_failed(self, receiver_link, error):
"""Protocol error occurred."""
LOG.error(_LE("Link to reply queue failed. error=%(error)s"),
LOG.error("Link to reply queue failed. error=%(error)s",
{"error": error})
self._on_down()
@ -661,8 +660,8 @@ class Replies(pyngus.ReceiverEventHandler):
self._correlation[key](message)
receiver.message_accepted(handle)
except KeyError:
LOG.warning(_LW("Can't find receiver for response msg id=%s, "
"dropping!"), key)
LOG.warning("Can't find receiver for response msg id=%s, "
"dropping!", key)
receiver.message_modified(handle, True, True, None)
# ensure we have enough credit
if receiver.capacity <= self._capacity_low:
@ -728,13 +727,13 @@ class Server(pyngus.ReceiverEventHandler):
"addr": receiver.source_address or receiver.target_address,
"err_msg": pn_condition
}
LOG.error(_LE("Server subscription %(addr)s closed "
"by peer: %(err_msg)s"), vals)
LOG.error("Server subscription %(addr)s closed "
"by peer: %(err_msg)s", vals)
receiver.close()
def receiver_failed(self, receiver_link, error):
"""Protocol error occurred."""
LOG.error(_LE("Listener link queue failed. error=%(error)s"),
LOG.error("Listener link queue failed. error=%(error)s",
{"error": error})
self.receiver_closed(receiver_link)
@ -960,7 +959,7 @@ class Controller(pyngus.ConnectionEventHandler):
def shutdown(self, timeout=30):
"""Shutdown the messaging service."""
LOG.info(_LI("Shutting down the AMQP 1.0 connection"))
LOG.info("Shutting down the AMQP 1.0 connection")
if self.processor:
self.processor.wakeup(self._start_shutdown)
LOG.debug("Waiting for eventloop to exit")
@ -1092,7 +1091,7 @@ class Controller(pyngus.ConnectionEventHandler):
try:
self._tasks.get(False)._execute(self)
except Exception as e:
LOG.exception(_LE("Error processing task: %s"), e)
LOG.exception("Error processing task: %s", e)
count += 1
# if we hit _max_task_batch, resume task processing later:
@ -1133,7 +1132,7 @@ class Controller(pyngus.ConnectionEventHandler):
point, we are ready to receive messages, so start all pending RPC
requests.
"""
LOG.info(_LI("Messaging is active (%(hostname)s:%(port)s%(vhost)s)"),
LOG.info("Messaging is active (%(hostname)s:%(port)s%(vhost)s)",
{'hostname': self.hosts.current.hostname,
'port': self.hosts.current.port,
'vhost': ("/" + self.hosts.virtual_host
@ -1156,7 +1155,7 @@ class Controller(pyngus.ConnectionEventHandler):
def socket_error(self, error):
"""Called by eventloop when a socket error occurs."""
LOG.error(_LE("Socket failure: %s"), error)
LOG.error("Socket failure: %s", error)
self._handle_connection_loss(str(error))
# Pyngus connection event callbacks (and their helpers), all invoked from
@ -1217,7 +1216,7 @@ class Controller(pyngus.ConnectionEventHandler):
# connection. Acknowledge the close, and try to reconnect/failover
# later once the connection has closed (connection_closed is called).
if reason:
LOG.info(_LI("Connection closed by peer: %s"), reason)
LOG.info("Connection closed by peer: %s", reason)
self._detach_senders()
self._detach_servers()
self.reply_link.detach()
@ -1230,8 +1229,8 @@ class Controller(pyngus.ConnectionEventHandler):
"""
if outcome == proton.SASL.OK:
return
LOG.error(_LE("AUTHENTICATION FAILURE: Cannot connect to "
"%(hostname)s:%(port)s as user %(username)s"),
LOG.error("AUTHENTICATION FAILURE: Cannot connect to "
"%(hostname)s:%(port)s as user %(username)s",
{'hostname': self.hosts.current.hostname,
'port': self.hosts.current.port,
'username': self.hosts.current.username})
@ -1252,7 +1251,7 @@ class Controller(pyngus.ConnectionEventHandler):
# service. Try to re-establish the connection:
if not self._reconnecting:
self._reconnecting = True
LOG.info(_LI("delaying reconnect attempt for %d seconds"),
LOG.info("Delaying reconnect attempt for %d seconds",
self._delay)
self.processor.defer(lambda: self._do_reconnect(reason),
self._delay)
@ -1270,7 +1269,7 @@ class Controller(pyngus.ConnectionEventHandler):
if not self._closing:
self._hard_reset(reason)
host = self.hosts.next()
LOG.info(_LI("Reconnecting to: %(hostname)s:%(port)s"),
LOG.info("Reconnecting to: %(hostname)s:%(port)s",
{'hostname': host.hostname, 'port': host.port})
self._socket_connection.connect(host)


@ -26,10 +26,6 @@ import oslo_messaging
from oslo_messaging._drivers import amqp as rpc_amqp
from oslo_messaging._drivers import base
from oslo_messaging._drivers import common as rpc_common
from oslo_messaging._i18n import _
from oslo_messaging._i18n import _LE
from oslo_messaging._i18n import _LI
from oslo_messaging._i18n import _LW
from oslo_messaging import MessageDeliveryFailure
__all__ = ['AMQPDriverBase']
@ -160,12 +156,14 @@ class AMQPIncomingMessage(base.RpcIncomingMessage):
time.sleep(0.25)
else:
self._obsolete_reply_queues.add(self.reply_q, self.msg_id)
LOG.info(_LI("The reply %(msg_id)s cannot be sent "
"%(reply_q)s reply queue don't exist after "
"%(duration)s sec abandoning..."), {
'msg_id': self.msg_id,
'reply_q': self.reply_q,
'duration': duration})
infos = {
'msg_id': self.msg_id,
'reply_q': self.reply_q,
'duration': duration
}
LOG.info("The reply %(msg_id)s cannot be sent "
"%(reply_q)s reply queue don't exist after "
"%(duration)s sec abandoning...", infos)
return
def heartbeat(self):
@ -259,9 +257,8 @@ class ObsoleteReplyQueuesCache(object):
self._no_reply_log(reply_q, msg_id)
def _no_reply_log(self, reply_q, msg_id):
LOG.warning(_LW("%(reply_queue)s doesn't exists, drop reply to "
"%(msg_id)s"), {'reply_queue': reply_q,
'msg_id': msg_id})
LOG.warning("%(reply_queue)s doesn't exists, drop reply to "
"%(msg_id)s", {'reply_queue': reply_q, "msg_id": msg_id})
class AMQPListener(base.PollStyleListener):
@ -406,7 +403,7 @@ class ReplyWaiters(object):
def put(self, msg_id, message_data):
queue = self._queues.get(msg_id)
if not queue:
LOG.info(_LI('No calling threads waiting for msg_id : %s'), msg_id)
LOG.info('No calling threads waiting for msg_id : %s', msg_id)
LOG.debug(' queues: %(queues)s, message: %(message)s',
{'queues': len(self._queues), 'message': message_data})
else:
@ -416,10 +413,10 @@ class ReplyWaiters(object):
self._queues[msg_id] = moves.queue.Queue()
queues_length = len(self._queues)
if queues_length > self._wrn_threshold:
LOG.warning(_LW('Number of call queues is %(queues_length)s, '
'greater than warning threshold: %(old_threshold)s'
'. There could be a leak. Increasing threshold to:'
' %(threshold)s'),
LOG.warning('Number of call queues is %(queues_length)s, '
'greater than warning threshold: %(old_threshold)s. '
'There could be a leak. Increasing threshold to: '
'%(threshold)s',
{'queues_length': queues_length,
'old_threshold': self._wrn_threshold,
'threshold': self._wrn_threshold * 2})
@ -460,8 +457,7 @@ class ReplyWaiter(object):
current_timeout = max(current_timeout * 2,
ACK_REQUEUE_EVERY_SECONDS_MAX)
except Exception:
LOG.exception(_LE("Failed to process incoming message, "
"retrying..."))
LOG.exception("Failed to process incoming message, retrying..")
else:
current_timeout = ACK_REQUEUE_EVERY_SECONDS_MIN
@ -485,7 +481,7 @@ class ReplyWaiter(object):
@staticmethod
def _raise_timeout_exception(msg_id):
raise oslo_messaging.MessagingTimeout(
_('Timed out waiting for a reply to message ID %s.') % msg_id)
'Timed out waiting for a reply to message ID %s.' % msg_id)
def _process_reply(self, data):
self.msg_id_cache.check_duplicate_message(data)
@ -612,11 +608,9 @@ class AMQPDriverBase(base.BaseDriver):
with self._get_connection(rpc_common.PURPOSE_SEND) as conn:
if notify:
exchange = self._get_exchange(target)
log_msg += "NOTIFY exchange '%(exchange)s'" \
" topic '%(topic)s'" % {
'exchange': exchange,
'topic': target.topic}
LOG.debug(log_msg)
LOG.debug(log_msg + "NOTIFY exchange '%(exchange)s'"
" topic '%(topic)s'", {'exchange': exchange,
'topic': target.topic})
conn.notify_send(exchange, target.topic, msg, retry=retry)
elif target.fanout:
log_msg += "FANOUT topic '%(topic)s'" % {
@ -628,11 +622,9 @@ class AMQPDriverBase(base.BaseDriver):
exchange = self._get_exchange(target)
if target.server:
topic = '%s.%s' % (target.topic, target.server)
log_msg += "exchange '%(exchange)s'" \
" topic '%(topic)s'" % {
'exchange': exchange,
'topic': topic}
LOG.debug(log_msg)
LOG.debug(log_msg + "exchange '%(exchange)s'"
" topic '%(topic)s'", {'exchange': exchange,
'topic': topic})
conn.topic_send(exchange_name=exchange, topic=topic,
msg=msg, timeout=timeout, retry=retry)


@ -32,8 +32,6 @@ from oslo_utils import timeutils
import six
import oslo_messaging
from oslo_messaging._i18n import _
from oslo_messaging._i18n import _LE, _LW
from oslo_messaging import _utils as utils
LOG = logging.getLogger(__name__)
@ -81,7 +79,7 @@ _REMOTE_POSTFIX = '_Remote'
class RPCException(Exception):
msg_fmt = _("An unknown RPC related exception occurred.")
msg_fmt = "An unknown RPC related exception occurred."
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
@ -93,8 +91,8 @@ class RPCException(Exception):
except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation, '
'kwargs are:'))
LOG.exception('Exception in string format operation, '
'kwargs are:')
for name, value in kwargs.items():
LOG.error("%s: %s", name, value)
# at least get the core message out if something happened
@ -109,9 +107,9 @@ class Timeout(RPCException):
This exception is raised if the rpc_response_timeout is reached while
waiting for a response from the remote side.
"""
msg_fmt = _('Timeout while waiting on RPC response - '
'topic: "%(topic)s", RPC method: "%(method)s" '
'info: "%(info)s"')
msg_fmt = ('Timeout while waiting on RPC response - '
'topic: "%(topic)s", RPC method: "%(method)s" '
'info: "%(info)s"')
def __init__(self, info=None, topic=None, method=None):
"""Initiates Timeout object.
@ -126,31 +124,31 @@ class Timeout(RPCException):
self.method = method
super(Timeout, self).__init__(
None,
info=info or _('<unknown>'),
topic=topic or _('<unknown>'),
method=method or _('<unknown>'))
info=info or '<unknown>',
topic=topic or '<unknown>',
method=method or '<unknown>')
class DuplicateMessageError(RPCException):
msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")
msg_fmt = "Found duplicate message(%(msg_id)s). Skipping it."
class InvalidRPCConnectionReuse(RPCException):
msg_fmt = _("Invalid reuse of an RPC connection.")
msg_fmt = "Invalid reuse of an RPC connection."
class UnsupportedRpcVersion(RPCException):
msg_fmt = _("Specified RPC version, %(version)s, not supported by "
"this endpoint.")
msg_fmt = ("Specified RPC version, %(version)s, not supported by "
"this endpoint.")
class UnsupportedRpcEnvelopeVersion(RPCException):
msg_fmt = _("Specified RPC envelope version, %(version)s, "
"not supported by this endpoint.")
msg_fmt = ("Specified RPC envelope version, %(version)s, "
"not supported by this endpoint.")
class RpcVersionCapError(RPCException):
msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
msg_fmt = "Specified RPC version cap, %(version_cap)s, is too low"
class Connection(object):
@ -236,7 +234,7 @@ def deserialize_remote_exception(data, allowed_remote_exmods):
failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
except (AttributeError, TypeError, ImportError) as error:
LOG.warning(_LW("Failed to rebuild remote exception due to error: %s"),
LOG.warning("Failed to rebuild remote exception due to error: %s",
six.text_type(error))
return oslo_messaging.RemoteError(name, failure.get('message'), trace)
@ -450,7 +448,7 @@ class ConnectionContext(Connection):
try:
self.connection.reset()
except Exception:
LOG.exception(_LE("Fail to reset the connection, drop it"))
LOG.exception("Fail to reset the connection, drop it")
try:
self.connection.close()
except Exception as exc:


@ -36,7 +36,6 @@ from oslo_messaging._drivers.amqp1_driver.eventloop import compute_timeout
from oslo_messaging._drivers.amqp1_driver import opts
from oslo_messaging._drivers import base
from oslo_messaging._drivers import common
from oslo_messaging._i18n import _LI, _LW
proton = importutils.try_import('proton')
@ -260,7 +259,7 @@ class ProtonDriver(base.BaseDriver):
bad_opts = set(ps).difference(['rpc-call', 'rpc-reply',
'rpc-cast', 'notify'])
if bad_opts:
LOG.warning(_LW("Ignoring unrecognized pre_settle value(s): %s"),
LOG.warning("Ignoring unrecognized pre_settle value(s): %s",
" ".join(bad_opts))
def _ensure_connect_called(func):
@ -284,8 +283,8 @@ class ProtonDriver(base.BaseDriver):
# touch the existing Controller - it is owned by the
# parent. Best we can do here is simply drop it and
# hope we get lucky.
LOG.warning(_LW("Process forked after connection "
"established!"))
LOG.warning("Process forked after connection "
"established!")
self._ctrl = None
# Create a Controller that connects to the messaging
# service:
@ -434,7 +433,7 @@ class ProtonDriver(base.BaseDriver):
if self._ctrl:
self._ctrl.shutdown()
self._ctrl = None
LOG.info(_LI("AMQP 1.0 messaging driver shutdown"))
LOG.info("AMQP 1.0 messaging driver shutdown")
def require_features(self, requeue=True):
pass


@ -43,10 +43,6 @@ from oslo_messaging._drivers import amqpdriver
from oslo_messaging._drivers import base
from oslo_messaging._drivers import common as rpc_common
from oslo_messaging._drivers import pool
from oslo_messaging._i18n import _
from oslo_messaging._i18n import _LE
from oslo_messaging._i18n import _LI
from oslo_messaging._i18n import _LW
from oslo_messaging import _utils
from oslo_messaging import exceptions
@ -273,11 +269,11 @@ class Consumer(object):
'queue': self.queue_name,
'err_str': exc
}
LOG.error(_LE('Internal amqp error (541) '
'during queue declare,'
'retrying in %(sleep_time)s seconds. '
'Queue: [%(queue)s], '
'error message: [%(err_str)s]'), info)
LOG.error('Internal amqp error (541) '
'during queue declare,'
'retrying in %(sleep_time)s seconds. '
'Queue: [%(queue)s], '
'error message: [%(err_str)s]', info)
time.sleep(interval)
self.queue.declare()
else:
@ -338,8 +334,7 @@ class Consumer(object):
try:
self.callback(RabbitMessage(message))
except Exception:
LOG.exception(_LE("Failed to process message"
" ... skipping it."))
LOG.exception("Failed to process message ... skipping it.")
message.reject()
@ -458,9 +453,9 @@ class Connection(object):
self._url = ''
if url.hosts:
if url.transport.startswith('kombu+'):
LOG.warning(_LW('Selecting the kombu transport through the '
'transport url (%s) is a experimental feature '
'and this is not yet supported.'),
LOG.warning('Selecting the kombu transport through the '
'transport url (%s) is a experimental feature '
'and this is not yet supported.',
url.transport)
if len(url.hosts) > 1:
random.shuffle(url.hosts)
@ -605,7 +600,7 @@ class Connection(object):
try:
return cls._SSL_PROTOCOLS[key]
except KeyError:
raise RuntimeError(_("Invalid SSL version : %s") % version)
raise RuntimeError("Invalid SSL version : %s" % version)
def _transform_transport_url(self, url, host, default_username='',
default_password='', default_hostname=''):
@ -650,11 +645,11 @@ class Connection(object):
@staticmethod
def _on_connection_blocked(reason):
LOG.error(_LE("The broker has blocked the connection: %s"), reason)
LOG.error("The broker has blocked the connection: %s", reason)
@staticmethod
def _on_connection_unblocked():
LOG.info(_LI("The broker has unblocked the connection"))
LOG.info("The broker has unblocked the connection")
def ensure_connection(self):
# NOTE(sileht): we reset the channel and ensure
@ -681,10 +676,10 @@ class Connection(object):
current_pid = os.getpid()
if self._initial_pid != current_pid:
LOG.warning(_LW("Process forked after connection established! "
"This can result in unpredictable behavior. "
"See: https://docs.openstack.org/oslo.messaging/"
"latest/reference/transport.html"))
LOG.warning("Process forked after connection established! "
"This can result in unpredictable behavior. "
"See: https://docs.openstack.org/oslo.messaging/"
"latest/reference/transport.html")
self._initial_pid = current_pid
if retry is None or retry < 0:
@ -705,15 +700,15 @@ class Connection(object):
info.update(self._get_connection_info(conn_error=True))
if 'Socket closed' in six.text_type(exc):
LOG.error(_LE('[%(connection_id)s] AMQP server'
' %(hostname)s:%(port)s closed'
' the connection. Check login credentials:'
' %(err_str)s'), info)
LOG.error('[%(connection_id)s] AMQP server'
' %(hostname)s:%(port)s closed'
' the connection. Check login credentials:'
' %(err_str)s', info)
else:
LOG.error(_LE('[%(connection_id)s] AMQP server on '
'%(hostname)s:%(port)s is unreachable: '
'%(err_str)s. Trying again in '
'%(sleep_time)d seconds.'), info)
LOG.error('[%(connection_id)s] AMQP server on '
'%(hostname)s:%(port)s is unreachable: '
'%(err_str)s. Trying again in '
'%(sleep_time)d seconds.', info)
# XXX(nic): when reconnecting to a RabbitMQ cluster
# with mirrored queues in use, the attempt to release the
@ -737,9 +732,9 @@ class Connection(object):
self._set_current_channel(new_channel)
self.set_transport_socket_timeout()
LOG.info(_LI('[%(connection_id)s] Reconnected to AMQP server on '
'%(hostname)s:%(port)s via [%(transport)s] client '
'with port %(client_port)s.'),
LOG.info('[%(connection_id)s] Reconnected to AMQP server on '
'%(hostname)s:%(port)s via [%(transport)s] client '
'with port %(client_port)s.',
self._get_connection_info())
def execute_method(channel):
@ -769,9 +764,9 @@ class Connection(object):
# is still broken
info = {'err_str': exc, 'retry': retry}
info.update(self.connection.info())
msg = _('Unable to connect to AMQP server on '
'%(hostname)s:%(port)s after %(retry)s '
'tries: %(err_str)s') % info
msg = ('Unable to connect to AMQP server on '
'%(hostname)s:%(port)s after %(retry)s '
'tries: %(err_str)s' % info)
LOG.error(msg)
raise exceptions.MessageDeliveryFailure(msg)
@ -837,8 +832,8 @@ class Connection(object):
if self.connection.supports_heartbeats:
return True
elif not self._heartbeat_support_log_emitted:
LOG.warning(_LW("Heartbeat support requested but it is not "
"supported by the kombu driver or the broker"))
LOG.warning("Heartbeat support requested but it is not "
"supported by the kombu driver or the broker")
self._heartbeat_support_log_emitted = True
return False
@ -936,12 +931,12 @@ class Connection(object):
pass
except (socket.timeout,
kombu.exceptions.OperationalError) as exc:
LOG.info(_LI("A recoverable connection/channel error "
"occurred, trying to reconnect: %s"), exc)
LOG.info("A recoverable connection/channel error "
"occurred, trying to reconnect: %s", exc)
self.ensure_connection()
except Exception:
LOG.warning(_LW("Unexpected error during heartbeart "
"thread processing, retrying..."))
LOG.warning("Unexpected error during heartbeart "
"thread processing, retrying...")
LOG.debug('Exception', exc_info=True)
self._heartbeat_exit_event.wait(
@ -955,8 +950,8 @@ class Connection(object):
def _connect_error(exc):
log_info = {'topic': consumer.routing_key, 'err_str': exc}
LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s"), log_info)
LOG.error("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s", log_info)
def _declare_consumer():
consumer.declare(self)
@ -989,8 +984,7 @@ class Connection(object):
def _error_callback(exc):
_recoverable_error_callback(exc)
LOG.error(_LE('Failed to consume message from queue: %s'),
exc)
LOG.error('Failed to consume message from queue: %s', exc)
def _consume():
# NOTE(sileht): in case the acknowledgment or requeue of a
@ -1097,8 +1091,8 @@ class Connection(object):
def _error_callback(exc):
log_info = {'topic': exchange.name, 'err_str': exc}
LOG.error(_LE("Failed to publish message to topic "
"'%(topic)s': %(err_str)s"), log_info)
LOG.error("Failed to publish message to topic "
"'%(topic)s': %(err_str)s", log_info)
LOG.debug('Exception', exc_info=exc)
method = functools.partial(method, exchange, msg, routing_key, timeout)

View File

@ -1,35 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See https://docs.openstack.org/oslo.i18n/latest/user/index.html
"""
import oslo_i18n
_translators = oslo_i18n.TranslatorFactory(domain='oslo_messaging')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical


@ -95,29 +95,12 @@ class CheckForLoggingIssues(BaseASTChecker):
NONDEBUG_CHECK_DESC = 'O325 Not using translating helper for logging'
EXCESS_HELPER_CHECK_DESC = 'O326 Using hints when _ is necessary'
LOG_MODULES = ('logging')
I18N_MODULES = (
'oslo_messaging._i18n._',
'oslo_messaging._i18n._LI',
'oslo_messaging._i18n._LW',
'oslo_messaging._i18n._LE',
'oslo_messaging._i18n._LC',
)
TRANS_HELPER_MAP = {
'debug': None,
'info': '_LI',
'warn': '_LW',
'warning': '_LW',
'error': '_LE',
'exception': '_LE',
'critical': '_LC',
}
def __init__(self, tree, filename):
super(CheckForLoggingIssues, self).__init__(tree, filename)
self.logger_names = []
self.logger_module_names = []
self.i18n_names = {}
# NOTE(dstanek): this kinda accounts for scopes when talking
# about only leaf node in the graph
@ -136,11 +119,9 @@ class CheckForLoggingIssues(BaseASTChecker):
self.visit(value)
def _filter_imports(self, module_name, alias):
"""Keeps lists of logging and i18n imports."""
"""Keeps lists of logging."""
if module_name in self.LOG_MODULES:
self.logger_module_names.append(alias.asname or alias.name)
elif module_name in self.I18N_MODULES:
self.i18n_names[alias.asname or alias.name] = alias.name
def visit_Import(self, node):
for alias in node.names:
@ -174,18 +155,6 @@ class CheckForLoggingIssues(BaseASTChecker):
This handles the simple case:
name = [logging_module].getLogger(...)
- or -
name = [i18n_name](...)
And some much more comple ones:
name = [i18n_name](...) % X
- or -
self.name = [i18n_name](...) % X
"""
attr_node_types = (ast.Name, ast.Attribute)
@ -199,8 +168,7 @@ class CheckForLoggingIssues(BaseASTChecker):
if (isinstance(node.value, ast.BinOp) and
isinstance(node.value.op, ast.Mod)):
if (isinstance(node.value.left, ast.Call) and
isinstance(node.value.left.func, ast.Name) and
node.value.left.func.id in self.i18n_names):
isinstance(node.value.left.func, ast.Name)):
# NOTE(dstanek): this is done to match cases like:
# `msg = _('something %s') % x`
node = ast.Assign(value=node.value.left)
@ -210,9 +178,7 @@ class CheckForLoggingIssues(BaseASTChecker):
self.assignments.pop(target_name, None)
return super(CheckForLoggingIssues, self).generic_visit(node)
# is this a call to an i18n function?
if (isinstance(node.value.func, ast.Name) and
node.value.func.id in self.i18n_names):
if isinstance(node.value.func, ast.Name):
self.assignments[target_name] = node.value.func.id
return super(CheckForLoggingIssues, self).generic_visit(node)
@ -250,8 +216,7 @@ class CheckForLoggingIssues(BaseASTChecker):
self.add_error(msg, message=self.USING_DEPRECATED_WARN)
# must be a logger instance and one of the support logging methods
if (obj_name not in self.logger_names or
method_name not in self.TRANS_HELPER_MAP):
if obj_name not in self.logger_names:
return super(CheckForLoggingIssues, self).generic_visit(node)
# the call must have arguments
@ -260,21 +225,16 @@ class CheckForLoggingIssues(BaseASTChecker):
if method_name == 'debug':
self._process_debug(node)
elif method_name in self.TRANS_HELPER_MAP:
self._process_non_debug(node, method_name)
return super(CheckForLoggingIssues, self).generic_visit(node)
def _process_debug(self, node):
msg = node.args[0] # first arg to a logging method is the msg
# if first arg is a call to a i18n name
if (isinstance(msg, ast.Call) and
isinstance(msg.func, ast.Name) and
msg.func.id in self.i18n_names):
isinstance(msg.func, ast.Name)):
self.add_error(msg, message=self.DEBUG_CHECK_DESC)
# if the first arg is a reference to a i18n call
elif (isinstance(msg, ast.Name) and
msg.id in self.assignments and
not self._is_raised_later(node, msg.id)):
@ -283,30 +243,9 @@ class CheckForLoggingIssues(BaseASTChecker):
def _process_non_debug(self, node, method_name):
msg = node.args[0] # first arg to a logging method is the msg
# if first arg is a call to a i18n name
if isinstance(msg, ast.Call):
try:
func_name = msg.func.id
except AttributeError:
# in the case of logging only an exception, the msg function
# will not have an id associated with it, for instance:
# LOG.warning(six.text_type(e))
return
# the function name is the correct translation helper
# for the logging method
if func_name == self.TRANS_HELPER_MAP[method_name]:
return
# the function name is an alias for the correct translation
# helper for the loggine method
if (self.i18n_names[func_name] ==
self.TRANS_HELPER_MAP[method_name]):
return
self.add_error(msg, message=self.NONDEBUG_CHECK_DESC)
# if the first arg is not a reference to the correct i18n hint
elif isinstance(msg, ast.Name):
# FIXME(dstanek): to make sure more robust we should be checking
@ -320,12 +259,9 @@ class CheckForLoggingIssues(BaseASTChecker):
if msg.id not in self.assignments:
return
helper_method_name = self.TRANS_HELPER_MAP[method_name]
if (self.assignments[msg.id] != helper_method_name and
not self._is_raised_later(node, msg.id)):
if self._is_raised_later(node, msg.id):
self.add_error(msg, message=self.NONDEBUG_CHECK_DESC)
elif (self.assignments[msg.id] == helper_method_name and
self._is_raised_later(node, msg.id)):
elif self._is_raised_later(node, msg.id):
self.add_error(msg, message=self.EXCESS_HELPER_CHECK_DESC)
def _is_raised_later(self, node, name):


@ -20,7 +20,6 @@ from oslo_utils import fnmatch
from stevedore import dispatch
import yaml
from oslo_messaging._i18n import _LI, _LW
from oslo_messaging.notify import notifier
@ -78,7 +77,7 @@ class RoutingDriver(notifier.Driver):
invoke_on_load=True,
invoke_args=None)
if not list(self.plugin_manager):
LOG.warning(_LW("Failed to load any notifiers for %s"),
LOG.warning("Failed to load any notifiers for %s",
self.NOTIFIER_PLUGIN_NAMESPACE)
def _get_drivers_for_message(self, group, event_type, priority):
@ -115,8 +114,8 @@ class RoutingDriver(notifier.Driver):
"""Emit the notification.
"""
# accepted_drivers is passed in as a result of the map() function
LOG.info(_LI("Routing '%(event)s' notification to '%(driver)s' "
"driver"),
LOG.info("Routing '%(event)s' notification to '%(driver)s' "
"driver",
{'event': message.get('event_type'), 'driver': ext.name})
ext.obj.notify(context, message, priority, retry)


@ -20,7 +20,6 @@ import operator
import six
from oslo_messaging._i18n import _LW
from oslo_messaging import dispatcher
from oslo_messaging import serializer as msg_serializer
@ -59,7 +58,7 @@ class NotificationDispatcher(dispatcher.DispatcherBase):
priority, raw_message, message = self._extract_user_message(incoming)
if priority not in PRIORITIES:
LOG.warning(_LW('Unknown priority "%s"'), priority)
LOG.warning('Unknown priority "%s"', priority)
return
for screen, callback in self._callbacks_by_priority.get(priority,
@ -127,7 +126,7 @@ class BatchNotificationDispatcher(NotificationDispatcher):
for priority, messages in messages_grouped:
__, raw_messages, messages = six.moves.zip(*messages)
if priority not in PRIORITIES:
LOG.warning(_LW('Unknown priority "%s"'), priority)
LOG.warning('Unknown priority "%s"', priority)
continue
for screen, callback in self._callbacks_by_priority.get(priority,
[]):


@ -135,7 +135,6 @@ and arguments from primitive types.
import itertools
import logging
from oslo_messaging._i18n import _LE
from oslo_messaging.notify import dispatcher as notify_dispatcher
from oslo_messaging import server as msg_server
from oslo_messaging import transport as msg_transport
@ -185,7 +184,7 @@ class NotificationServer(NotificationServerBase):
try:
res = self.dispatcher.dispatch(message)
except Exception:
LOG.exception(_LE('Exception during message handling.'))
LOG.exception('Exception during message handling.')
res = notify_dispatcher.NotificationResult.REQUEUE
try:
@ -195,7 +194,7 @@ class NotificationServer(NotificationServerBase):
else:
message.acknowledge()
except Exception:
LOG.exception(_LE("Fail to ack/requeue message."))
LOG.exception("Fail to ack/requeue message.")
class BatchNotificationServer(NotificationServerBase):
@ -205,7 +204,7 @@ class BatchNotificationServer(NotificationServerBase):
not_processed_messages = self.dispatcher.dispatch(incoming)
except Exception:
not_processed_messages = set(incoming)
LOG.exception(_LE('Exception during messages handling.'))
LOG.exception('Exception during messages handling.')
for m in incoming:
try:
if m in not_processed_messages and self._allow_requeue:
@ -213,7 +212,7 @@ class BatchNotificationServer(NotificationServerBase):
else:
m.acknowledge()
except Exception:
LOG.exception(_LE("Fail to ack/requeue message."))
LOG.exception("Fail to ack/requeue message.")
def get_notification_listener(transport, targets, endpoints,


@ -39,7 +39,6 @@ send fail with a MessageDeliveryFailure after the given number of retries.
import logging
import oslo_messaging
from oslo_messaging._i18n import _LE
from oslo_messaging.notify import notifier
LOG = logging.getLogger(__name__)
@ -69,9 +68,9 @@ class MessagingDriver(notifier.Driver):
version=self.version,
retry=retry)
except Exception:
LOG.exception(_LE("Could not send notification to %(topic)s. "
"Payload=%(message)s"),
dict(topic=topic, message=message))
LOG.exception("Could not send notification to %(topic)s. "
"Payload=%(message)s",
{'topic': topic, 'message': message})
class MessagingV2Driver(MessagingDriver):


@ -26,7 +26,6 @@ from oslo_middleware import base
import webob.dec
import oslo_messaging
from oslo_messaging._i18n import _LE
from oslo_messaging import notify
LOG = logging.getLogger(__name__)
@ -37,8 +36,8 @@ def log_and_ignore_error(fn):
try:
return fn(*args, **kwargs)
except Exception as e:
LOG.exception(_LE('An exception occurred processing '
'the API call: %s ') % e)
LOG.exception('An exception occurred processing '
'the API call: %s ', e)
return wrapped


@ -26,7 +26,6 @@ import six
from stevedore import extension
from stevedore import named
from oslo_messaging._i18n import _LE
from oslo_messaging import serializer as msg_serializer
from oslo_messaging import transport as msg_transport
@ -311,9 +310,9 @@ class Notifier(object):
try:
ext.obj.notify(ctxt, msg, priority, retry or self.retry)
except Exception as e:
_LOG.exception(_LE("Problem '%(e)s' attempting to send to "
"notification system. Payload=%(payload)s"),
dict(e=e, payload=payload))
_LOG.exception("Problem '%(e)s' attempting to send to "
"notification system. Payload=%(payload)s",
{'e': e, 'payload': payload})
if self._driver_mgr.extensions:
self._driver_mgr.map(do_notify)


@ -124,7 +124,6 @@ A simple example of an RPC server with multiple endpoints might be::
import logging
import sys
from oslo_messaging._i18n import _LE
from oslo_messaging.rpc import dispatcher as rpc_dispatcher
from oslo_messaging import server as msg_server
from oslo_messaging import transport as msg_transport
@ -158,7 +157,7 @@ class RPCServer(msg_server.MessageHandlingServer):
try:
message.acknowledge()
except Exception:
LOG.exception(_LE("Can not acknowledge message. Skip processing"))
LOG.exception("Can not acknowledge message. Skip processing")
return
failure = None
@ -172,7 +171,7 @@ class RPCServer(msg_server.MessageHandlingServer):
# by another exception raised by a log handler during
# LOG.exception(). So keep a copy and delete it later.
failure = sys.exc_info()
LOG.exception(_LE('Exception during message handling'))
LOG.exception('Exception during message handling')
try:
if failure is None:
@ -180,7 +179,7 @@ class RPCServer(msg_server.MessageHandlingServer):
else:
message.reply(failure=failure)
except Exception:
LOG.exception(_LE("Can not send reply for message"))
LOG.exception("Can not send reply for message")
finally:
# NOTE(dhellmann): Remove circular object reference
# between the current stack frame and the traceback in


@ -32,7 +32,6 @@ import six
from stevedore import driver
from oslo_messaging._drivers import base as driver_base
from oslo_messaging._i18n import _LW
from oslo_messaging import exceptions
__all__ = [
@ -126,7 +125,7 @@ class _OrderedTask(object):
while condition():
if log_timer is not None and log_timer.expired():
LOG.warning(_LW('Possible hang: %s'), msg)
LOG.warning('Possible hang: %s', msg)
LOG.debug(''.join(traceback.format_stack()))
# Only log once. After than we wait indefinitely without
# logging.
@ -396,8 +395,8 @@ class MessageHandlingServer(service.ServiceBase, _OrderedTaskRunner):
current thread.
"""
if self._started:
LOG.warning(_LW('The server has already been started. Ignoring'
' the redundant call to start().'))
LOG.warning('The server has already been started. Ignoring '
'the redundant call to start().')
return
self._started = True


@ -25,7 +25,6 @@ import six
from six.moves.urllib import parse
from stevedore import driver
from oslo_messaging._i18n import _LW
from oslo_messaging import exceptions
__all__ = [
@ -520,10 +519,10 @@ class TransportURL(object):
if (len(hosts_with_credentials) > 0 and
len(hosts_without_credentials) > 0):
LOG.warning(_LW("All hosts must be set with username/password or "
"not at the same time. Hosts with credentials "
"are: %(hosts_with_credentials)s. Hosts without "
"credentials are %(hosts_without_credentials)s."),
LOG.warning("All hosts must be set with username/password or "
"not at the same time. Hosts with credentials "
"are: %(hosts_with_credentials)s. Hosts without "
"credentials are %(hosts_without_credentials)s.",
{'hosts_with_credentials': hosts_with_credentials,
'hosts_without_credentials':
hosts_without_credentials})


@ -10,7 +10,6 @@ oslo.log>=3.36.0 # Apache-2.0
oslo.utils>=3.33.0 # Apache-2.0
oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
oslo.service!=1.28.1,>=1.24.0 # Apache-2.0
oslo.i18n>=3.15.3 # Apache-2.0
stevedore>=1.20.0 # Apache-2.0
debtcollector>=1.2.0 # Apache-2.0
monotonic>=0.6;python_version<'3.3' # Apache-2.0


@ -106,7 +106,6 @@ exclude = .tox,dist,doc,*.egg,build,__init__.py
[hacking]
import_exceptions =
oslo_messaging._i18n
six.moves
local-check-factory = oslo_messaging.hacking.checks.factory