Debug-level logs should not be translated
According to the OpenStack translation policy available at https://wiki.openstack.org/wiki/LoggingStandards, debug messages should not be translated. As mentioned in several changes in Nova by garyk, this helps prioritize log translation. Change-Id: I4af4a45a56b1364a2f5196b75cff299d607ab393 Partial-Bug: #1317950
This commit is contained in:
parent
84887ccee3
commit
a6200c0a00
|
@ -68,7 +68,7 @@ class ConnectionPool(pool.Pool):
|
|||
|
||||
# TODO(comstud): Timeout connections not used in a while
|
||||
def create(self):
|
||||
LOG.debug(_('Pool creating new connection'))
|
||||
LOG.debug('Pool creating new connection')
|
||||
return self.connection_cls(self.conf, self.url)
|
||||
|
||||
def empty(self):
|
||||
|
@ -193,7 +193,7 @@ def unpack_context(conf, msg):
|
|||
context_dict['reply_q'] = msg.pop('_reply_q', None)
|
||||
context_dict['conf'] = conf
|
||||
ctx = RpcContext.from_dict(context_dict)
|
||||
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
|
||||
rpc_common._safe_log(LOG.debug, 'unpacked context: %s', ctx.to_dict())
|
||||
return ctx
|
||||
|
||||
|
||||
|
@ -248,7 +248,7 @@ def _add_unique_id(msg):
|
|||
"""Add unique_id for checking duplicate messages."""
|
||||
unique_id = uuid.uuid4().hex
|
||||
msg.update({UNIQUE_ID: unique_id})
|
||||
LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
|
||||
LOG.debug('UNIQUE_ID is %s.' % (unique_id))
|
||||
|
||||
|
||||
def get_control_exchange(conf):
|
||||
|
|
|
@ -550,7 +550,7 @@ class Connection(object):
|
|||
consumer.reconnect(self.session)
|
||||
self._register_consumer(consumer)
|
||||
|
||||
LOG.debug(_("Re-established AMQP queues"))
|
||||
LOG.debug("Re-established AMQP queues")
|
||||
|
||||
def ensure(self, error_callback, method, *args, **kwargs):
|
||||
while True:
|
||||
|
@ -600,7 +600,7 @@ class Connection(object):
|
|||
|
||||
def _error_callback(exc):
|
||||
if isinstance(exc, qpid_exceptions.Empty):
|
||||
LOG.debug(_('Timed out waiting for RPC response: %s') % exc)
|
||||
LOG.debug('Timed out waiting for RPC response: %s' % exc)
|
||||
raise rpc_common.Timeout()
|
||||
else:
|
||||
LOG.exception(_('Failed to consume message from queue: %s') %
|
||||
|
|
|
@ -700,7 +700,7 @@ class Connection(object):
|
|||
|
||||
def _error_callback(exc):
|
||||
if isinstance(exc, socket.timeout):
|
||||
LOG.debug(_('Timed out waiting for RPC response: %s') % exc)
|
||||
LOG.debug('Timed out waiting for RPC response: %s' % exc)
|
||||
raise rpc_common.Timeout()
|
||||
else:
|
||||
LOG.exception(_('Failed to consume message from queue: %s') %
|
||||
|
|
|
@ -107,7 +107,7 @@ def _serialize(data):
|
|||
|
||||
def _deserialize(data):
|
||||
"""Deserialization wrapper."""
|
||||
LOG.debug(_("Deserializing: %s"), data)
|
||||
LOG.debug("Deserializing: %s", data)
|
||||
return jsonutils.loads(data)
|
||||
|
||||
|
||||
|
@ -142,9 +142,9 @@ class ZmqSocket(object):
|
|||
str_data = {'addr': addr, 'type': self.socket_s(),
|
||||
'subscribe': subscribe, 'bind': bind}
|
||||
|
||||
LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
|
||||
LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
|
||||
LOG.debug(_("-> bind: %(bind)s"), str_data)
|
||||
LOG.debug("Connecting to %(addr)s with %(type)s", str_data)
|
||||
LOG.debug("-> Subscribed to %(subscribe)s", str_data)
|
||||
LOG.debug("-> bind: %(bind)s", str_data)
|
||||
|
||||
try:
|
||||
if bind:
|
||||
|
@ -164,7 +164,7 @@ class ZmqSocket(object):
|
|||
"""Subscribe."""
|
||||
if not self.can_sub:
|
||||
raise RPCException("Cannot subscribe on this socket.")
|
||||
LOG.debug(_("Subscribing to %s"), msg_filter)
|
||||
LOG.debug("Subscribing to %s", msg_filter)
|
||||
|
||||
try:
|
||||
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
|
||||
|
@ -273,7 +273,7 @@ class InternalContext(object):
|
|||
|
||||
def _get_response(self, ctx, proxy, topic, data):
|
||||
"""Process a curried message and cast the result to topic."""
|
||||
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
|
||||
LOG.debug("Running func with context: %s", ctx.to_dict())
|
||||
data.setdefault('version', None)
|
||||
data.setdefault('args', {})
|
||||
|
||||
|
@ -286,7 +286,7 @@ class InternalContext(object):
|
|||
# ignore these since they are just from shutdowns
|
||||
pass
|
||||
except rpc_common.ClientException as e:
|
||||
LOG.debug(_("Expected exception during message handling (%s)") %
|
||||
LOG.debug("Expected exception during message handling (%s)" %
|
||||
e._exc_info[1])
|
||||
return {'exc':
|
||||
rpc_common.serialize_remote_exception(e._exc_info,
|
||||
|
@ -311,7 +311,7 @@ class InternalContext(object):
|
|||
self._get_response(ctx, proxy, topic, payload),
|
||||
ctx.replies)
|
||||
|
||||
LOG.debug(_("Sending reply"))
|
||||
LOG.debug("Sending reply")
|
||||
_multi_send(_cast, ctx, topic, {
|
||||
'method': '-process_reply',
|
||||
'args': {
|
||||
|
@ -549,7 +549,7 @@ class ZmqReactor(ZmqBaseReactor):
|
|||
def consume(self, sock):
|
||||
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
|
||||
data = sock.recv()
|
||||
LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
|
||||
LOG.debug("CONSUMER RECEIVED DATA: %s", data)
|
||||
|
||||
proxy = self.proxies[sock]
|
||||
|
||||
|
@ -603,7 +603,7 @@ class Connection(rpc_common.Connection):
|
|||
inaddr = "ipc://%s/zmq_topic_%s" % \
|
||||
(CONF.rpc_zmq_ipc_dir, topic)
|
||||
|
||||
LOG.debug(_("Consumer is a zmq.%s"),
|
||||
LOG.debug("Consumer is a zmq.%s",
|
||||
['PULL', 'SUB'][sock_type == zmq.SUB])
|
||||
|
||||
self.reactor.register(proxy, inaddr, sock_type,
|
||||
|
@ -655,7 +655,7 @@ def _call(addr, context, topic, msg, timeout=None,
|
|||
# Replies always come into the reply service.
|
||||
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
|
||||
|
||||
LOG.debug(_("Creating payload"))
|
||||
LOG.debug("Creating payload")
|
||||
# Curry the original request into a reply method.
|
||||
mcontext = RpcContext.marshal(context)
|
||||
payload = {
|
||||
|
@ -668,7 +668,7 @@ def _call(addr, context, topic, msg, timeout=None,
|
|||
}
|
||||
}
|
||||
|
||||
LOG.debug(_("Creating queue socket for reply waiter"))
|
||||
LOG.debug("Creating queue socket for reply waiter")
|
||||
|
||||
# Messages arriving async.
|
||||
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
|
||||
|
@ -681,14 +681,14 @@ def _call(addr, context, topic, msg, timeout=None,
|
|||
zmq.SUB, subscribe=msg_id, bind=False
|
||||
)
|
||||
|
||||
LOG.debug(_("Sending cast"))
|
||||
LOG.debug("Sending cast")
|
||||
_cast(addr, context, topic, payload, envelope=envelope)
|
||||
|
||||
LOG.debug(_("Cast sent; Waiting reply"))
|
||||
LOG.debug("Cast sent; Waiting reply")
|
||||
# Blocks until receives reply
|
||||
msg = msg_waiter.recv()
|
||||
LOG.debug(_("Received message: %s"), msg)
|
||||
LOG.debug(_("Unpacking response"))
|
||||
LOG.debug("Received message: %s", msg)
|
||||
LOG.debug("Unpacking response")
|
||||
|
||||
if msg[2] == 'cast': # Legacy version
|
||||
raw_msg = _deserialize(msg[-1])[-1]
|
||||
|
@ -728,10 +728,10 @@ def _multi_send(method, context, topic, msg, timeout=None,
|
|||
Dispatches to the matchmaker and sends message to all relevant hosts.
|
||||
"""
|
||||
conf = CONF
|
||||
LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
|
||||
LOG.debug("%(msg)s" % {'msg': ' '.join(map(pformat, (topic, msg)))})
|
||||
|
||||
queues = _get_matchmaker().queues(topic)
|
||||
LOG.debug(_("Sending message(s) to: %s"), queues)
|
||||
LOG.debug("Sending message(s) to: %s", queues)
|
||||
|
||||
# Don't stack if we have no matchmaker results
|
||||
if not queues:
|
||||
|
|
|
@ -70,7 +70,7 @@ class RoutingDriver(notifier._Driver):
|
|||
for group in self.routing_groups.values():
|
||||
self.used_drivers.update(group.keys())
|
||||
|
||||
LOG.debug(_('loading notifiers from %(namespace)s') %
|
||||
LOG.debug('loading notifiers from %(namespace)s' %
|
||||
{'namespace': self.NOTIFIER_PLUGIN_NAMESPACE})
|
||||
self.plugin_manager = dispatch.DispatchExtensionManager(
|
||||
namespace=self.NOTIFIER_PLUGIN_NAMESPACE,
|
||||
|
|
Loading…
Reference in New Issue