debug level logs should not be translated

According to the OpenStack translation policy available at
https://wiki.openstack.org/wiki/LoggingStandards, debug messages
should not be translated. As mentioned in several changes in
Nova by garyk, this is intended to help prioritize log translation.

Change-Id: I59486b1110f08510d83a4aec2a1666805c59d1cd
Closes-Bug: #1318333
Author: Christian Berendt
Date: 2014-05-11 14:44:47 +02:00
Parent: 1dfe518654
Commit: 4cd3478b4b

8 changed files with 24 additions and 24 deletions
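
In practice the rule is simple: log calls at info level and above keep the
_() translation marker so they can be extracted into message catalogs, while
debug calls use plain strings. A minimal illustrative sketch of the pattern,
using the stdlib gettext module as a stand-in for Swift's own i18n marker
(the logger name and messages here are hypothetical, not part of this change):

import gettext
import logging

# Stand-in for the translation marker that Swift modules import as `_`.
_ = gettext.gettext
logger = logging.getLogger('swift.example')

# Operator-facing levels stay translatable:
logger.info(_('Pass beginning; %s possible containers'), 12)

# Debug output is developer-facing and skips the marker:
logger.debug('Begin devices pass: %s', '/srv/node')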

swift/account/auditor.py

@@ -116,7 +116,7 @@ class AccountAuditor(Daemon):
                 broker.get_info()
                 self.logger.increment('passes')
                 self.account_passes += 1
-                self.logger.debug(_('Audit passed for %s') % broker)
+                self.logger.debug('Audit passed for %s' % broker)
         except (Exception, Timeout):
             self.logger.increment('failures')
             self.account_failures += 1
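
Note that this hunk keeps the eager %-interpolation ('Audit passed for %s'
% broker), while most hunks below pass the arguments separately so the
logger interpolates lazily. A small sketch of the difference (values here
are hypothetical):

import logging

logger = logging.getLogger('swift.example')
broker = '/srv/node/sda1/accounts/example.db'

# Eager: the message string is built even when DEBUG is disabled.
logger.debug('Audit passed for %s' % broker)

# Lazy: interpolation happens only if the record is actually emitted.
logger.debug('Audit passed for %s', broker)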

swift/account/reaper.py

@@ -101,7 +101,7 @@ class AccountReaper(Daemon):
         This repeatedly calls :func:`reap_once` no quicker than the
         configuration interval.
         """
-        self.logger.debug(_('Daemon started.'))
+        self.logger.debug('Daemon started.')
         sleep(random.random() * self.interval)
         while True:
             begin = time()
@@ -117,7 +117,7 @@ class AccountReaper(Daemon):
         repeatedly by :func:`run_forever`. This will call :func:`reap_device`
         once for each device on the server.
         """
-        self.logger.debug(_('Begin devices pass: %s'), self.devices)
+        self.logger.debug('Begin devices pass: %s', self.devices)
         begin = time()
         try:
             for device in os.listdir(self.devices):

swift/common/bufferedhttp.py

@@ -121,8 +121,8 @@ class BufferedHTTPConnection(HTTPConnection):
     def getresponse(self):
         response = HTTPConnection.getresponse(self)
-        logging.debug(_("HTTP PERF: %(time).5f seconds to %(method)s "
-                        "%(host)s:%(port)s %(path)s)"),
+        logging.debug("HTTP PERF: %(time).5f seconds to %(method)s "
+                      "%(host)s:%(port)s %(path)s)",
                       {'time': time.time() - self._connected_time,
                        'method': self._method, 'host': self.host,
                        'port': self.port, 'path': self._path})
         return response

swift/common/db_replicator.py

@@ -276,7 +276,7 @@ class Replicator(Daemon):
         """
         self.stats['diff'] += 1
         self.logger.increment('diffs')
-        self.logger.debug(_('Syncing chunks with %s'), http.host)
+        self.logger.debug('Syncing chunks with %s', http.host)
         sync_table = broker.get_syncs()
         objects = broker.get_items_since(point, self.per_diff)
         diffs = 0
@@ -294,9 +294,9 @@ class Replicator(Daemon):
             point = objects[-1]['ROWID']
             objects = broker.get_items_since(point, self.per_diff)
         if objects:
-            self.logger.debug(_(
+            self.logger.debug(
                 'Synchronization for %s has fallen more than '
-                '%s rows behind; moving on and will try again next pass.'),
+                '%s rows behind; moving on and will try again next pass.',
                 broker, self.max_diffs * self.per_diff)
             self.stats['diff_capped'] += 1
             self.logger.increment('diff_caps')
@@ -407,7 +407,7 @@ class Replicator(Daemon):
         :param node_id: node id of the node to be replicated to
         """
         start_time = now = time.time()
-        self.logger.debug(_('Replicating db %s'), object_file)
+        self.logger.debug('Replicating db %s', object_file)
         self.stats['attempted'] += 1
         self.logger.increment('attempts')
         shouldbehere = True
@@ -611,15 +611,15 @@ class ReplicatorRpc(object):
             raise
         timespan = time.time() - timemark
         if timespan > DEBUG_TIMINGS_THRESHOLD:
-            self.logger.debug(_('replicator-rpc-sync time for info: %.02fs') %
+            self.logger.debug('replicator-rpc-sync time for info: %.02fs' %
                               timespan)
         if metadata:
             timemark = time.time()
             broker.update_metadata(simplejson.loads(metadata))
             timespan = time.time() - timemark
             if timespan > DEBUG_TIMINGS_THRESHOLD:
-                self.logger.debug(_('replicator-rpc-sync time for '
-                                    'update_metadata: %.02fs') % timespan)
+                self.logger.debug('replicator-rpc-sync time for '
+                                  'update_metadata: %.02fs' % timespan)
         if info['put_timestamp'] != put_timestamp or \
                 info['created_at'] != created_at or \
                 info['delete_timestamp'] != delete_timestamp:
@@ -628,14 +628,14 @@ class ReplicatorRpc(object):
                 created_at, put_timestamp, delete_timestamp)
             timespan = time.time() - timemark
             if timespan > DEBUG_TIMINGS_THRESHOLD:
-                self.logger.debug(_('replicator-rpc-sync time for '
-                                    'merge_timestamps: %.02fs') % timespan)
+                self.logger.debug('replicator-rpc-sync time for '
+                                  'merge_timestamps: %.02fs' % timespan)
         timemark = time.time()
         info['point'] = broker.get_sync(id_)
         timespan = time.time() - timemark
         if timespan > DEBUG_TIMINGS_THRESHOLD:
-            self.logger.debug(_('replicator-rpc-sync time for get_sync: '
-                                '%.02fs') % timespan)
+            self.logger.debug('replicator-rpc-sync time for get_sync: '
+                              '%.02fs' % timespan)
         if hash_ == info['hash'] and info['point'] < remote_sync:
             timemark = time.time()
             broker.merge_syncs([{'remote_id': id_,
@@ -643,8 +643,8 @@ class ReplicatorRpc(object):
             info['point'] = remote_sync
             timespan = time.time() - timemark
             if timespan > DEBUG_TIMINGS_THRESHOLD:
-                self.logger.debug(_('replicator-rpc-sync time for '
-                                    'merge_syncs: %.02fs') % timespan)
+                self.logger.debug('replicator-rpc-sync time for '
+                                  'merge_syncs: %.02fs' % timespan)
         return Response(simplejson.dumps(info))
 
     def merge_syncs(self, broker, args):

swift/container/auditor.py

@@ -116,7 +116,7 @@ class ContainerAuditor(Daemon):
                 broker.get_info()
                 self.logger.increment('passes')
                 self.container_passes += 1
-                self.logger.debug(_('Audit passed for %s'), broker)
+                self.logger.debug('Audit passed for %s', broker)
         except (Exception, Timeout):
             self.logger.increment('failures')
             self.container_failures += 1

swift/obj/expirer.py

@@ -100,7 +100,7 @@ class ObjectExpirer(Daemon):
         self.report_first_time = self.report_last_time = time()
         self.report_objects = 0
         try:
-            self.logger.debug(_('Run begin'))
+            self.logger.debug('Run begin')
             containers, objects = \
                 self.swift.get_account_info(self.expiring_objects_account)
             self.logger.info(_('Pass beginning; %s possible containers; %s '
@@ -138,7 +138,7 @@ class ObjectExpirer(Daemon):
                     self.logger.exception(
                         _('Exception while deleting container %s %s') %
                         (container, str(err)))
-            self.logger.debug(_('Run end'))
+            self.logger.debug('Run end')
             self.report(final=True)
         except (Exception, Timeout):
             self.logger.exception(_('Unhandled exception'))

swift/obj/replicator.py

@@ -526,6 +526,6 @@ class ObjectReplicator(Daemon):
             dump_recon_cache({'object_replication_time': total,
                               'object_replication_last': time.time()},
                              self.rcache, self.logger)
-            self.logger.debug(_('Replication sleeping for %s seconds.'),
+            self.logger.debug('Replication sleeping for %s seconds.',
                               self.run_pause)
             sleep(self.run_pause)

swift/obj/updater.py

@@ -206,14 +206,14 @@ class ObjectUpdater(Daemon):
         if success:
             self.successes += 1
             self.logger.increment('successes')
-            self.logger.debug(_('Update sent for %(obj)s %(path)s'),
+            self.logger.debug('Update sent for %(obj)s %(path)s',
                               {'obj': obj, 'path': update_path})
             self.logger.increment("unlinks")
             os.unlink(update_path)
         else:
             self.failures += 1
             self.logger.increment('failures')
-            self.logger.debug(_('Update failed for %(obj)s %(path)s'),
+            self.logger.debug('Update failed for %(obj)s %(path)s',
                               {'obj': obj, 'path': update_path})
             if new_successes:
                 update['successes'] = successes