Merge "container: Remove a bunch of logging translations"

Zuul 2021-11-30 19:08:04 +00:00 committed by Gerrit Code Review
commit 93118e2116
3 changed files with 51 additions and 54 deletions
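Nearly every hunk below makes the same two-part change: the lazy-gettext wrapper _() around the log message is dropped, and the format arguments are handed to the logger instead of being %-interpolated up front. A minimal stand-alone sketch of that before/after shape, using the stdlib logger and made-up values rather than Swift's wrapped logger:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('demo')

hosts = '10.0.0.1:6201'   # made-up values for the sketch
devices = 'sdb1'

# Before: the message was translated and %-interpolated before logging:
#     logger.error(_('... "%(hosts)s" vs "%(devices)s"') % {
#         'hosts': hosts, 'devices': devices})

# After: plain string plus a mapping argument; the logger interpolates it:
logger.error('different numbers of hosts and devices in request: '
             '"%(hosts)s" vs "%(devices)s"',
             {'hosts': hosts, 'devices': devices})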

swift/container/server.py

@@ -18,7 +18,6 @@ import os
 import time
 import traceback
 import math
-from swift import gettext_ as _
 from eventlet import Timeout
@@ -238,10 +237,10 @@ class ContainerController(BaseStorageServer):
 if len(account_hosts) != len(account_devices):
 # This shouldn't happen unless there's a bug in the proxy,
 # but if there is, we want to know about it.
-self.logger.error(_(
+self.logger.error(
 'ERROR Account update failed: different '
 'numbers of hosts and devices in request: '
-'"%(hosts)s" vs "%(devices)s"') % {
+'"%(hosts)s" vs "%(devices)s"', {
 'hosts': req.headers.get('X-Account-Host', ''),
 'devices': req.headers.get('X-Account-Device', '')})
 return HTTPBadRequest(req=req)
@@ -284,18 +283,18 @@ class ContainerController(BaseStorageServer):
 if account_response.status == HTTP_NOT_FOUND:
 account_404s += 1
 elif not is_success(account_response.status):
-self.logger.error(_(
+self.logger.error(
 'ERROR Account update failed '
 'with %(ip)s:%(port)s/%(device)s (will retry '
-'later): Response %(status)s %(reason)s'),
+'later): Response %(status)s %(reason)s',
 {'ip': account_ip, 'port': account_port,
 'device': account_device,
 'status': account_response.status,
 'reason': account_response.reason})
 except (Exception, Timeout):
-self.logger.exception(_(
+self.logger.exception(
 'ERROR account update failed with '
-'%(ip)s:%(port)s/%(device)s (will retry later)'),
+'%(ip)s:%(port)s/%(device)s (will retry later)',
 {'ip': account_ip, 'port': account_port,
 'device': account_device})
 if updates and account_404s == len(updates):
@@ -900,8 +899,8 @@ class ContainerController(BaseStorageServer):
 except HTTPException as error_response:
 res = error_response
 except (Exception, Timeout):
-self.logger.exception(_(
-'ERROR __call__ error with %(method)s %(path)s '),
+self.logger.exception(
+'ERROR __call__ error with %(method)s %(path)s ',
 {'method': req.method, 'path': req.path})
 res = HTTPInternalServerError(body=traceback.format_exc())
 if self.log_requests:
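A side effect of turning '...') % {...} into '...', {...} in the hunks above is that the %-interpolation now happens inside the logging call, so it is skipped when the record is filtered out. A small illustrative sketch of that behaviour, not taken from the commit:

import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger('demo')

class Expensive:
    def __str__(self):
        print('formatting happened')   # visible side effect for the demo
        return 'expensive value'

# DEBUG is below the WARNING threshold, so the mapping is never formatted
# and __str__ above never runs:
logger.debug('value is %(v)s', {'v': Expensive()})

# The old eager style paid the formatting cost regardless of level:
# logger.debug('value is %(v)s' % {'v': Expensive()})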

swift/container/sync.py

@@ -17,7 +17,6 @@ import collections
 import errno
 import os
 import uuid
-from swift import gettext_ as _
 from time import ctime, time
 from random import choice, random
 from struct import unpack_from
@@ -233,9 +232,9 @@ class ContainerSync(Daemon):
 internal_client_conf_path = conf.get('internal_client_conf_path')
 if not internal_client_conf_path:
 self.logger.warning(
-_('Configuration option internal_client_conf_path not '
-'defined. Using default configuration, See '
-'internal-client.conf-sample for options'))
+'Configuration option internal_client_conf_path not '
+'defined. Using default configuration, See '
+'internal-client.conf-sample for options')
 internal_client_conf = ConfigString(ic_conf_body)
 else:
 internal_client_conf = internal_client_conf_path
@@ -248,8 +247,8 @@ class ContainerSync(Daemon):
 not str(err).endswith(' not found'):
 raise
 raise SystemExit(
-_('Unable to load internal client from config: '
-'%(conf)r (%(error)s)')
+'Unable to load internal client from config: '
+'%(conf)r (%(error)s)'
 % {'conf': internal_client_conf_path, 'error': err})
 def run_forever(self, *args, **kwargs):
@@ -272,7 +271,7 @@ class ContainerSync(Daemon):
 """
 Runs a single container sync scan.
 """
-self.logger.info(_('Begin container sync "once" mode'))
+self.logger.info('Begin container sync "once" mode')
 begin = time()
 for path in self.sync_store.synced_containers_generator():
 self.container_sync(path)
@@ -281,7 +280,7 @@ class ContainerSync(Daemon):
 self.report()
 elapsed = time() - begin
 self.logger.info(
-_('Container sync "once" mode completed: %.02fs'), elapsed)
+'Container sync "once" mode completed: %.02fs', elapsed)
 def report(self):
 """
@@ -289,8 +288,8 @@ class ContainerSync(Daemon):
 next report.
 """
 self.logger.info(
-_('Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s '
-'puts], %(skip)s skipped, %(fail)s failed'),
+'Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s '
+'puts], %(skip)s skipped, %(fail)s failed',
 {'time': ctime(self.reported),
 'sync': self.container_syncs,
 'delete': self.container_deletes,
@@ -306,16 +305,16 @@ class ContainerSync(Daemon):
 def container_report(self, start, end, sync_point1, sync_point2, info,
 max_row):
-self.logger.info(_('Container sync report: %(container)s, '
-'time window start: %(start)s, '
-'time window end: %(end)s, '
-'puts: %(puts)s, '
-'posts: %(posts)s, '
-'deletes: %(deletes)s, '
-'bytes: %(bytes)s, '
-'sync_point1: %(point1)s, '
-'sync_point2: %(point2)s, '
-'total_rows: %(total)s'),
+self.logger.info('Container sync report: %(container)s, '
+'time window start: %(start)s, '
+'time window end: %(end)s, '
+'puts: %(puts)s, '
+'posts: %(posts)s, '
+'deletes: %(deletes)s, '
+'bytes: %(bytes)s, '
+'sync_point1: %(point1)s, '
+'sync_point2: %(point2)s, '
+'total_rows: %(total)s',
 {'container': '%s/%s' % (info['account'],
 info['container']),
 'start': start,
@@ -386,7 +385,7 @@ class ContainerSync(Daemon):
 sync_to, self.allowed_sync_hosts, self.realms_conf)
 if err:
 self.logger.info(
-_('ERROR %(db_file)s: %(validate_sync_to_err)s'),
+'ERROR %(db_file)s: %(validate_sync_to_err)s',
 {'db_file': str(broker),
 'validate_sync_to_err': err})
 self.container_failures += 1
@@ -455,7 +454,7 @@ class ContainerSync(Daemon):
 except (Exception, Timeout):
 self.container_failures += 1
 self.logger.increment('failures')
-self.logger.exception(_('ERROR Syncing %s'),
+self.logger.exception('ERROR Syncing %s',
 broker if broker else path)
 def _update_sync_to_headers(self, name, sync_to, user_key,
@@ -647,27 +646,27 @@ class ContainerSync(Daemon):
 except ClientException as err:
 if err.http_status == HTTP_UNAUTHORIZED:
 self.logger.info(
-_('Unauth %(sync_from)r => %(sync_to)r'),
+'Unauth %(sync_from)r => %(sync_to)r',
 {'sync_from': '%s/%s' %
 (quote(info['account']), quote(info['container'])),
 'sync_to': sync_to})
 elif err.http_status == HTTP_NOT_FOUND:
 self.logger.info(
-_('Not found %(sync_from)r => %(sync_to)r \
-- object %(obj_name)r'),
+'Not found %(sync_from)r => %(sync_to)r \
+- object %(obj_name)r',
 {'sync_from': '%s/%s' %
 (quote(info['account']), quote(info['container'])),
 'sync_to': sync_to, 'obj_name': row['name']})
 else:
 self.logger.exception(
-_('ERROR Syncing %(db_file)s %(row)s'),
+'ERROR Syncing %(db_file)s %(row)s',
 {'db_file': str(broker), 'row': row})
 self.container_failures += 1
 self.logger.increment('failures')
 return False
 except (Exception, Timeout):
 self.logger.exception(
-_('ERROR Syncing %(db_file)s %(row)s'),
+'ERROR Syncing %(db_file)s %(row)s',
 {'db_file': str(broker), 'row': row})
 self.container_failures += 1
 self.logger.increment('failures')
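One spot in this file keeps the eager formatting: in the raise SystemExit(...) hunk above, only the _() wrapper is removed and the % {...} stays, because the message goes to an exception rather than a logger and has to be a finished string. A sketch with hypothetical values:

internal_client_conf_path = '/etc/swift/internal-client.conf'   # hypothetical
err = 'file not found'                                          # hypothetical

message = ('Unable to load internal client from config: %(conf)r (%(error)s)'
           % {'conf': internal_client_conf_path, 'error': err})
print(message)
# raise SystemExit(message)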

swift/container/updater.py

@@ -19,7 +19,6 @@ import os
 import signal
 import sys
 import time
-from swift import gettext_ as _
 from random import random, shuffle
 from tempfile import mkstemp
@@ -90,8 +89,8 @@ class ContainerUpdater(Daemon):
 try:
 return os.listdir(path)
 except OSError as e:
-self.logger.error(_('ERROR: Failed to get paths to drive '
-'partitions: %s') % e)
+self.logger.error('ERROR: Failed to get paths to drive '
+'partitions: %s', e)
 return []
 def get_paths(self):
@@ -124,7 +123,7 @@ class ContainerUpdater(Daemon):
 self.account_suppressions[account] = until
 except Exception:
 self.logger.exception(
-_('ERROR with loading suppressions from %s: ') % filename)
+'ERROR with loading suppressions from %s: ', filename)
 finally:
 os.unlink(filename)
@@ -134,7 +133,7 @@ class ContainerUpdater(Daemon):
 """
 time.sleep(random() * self.interval)
 while True:
-self.logger.info(_('Begin container update sweep'))
+self.logger.info('Begin container update sweep')
 begin = time.time()
 now = time.time()
 expired_suppressions = \
@@ -168,9 +167,9 @@ class ContainerUpdater(Daemon):
 self.container_sweep(path)
 elapsed = time.time() - forkbegin
 self.logger.debug(
-_('Container update sweep of %(path)s completed: '
-'%(elapsed).02fs, %(success)s successes, %(fail)s '
-'failures, %(no_change)s with no changes'),
+'Container update sweep of %(path)s completed: '
+'%(elapsed).02fs, %(success)s successes, %(fail)s '
+'failures, %(no_change)s with no changes',
 {'path': path, 'elapsed': elapsed,
 'success': self.successes, 'fail': self.failures,
 'no_change': self.no_changes})
@@ -182,7 +181,7 @@ class ContainerUpdater(Daemon):
 finally:
 del pid2filename[pid]
 elapsed = time.time() - begin
-self.logger.info(_('Container update sweep completed: %.02fs'),
+self.logger.info('Container update sweep completed: %.02fs',
 elapsed)
 dump_recon_cache({'container_updater_sweep': elapsed},
 self.rcache, self.logger)
@@ -194,7 +193,7 @@ class ContainerUpdater(Daemon):
 Run the updater once.
 """
 eventlet_monkey_patch()
-self.logger.info(_('Begin container update single threaded sweep'))
+self.logger.info('Begin container update single threaded sweep')
 begin = time.time()
 self.no_changes = 0
 self.successes = 0
@@ -202,10 +201,10 @@ class ContainerUpdater(Daemon):
 for path in self.get_paths():
 self.container_sweep(path)
 elapsed = time.time() - begin
-self.logger.info(_(
+self.logger.info(
 'Container update single threaded sweep completed: '
 '%(elapsed).02fs, %(success)s successes, %(fail)s failures, '
-'%(no_change)s with no changes'),
+'%(no_change)s with no changes',
 {'elapsed': elapsed, 'success': self.successes,
 'fail': self.failures, 'no_change': self.no_changes})
 dump_recon_cache({'container_updater_sweep': elapsed},
@@ -282,7 +281,7 @@ class ContainerUpdater(Daemon):
 self.logger.increment('successes')
 self.successes += 1
 self.logger.debug(
-_('Update report sent for %(container)s %(dbfile)s'),
+'Update report sent for %(container)s %(dbfile)s',
 {'container': container, 'dbfile': dbfile})
 broker.reported(info['put_timestamp'],
 info['delete_timestamp'], info['object_count'],
@@ -291,7 +290,7 @@ class ContainerUpdater(Daemon):
 self.logger.increment('failures')
 self.failures += 1
 self.logger.debug(
-_('Update report stub for %(container)s %(dbfile)s'),
+'Update report stub for %(container)s %(dbfile)s',
 {'container': container, 'dbfile': dbfile})
 broker.quarantine('no account replicas exist')
 # All that's left at this point is a few sacks of Gnocchi,
@@ -300,7 +299,7 @@ class ContainerUpdater(Daemon):
 self.logger.increment('failures')
 self.failures += 1
 self.logger.debug(
-_('Update report failed for %(container)s %(dbfile)s'),
+'Update report failed for %(container)s %(dbfile)s',
 {'container': container, 'dbfile': dbfile})
 self.account_suppressions[info['account']] = until = \
 time.time() + self.account_suppression_time
@@ -342,9 +341,9 @@ class ContainerUpdater(Daemon):
 node['ip'], node['port'], node['device'], part,
 'PUT', container, headers=headers)
 except (Exception, Timeout):
-self.logger.exception(_(
+self.logger.exception(
 'ERROR account update failed with '
-'%(ip)s:%(port)s/%(device)s (will retry later): '), node)
+'%(ip)s:%(port)s/%(device)s (will retry later): ', node)
 return HTTP_INTERNAL_SERVER_ERROR
 with Timeout(self.node_timeout):
 try:
@@ -354,7 +353,7 @@ class ContainerUpdater(Daemon):
 except (Exception, Timeout):
 if self.logger.getEffectiveLevel() <= logging.DEBUG:
 self.logger.exception(
-_('Exception with %(ip)s:%(port)s/%(device)s'), node)
+'Exception with %(ip)s:%(port)s/%(device)s', node)
 return HTTP_INTERNAL_SERVER_ERROR
 finally:
 conn.close()
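In the updater hunks above, the ring node dict is passed straight through as the logging argument: when a logging call gets a single non-empty dict, it is used as the %(name)s mapping, and logger.exception additionally records the active traceback. A stand-alone sketch with an illustrative node:

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('demo')

node = {'ip': '127.0.0.1', 'port': 6201, 'device': 'sdb1'}   # illustrative node

try:
    raise TimeoutError('connect timed out')
except Exception:
    # Logs at ERROR level and appends the traceback of the active exception.
    logger.exception('Exception with %(ip)s:%(port)s/%(device)s', node)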