Merge "object: Remove a bunch of logging translations"
commit c824bc4dee
@@ -20,7 +20,6 @@ import time
 import signal
 from os.path import basename, dirname, join
 from random import shuffle
-from swift import gettext_ as _
 from contextlib import closing
 from eventlet import Timeout

@@ -117,11 +116,11 @@ class AuditorWorker(object):
         if device_dirs:
             device_dir_str = ','.join(sorted(device_dirs))
             if self.auditor_type == 'ALL':
-                description = _(' - parallel, %s') % device_dir_str
+                description = ' - parallel, %s' % device_dir_str
             else:
-                description = _(' - %s') % device_dir_str
-        self.logger.info(_('Begin object audit "%(mode)s" mode (%(audi_type)s'
-                           '%(description)s)') %
+                description = ' - %s' % device_dir_str
+        self.logger.info('Begin object audit "%(mode)s" mode (%(audi_type)s'
+                         '%(description)s)',
                          {'mode': mode, 'audi_type': self.auditor_type,
                           'description': description})
         for watcher in self.watchers:
@@ -152,13 +151,13 @@ class AuditorWorker(object):
             self.total_files_processed += 1
             now = time.time()
             if now - self.last_logged >= self.log_time:
-                self.logger.info(_(
+                self.logger.info(
                     'Object audit (%(type)s). '
                     'Since %(start_time)s: Locally: %(passes)d passed, '
                     '%(quars)d quarantined, %(errors)d errors, '
                     'files/sec: %(frate).2f, bytes/sec: %(brate).2f, '
                     'Total time: %(total).2f, Auditing time: %(audit).2f, '
-                    'Rate: %(audit_rate).2f') % {
+                    'Rate: %(audit_rate).2f', {
                         'type': '%s%s' % (self.auditor_type, description),
                         'start_time': time.ctime(reported),
                         'passes': self.passes, 'quars': self.quarantines,
@@ -186,12 +185,12 @@ class AuditorWorker(object):
             time_auditing += (now - loop_time)
         # Avoid divide by zero during very short runs
         elapsed = (time.time() - begin) or 0.000001
-        self.logger.info(_(
+        self.logger.info(
             'Object audit (%(type)s) "%(mode)s" mode '
             'completed: %(elapsed).02fs. Total quarantined: %(quars)d, '
             'Total errors: %(errors)d, Total files/sec: %(frate).2f, '
             'Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, '
-            'Rate: %(audit_rate).2f') % {
+            'Rate: %(audit_rate).2f', {
                 'type': '%s%s' % (self.auditor_type, description),
                 'mode': mode, 'elapsed': elapsed,
                 'quars': total_quarantines + self.quarantines,
@@ -203,7 +202,7 @@ class AuditorWorker(object):
             watcher.end()
         if self.stats_sizes:
             self.logger.info(
-                _('Object audit stats: %s') % json.dumps(self.stats_buckets))
+                'Object audit stats: %s', json.dumps(self.stats_buckets))

         for policy in POLICIES:
             # Unset remaining partitions to not skip them in the next run
@@ -237,7 +236,7 @@ class AuditorWorker(object):
         except (Exception, Timeout):
             self.logger.increment('errors')
             self.errors += 1
-            self.logger.exception(_('ERROR Trying to audit %s'), location)
+            self.logger.exception('ERROR Trying to audit %s', location)

     def object_audit(self, location):
         """
@@ -284,8 +283,8 @@ class AuditorWorker(object):
                     "Requested by %s" % watcher.watcher_name)
         except DiskFileQuarantined as err:
             self.quarantines += 1
-            self.logger.error(_('ERROR Object %(obj)s failed audit and was'
-                                ' quarantined: %(err)s'),
+            self.logger.error('ERROR Object %(obj)s failed audit and was'
+                              ' quarantined: %(err)s',
                               {'obj': location, 'err': err})
         except DiskFileExpired:
             pass  # ignore expired objects
@@ -375,7 +374,7 @@ class ObjectAuditor(Daemon):
                 self.run_audit(**kwargs)
             except Exception as e:
                 self.logger.exception(
-                    _("ERROR: Unable to run auditing: %s") % e)
+                    "ERROR: Unable to run auditing: %s", e)
             finally:
                 sys.exit()

@@ -454,7 +453,7 @@ class ObjectAuditor(Daemon):
             try:
                 self.audit_loop(parent, zbo_fps, **kwargs)
             except (Exception, Timeout) as err:
-                self.logger.exception(_('ERROR auditing: %s'), err)
+                self.logger.exception('ERROR auditing: %s', err)
             self._sleep()

     def run_once(self, *args, **kwargs):
@@ -475,7 +474,7 @@ class ObjectAuditor(Daemon):
             self.audit_loop(parent, zbo_fps, override_devices=override_devices,
                             **kwargs)
         except (Exception, Timeout) as err:
-            self.logger.exception(_('ERROR auditing: %s'), err)


 class WatcherWrapper(object):
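The auditor hunks above also show the dict-style substitutions being passed as a logger argument instead of being applied with %. A side effect worth noting (a general property of Python's logging module, not something this commit states) is that formatting then happens inside the handler, so a bad format argument is reported by the logging machinery rather than raising at the call site. A small standalone sketch, with made-up stats:

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger('demo')
stats = {'passes': 10, 'quars': 0, 'errors': 1}

# Interpolation is deferred: the dict rides along on the LogRecord and
# is merged into the message only when a handler emits the record.
log.info('audit: %(passes)d passed, %(quars)d quarantined, '
         '%(errors)d errors', stats)

# With eager %-formatting a missing key would raise KeyError here;
# deferred formatting routes the failure to Handler.handleError()
# and the caller keeps running.
log.info('audit: %(missing)d', stats)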
@@ -56,7 +56,6 @@ import six
 from pyeclib.ec_iface import ECDriverError, ECInvalidFragmentMetadata, \
     ECBadFragmentChecksum, ECInvalidParameter

-from swift import gettext_ as _
 from swift.common.constraints import check_drive
 from swift.common.request_helpers import is_sys_meta
 from swift.common.utils import mkdirs, Timestamp, \
@@ -611,14 +610,14 @@ def get_auditor_status(datadir_path, logger, auditor_type):
             status = statusfile.read()
     except (OSError, IOError) as e:
         if e.errno != errno.ENOENT and logger:
-            logger.warning(_('Cannot read %(auditor_status)s (%(err)s)') %
+            logger.warning('Cannot read %(auditor_status)s (%(err)s)',
                            {'auditor_status': auditor_status, 'err': e})
         return listdir(datadir_path)
     try:
         status = json.loads(status)
     except ValueError as e:
-        logger.warning(_('Loading JSON from %(auditor_status)s failed'
-                         ' (%(err)s)') %
+        logger.warning('Loading JSON from %(auditor_status)s failed'
+                       ' (%(err)s)',
                        {'auditor_status': auditor_status, 'err': e})
         return listdir(datadir_path)
     return status['partitions']
@@ -645,7 +644,7 @@ def update_auditor_status(datadir_path, logger, partitions, auditor_type):
             statusfile.write(status)
     except (OSError, IOError) as e:
         if logger:
-            logger.warning(_('Cannot write %(auditor_status)s (%(err)s)') %
+            logger.warning('Cannot write %(auditor_status)s (%(err)s)',
                            {'auditor_status': auditor_status, 'err': e})


@@ -1187,9 +1186,9 @@ class BaseDiskFileManager(object):
                                                join(hsh_path,
                                                     "made-up-filename"))
                 logging.exception(
-                    _('Quarantined %(hsh_path)s to %(quar_path)s because '
-                      'it is not a directory'), {'hsh_path': hsh_path,
-                                                 'quar_path': quar_path})
+                    'Quarantined %(hsh_path)s to %(quar_path)s because '
+                    'it is not a directory', {'hsh_path': hsh_path,
+                                              'quar_path': quar_path})
                 continue
             raise
         if not ondisk_info['files']:
@@ -1322,7 +1321,7 @@ class BaseDiskFileManager(object):
                 except PathNotDir:
                     del hashes[suffix]
                 except OSError:
-                    logging.exception(_('Error hashing suffix'))
+                    logging.exception('Error hashing suffix')
                 modified = True
         if modified:
             with lock_path(partition_path):
@@ -1524,9 +1523,9 @@ class BaseDiskFileManager(object):
                                                join(object_path,
                                                     "made-up-filename"))
                 logging.exception(
-                    _('Quarantined %(object_path)s to %(quar_path)s because '
-                      'it is not a directory'), {'object_path': object_path,
-                                                 'quar_path': quar_path})
+                    'Quarantined %(object_path)s to %(quar_path)s because '
+                    'it is not a directory', {'object_path': object_path,
+                                              'quar_path': quar_path})
                 raise DiskFileNotExist()
             if err.errno != errno.ENOENT:
                 raise
@@ -1916,7 +1915,7 @@ class BaseDiskFileWriter(object):
             try:
                 self.manager.cleanup_ondisk_files(self._datadir)
             except OSError:
-                logging.exception(_('Problem cleaning up %s'), self._datadir)
+                logging.exception('Problem cleaning up %s', self._datadir)

             self._part_power_cleanup(target_path, new_target_path)

@@ -1991,7 +1990,7 @@ class BaseDiskFileWriter(object):
                     self.manager.cleanup_ondisk_files(new_target_dir)
                 except OSError:
                     logging.exception(
-                        _('Problem cleaning up %s'), new_target_dir)
+                        'Problem cleaning up %s', new_target_dir)

         # Partition power has been increased, cleanup not yet finished
         else:
@@ -2003,7 +2002,7 @@ class BaseDiskFileWriter(object):
                     self.manager.cleanup_ondisk_files(old_target_dir)
                 except OSError:
                     logging.exception(
-                        _('Problem cleaning up %s'), old_target_dir)
+                        'Problem cleaning up %s', old_target_dir)


 class BaseDiskFileReader(object):
@@ -2315,9 +2314,9 @@ class BaseDiskFileReader(object):
         except DiskFileQuarantined:
             raise
         except (Exception, Timeout) as e:
-            self._logger.error(_(
+            self._logger.error(
                 'ERROR DiskFile %(data_file)s'
-                ' close failure: %(exc)s : %(stack)s'),
+                ' close failure: %(exc)s : %(stack)s',
                 {'exc': e, 'stack': ''.join(traceback.format_stack()),
                  'data_file': self._data_file})
         finally:
@@ -2666,8 +2665,8 @@ class BaseDiskFile(object):
             else:
                 if mname != self._name:
                     self._logger.error(
-                        _('Client path %(client)s does not match '
-                          'path stored in object metadata %(meta)s'),
+                        'Client path %(client)s does not match '
+                        'path stored in object metadata %(meta)s',
                         {'client': self._name, 'meta': mname})
                     raise DiskFileCollision('Client path does not match path '
                                             'stored in object metadata')
@@ -3089,8 +3088,8 @@ class ECDiskFileReader(BaseDiskFileReader):
             # format so for safety, check the input chunk if it's binary to
             # avoid quarantining a valid fragment archive.
             self._diskfile._logger.warn(
-                _('Unexpected fragment data type (not quarantined) '
-                  '%(datadir)s: %(type)s at offset 0x%(offset)x'),
+                'Unexpected fragment data type (not quarantined) '
+                '%(datadir)s: %(type)s at offset 0x%(offset)x',
                 {'datadir': self._diskfile._datadir,
                  'type': type(frag),
                  'offset': self.frag_offset})
@@ -3113,7 +3112,7 @@ class ECDiskFileReader(BaseDiskFileReader):
                 raise DiskFileQuarantined(msg)
         except ECDriverError as err:
             self._diskfile._logger.warn(
-                _('Problem checking EC fragment %(datadir)s: %(err)s'),
+                'Problem checking EC fragment %(datadir)s: %(err)s',
                 {'datadir': self._diskfile._datadir, 'err': err})

     def _update_checks(self, chunk):
@@ -3181,7 +3180,7 @@ class ECDiskFileWriter(BaseDiskFileWriter):
                 raise
             params = {'file': durable_data_file_path, 'err': err}
             self.manager.logger.exception(
-                _('No space left on device for %(file)s (%(err)s)'),
+                'No space left on device for %(file)s (%(err)s)',
                 params)
             exc = DiskFileNoSpace(
                 'No space left on device for %(file)s (%(err)s)' % params)
@@ -3190,7 +3189,7 @@ class ECDiskFileWriter(BaseDiskFileWriter):
                 self.manager.cleanup_ondisk_files(self._datadir)
             except OSError as os_err:
                 self.manager.logger.exception(
-                    _('Problem cleaning up %(datadir)s (%(err)s)'),
+                    'Problem cleaning up %(datadir)s (%(err)s)',
                     {'datadir': self._datadir, 'err': os_err})
             self._part_power_cleanup(
                 durable_data_file_path, new_durable_data_file_path)
@@ -3198,7 +3197,7 @@ class ECDiskFileWriter(BaseDiskFileWriter):
         except Exception as err:
             params = {'file': durable_data_file_path, 'err': err}
             self.manager.logger.exception(
-                _('Problem making data file durable %(file)s (%(err)s)'),
+                'Problem making data file durable %(file)s (%(err)s)',
                 params)
             exc = DiskFileError(
                 'Problem making data file durable %(file)s (%(err)s)' % params)
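Several diskfile.py hunks above use the module-level logging.exception(...) helper with the same deferred-argument style. That helper is standard library behavior: it logs at ERROR level and attaches the active exception's traceback, so removing the _() wrapper does not change how the stack trace is captured. A brief illustration (standalone, the directory path is made up):

import logging

logging.basicConfig(level=logging.DEBUG)
datadir = '/srv/node/sda1/objects/1234/abc'

try:
    raise OSError('disk went away')
except OSError:
    # Logs at ERROR level with the current traceback appended; the %s
    # argument is interpolated by the logging machinery.
    logging.exception('Problem cleaning up %s', datadir)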
@@ -18,7 +18,6 @@ import six
 from random import random
 from time import time
 from os.path import join
-from swift import gettext_ as _
 from collections import defaultdict, deque

 from eventlet import sleep, Timeout
@@ -151,17 +150,17 @@ class ObjectExpirer(Daemon):
         """
         if final:
             elapsed = time() - self.report_first_time
-            self.logger.info(_('Pass completed in %(time)ds; '
-                               '%(objects)d objects expired') % {
-                             'time': elapsed, 'objects': self.report_objects})
+            self.logger.info(
+                'Pass completed in %(time)ds; %(objects)d objects expired', {
+                    'time': elapsed, 'objects': self.report_objects})
             dump_recon_cache({'object_expiration_pass': elapsed,
                               'expired_last_pass': self.report_objects},
                              self.rcache, self.logger)
         elif time() - self.report_last_time >= self.report_interval:
             elapsed = time() - self.report_first_time
-            self.logger.info(_('Pass so far %(time)ds; '
-                               '%(objects)d objects expired') % {
-                             'time': elapsed, 'objects': self.report_objects})
+            self.logger.info(
+                'Pass so far %(time)ds; %(objects)d objects expired', {
+                    'time': elapsed, 'objects': self.report_objects})
             self.report_last_time = time()

     def parse_task_obj(self, task_obj):
@@ -331,13 +330,13 @@ class ObjectExpirer(Daemon):
                 if not container_count:
                     continue

-                self.logger.info(_(
+                self.logger.info(
                     'Pass beginning for task account %(account)s; '
                     '%(container_count)s possible containers; '
-                    '%(obj_count)s possible objects') % {
-                        'account': task_account,
-                        'container_count': container_count,
-                        'obj_count': obj_count})
+                    '%(obj_count)s possible objects', {
+                    'account': task_account,
+                    'container_count': container_count,
+                    'obj_count': obj_count})

                 task_account_container_list = \
                     [(task_account, task_container) for task_container in
@@ -368,14 +367,14 @@ class ObjectExpirer(Daemon):
                         acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
                 except (Exception, Timeout) as err:
                     self.logger.exception(
-                        _('Exception while deleting container %(account)s '
-                          '%(container)s %(err)s') % {
-                            'account': task_account,
-                            'container': task_container, 'err': str(err)})
+                        'Exception while deleting container %(account)s '
+                        '%(container)s %(err)s', {
+                        'account': task_account,
+                        'container': task_container, 'err': str(err)})
             self.logger.debug('Run end')
             self.report(final=True)
         except (Exception, Timeout):
-            self.logger.exception(_('Unhandled exception'))
+            self.logger.exception('Unhandled exception')

     def run_forever(self, *args, **kwargs):
         """
@@ -392,7 +391,7 @@ class ObjectExpirer(Daemon):
             try:
                 self.run_once(*args, **kwargs)
             except (Exception, Timeout):
-                self.logger.exception(_('Unhandled exception'))
+                self.logger.exception('Unhandled exception')
             elapsed = time() - begin
             if elapsed < self.interval:
                 sleep(random() * (self.interval - elapsed))
@@ -27,7 +27,6 @@ import shutil
 from eventlet import (GreenPile, GreenPool, Timeout, sleep, tpool, spawn)
 from eventlet.support.greenlets import GreenletExit

-from swift import gettext_ as _
 from swift.common.utils import (
     whataremyips, unlink_older_than, compute_eta, get_logger,
     dump_recon_cache, mkdirs, config_true_value,
@@ -402,7 +401,7 @@ class ObjectReconstructor(Daemon):
             resp.node = node
         except (Exception, Timeout):
             self.logger.exception(
-                _("Trying to GET %(full_path)s"), {
+                "Trying to GET %(full_path)s", {
                     'full_path': full_path})
         return resp

@@ -421,7 +420,7 @@ class ObjectReconstructor(Daemon):

         if resp.status not in [HTTP_OK, HTTP_NOT_FOUND]:
             self.logger.warning(
-                _("Invalid response %(resp)s from %(full_path)s"),
+                "Invalid response %(resp)s from %(full_path)s",
                 {'resp': resp.status, 'full_path': resp.full_path})
         if resp.status != HTTP_OK:
             error_responses[resp.status].append(resp)
@@ -735,8 +734,8 @@ class ObjectReconstructor(Daemon):
             fragment_payload = [fragment for fragment in pile]
         except (Exception, Timeout):
             self.logger.exception(
-                _("Error trying to rebuild %(path)s "
-                  "policy#%(policy)d frag#%(frag_index)s"),
+                "Error trying to rebuild %(path)s "
+                "policy#%(policy)d frag#%(frag_index)s",
                 {'path': path,
                  'policy': policy,
                  'frag_index': frag_index,
@@ -758,9 +757,9 @@ class ObjectReconstructor(Daemon):
             elapsed = (time.time() - self.start) or 0.000001
             rate = self.reconstruction_part_count / elapsed
             self.logger.info(
-                _("%(reconstructed)d/%(total)d (%(percentage).2f%%)"
-                  " partitions reconstructed in %(time).2fs "
-                  "(%(rate).2f/sec, %(remaining)s remaining)"),
+                "%(reconstructed)d/%(total)d (%(percentage).2f%%)"
+                " partitions reconstructed in %(time).2fs "
+                "(%(rate).2f/sec, %(remaining)s remaining)",
                 {'reconstructed': self.reconstruction_part_count,
                  'total': self.part_count,
                  'percentage':
@@ -773,22 +772,22 @@ class ObjectReconstructor(Daemon):

         if self.suffix_count and self.partition_times:
             self.logger.info(
-                _("%(checked)d suffixes checked - "
-                  "%(hashed).2f%% hashed, %(synced).2f%% synced"),
+                "%(checked)d suffixes checked - "
+                "%(hashed).2f%% hashed, %(synced).2f%% synced",
                 {'checked': self.suffix_count,
                  'hashed': (self.suffix_hash * 100.0) / self.suffix_count,
                  'synced': (self.suffix_sync * 100.0) / self.suffix_count})
             self.partition_times.sort()
             self.logger.info(
-                _("Partition times: max %(max).4fs, "
-                  "min %(min).4fs, med %(med).4fs"),
+                "Partition times: max %(max).4fs, "
+                "min %(min).4fs, med %(med).4fs",
                 {'max': self.partition_times[-1],
                  'min': self.partition_times[0],
                  'med': self.partition_times[
                      len(self.partition_times) // 2]})
         else:
             self.logger.info(
-                _("Nothing reconstructed for %s seconds."),
+                "Nothing reconstructed for %s seconds.",
                 (time.time() - self.start))

     def _emplace_log_prefix(self, worker_index):
@@ -823,7 +822,7 @@ class ObjectReconstructor(Daemon):
         while True:
             sleep(self.lockup_timeout)
             if self.reconstruction_count == self.last_reconstruction_count:
-                self.logger.error(_("Lockup detected.. killing live coros."))
+                self.logger.error("Lockup detected.. killing live coros.")
                 self.kill_coros()
             self.last_reconstruction_count = self.reconstruction_count

@@ -920,7 +919,7 @@ class ObjectReconstructor(Daemon):
                 '', headers=headers).getresponse()
             if resp.status == HTTP_INSUFFICIENT_STORAGE:
                 self.logger.error(
-                    _('%s responded as unmounted'),
+                    '%s responded as unmounted',
                     _full_path(node, job['partition'], '',
                                job['policy']))
                 attempts_remaining += 1
@@ -928,7 +927,7 @@ class ObjectReconstructor(Daemon):
                 full_path = _full_path(node, job['partition'], '',
                                        job['policy'])
                 self.logger.error(
-                    _("Invalid response %(resp)s from %(full_path)s"),
+                    "Invalid response %(resp)s from %(full_path)s",
                     {'resp': resp.status, 'full_path': full_path})
             else:
                 remote_suffixes = pickle.loads(resp.read())
@@ -1310,7 +1309,7 @@ class ObjectReconstructor(Daemon):
                 policy.object_ring, 'next_part_power', None)
             if next_part_power is not None:
                 self.logger.warning(
-                    _("next_part_power set in policy '%s'. Skipping"),
+                    "next_part_power set in policy '%s'. Skipping",
                     policy.name)
                 continue

@@ -1322,7 +1321,7 @@ class ObjectReconstructor(Daemon):
             self.device_count += 1
             dev_path = df_mgr.get_dev_path(local_dev['device'])
             if not dev_path:
-                self.logger.warning(_('%s is not mounted'),
+                self.logger.warning('%s is not mounted',
                                     local_dev['device'])
                 continue
             data_dir = get_data_dir(policy)
@@ -1416,7 +1415,7 @@ class ObjectReconstructor(Daemon):
                 shutil.rmtree(path, ignore_errors=True)
                 remove_file(path)

-            self.logger.info(_("Removing partition: %s"), path)
+            self.logger.info("Removing partition: %s", path)
             tpool.execute(kill_it, path)

     def reconstruct(self, **kwargs):
@@ -1432,8 +1431,8 @@ class ObjectReconstructor(Daemon):
         for part_info in self.collect_parts(**kwargs):
             sleep()  # Give spawns a cycle
             if not self.check_ring(part_info['policy'].object_ring):
-                self.logger.info(_("Ring change detected. Aborting "
-                                   "current reconstruction pass."))
+                self.logger.info("Ring change detected. Aborting "
+                                 "current reconstruction pass.")
                 return

             self.reconstruction_part_count += 1
@@ -1453,8 +1452,8 @@ class ObjectReconstructor(Daemon):
             with Timeout(self.lockup_timeout):
                 self.run_pool.waitall()
         except (Exception, Timeout):
-            self.logger.exception(_("Exception in top-level "
-                                    "reconstruction loop"))
+            self.logger.exception("Exception in top-level "
+                                  "reconstruction loop")
             self.kill_coros()
         finally:
             stats.kill()
@@ -1462,14 +1461,14 @@ class ObjectReconstructor(Daemon):
             self.stats_line()
         if self.handoffs_only:
             if self.handoffs_remaining > 0:
-                self.logger.info(_(
+                self.logger.info(
                     "Handoffs only mode still has handoffs remaining. "
-                    "Next pass will continue to revert handoffs."))
+                    "Next pass will continue to revert handoffs.")
             else:
-                self.logger.warning(_(
+                self.logger.warning(
                     "Handoffs only mode found no handoffs remaining. "
                     "You should disable handoffs_only once all nodes "
-                    "are reporting no handoffs remaining."))
+                    "are reporting no handoffs remaining.")

     def final_recon_dump(self, total, override_devices=None, **kwargs):
         """
@@ -1506,13 +1505,13 @@ class ObjectReconstructor(Daemon):
         if multiprocess_worker_index is not None:
             self._emplace_log_prefix(multiprocess_worker_index)
         start = time.time()
-        self.logger.info(_("Running object reconstructor in script mode."))
+        self.logger.info("Running object reconstructor in script mode.")
         override_opts = parse_override_options(once=True, **kwargs)
         self.reconstruct(override_devices=override_opts.devices,
                          override_partitions=override_opts.partitions)
         total = (time.time() - start) / 60
         self.logger.info(
-            _("Object reconstruction complete (once). (%.02f minutes)"), total)
+            "Object reconstruction complete (once). (%.02f minutes)", total)
         # Only dump stats if they would actually be meaningful -- i.e. we're
         # collecting per-disk stats and covering all partitions, or we're
         # covering all partitions, all disks.
@@ -1525,18 +1524,18 @@ class ObjectReconstructor(Daemon):
     def run_forever(self, multiprocess_worker_index=None, *args, **kwargs):
         if multiprocess_worker_index is not None:
             self._emplace_log_prefix(multiprocess_worker_index)
-        self.logger.info(_("Starting object reconstructor in daemon mode."))
+        self.logger.info("Starting object reconstructor in daemon mode.")
         # Run the reconstructor continually
         while True:
             start = time.time()
-            self.logger.info(_("Starting object reconstruction pass."))
+            self.logger.info("Starting object reconstruction pass.")
             override_opts = parse_override_options(**kwargs)
             # Run the reconstructor
             self.reconstruct(override_devices=override_opts.devices,
                              override_partitions=override_opts.partitions)
             total = (time.time() - start) / 60
             self.logger.info(
-                _("Object reconstruction complete. (%.02f minutes)"), total)
+                "Object reconstruction complete. (%.02f minutes)", total)
             self.final_recon_dump(
                 total, override_devices=override_opts.devices,
                 override_partitions=override_opts.partitions)
@@ -23,7 +23,6 @@ import time
 import itertools
 from six import viewkeys
 import six.moves.cPickle as pickle
-from swift import gettext_ as _

 import eventlet
 from eventlet import GreenPool, queue, tpool, Timeout, sleep
@@ -386,7 +385,7 @@ class ObjectReplicator(Daemon):
         except Timeout:
             self.logger.error(
                 self._limit_rsync_log(
-                    _("Killing long-running rsync after %ds: %s") % (
+                    "Killing long-running rsync after %ds: %s" % (
                         self.rsync_timeout, str(args))))
             if proc:
                 proc.kill()
@@ -570,7 +569,7 @@ class ObjectReplicator(Daemon):
                         stats.remove += 1
                 if (self.conf.get('sync_method', 'rsync') == 'ssync' and
                         delete_objs is not None):
-                    self.logger.info(_("Removing %s objects"),
+                    self.logger.info("Removing %s objects",
                                      len(delete_objs))
                     _junk, error_paths = self.delete_handoff_objs(
                         job, delete_objs)
@@ -598,7 +597,7 @@ class ObjectReplicator(Daemon):
                               job['policy'])
             self.logger.increment('partition.lock-failure.count')
         except (Exception, Timeout):
-            self.logger.exception(_("Error syncing handoff partition"))
+            self.logger.exception("Error syncing handoff partition")
             stats.add_failure_stats(failure_devs_info)
         finally:
             target_devs_info = set([(target_dev['replication_ip'],
@@ -611,7 +610,7 @@ class ObjectReplicator(Daemon):
             self.logger.timing_since('partition.delete.timing', begin)

     def delete_partition(self, path):
-        self.logger.info(_("Removing partition: %s"), path)
+        self.logger.info("Removing partition: %s", path)
         try:
             tpool.execute(shutil.rmtree, path)
         except OSError as e:
@@ -686,15 +685,15 @@ class ObjectReplicator(Daemon):
                     '', headers=headers).getresponse()
                 if resp.status == HTTP_INSUFFICIENT_STORAGE:
                     self.logger.error(
-                        _('%(replication_ip)s/%(device)s '
-                          'responded as unmounted'), node)
+                        '%(replication_ip)s/%(device)s '
+                        'responded as unmounted', node)
                     attempts_left += 1
                     failure_devs_info.add((node['replication_ip'],
                                            node['device']))
                     continue
                 if resp.status != HTTP_OK:
-                    self.logger.error(_("Invalid response %(resp)s "
-                                        "from %(ip)s"),
+                    self.logger.error("Invalid response %(resp)s "
+                                      "from %(ip)s",
                                       {'resp': resp.status,
                                        'ip': node['replication_ip']})
                     failure_devs_info.add((node['replication_ip'],
@@ -733,7 +732,7 @@ class ObjectReplicator(Daemon):
             except (Exception, Timeout):
                 failure_devs_info.add((node['replication_ip'],
                                        node['device']))
-                self.logger.exception(_("Error syncing with node: %s") %
+                self.logger.exception("Error syncing with node: %s",
                                       node)
                 stats.suffix_count += len(local_hash)
         except StopIteration:
@@ -742,7 +741,7 @@ class ObjectReplicator(Daemon):
                               job['partition'], int(job['policy']))
         except (Exception, Timeout):
             failure_devs_info.update(target_devs_info)
-            self.logger.exception(_("Error syncing partition"))
+            self.logger.exception("Error syncing partition")
         finally:
             stats.add_failure_stats(failure_devs_info)
             stats.success += len(target_devs_info - failure_devs_info)
@@ -760,23 +759,23 @@ class ObjectReplicator(Daemon):
             elapsed = (time.time() - self.start) or 0.000001
             rate = replication_count / elapsed
             self.logger.info(
-                _("%(replicated)d/%(total)d (%(percentage).2f%%)"
-                  " partitions replicated in %(time).2fs (%(rate).2f/sec, "
-                  "%(remaining)s remaining)"),
+                "%(replicated)d/%(total)d (%(percentage).2f%%)"
+                " partitions replicated in %(time).2fs (%(rate).2f/sec, "
+                "%(remaining)s remaining)",
                 {'replicated': replication_count, 'total': self.job_count,
                  'percentage': replication_count * 100.0 / self.job_count,
                  'time': time.time() - self.start, 'rate': rate,
                  'remaining': '%d%s' % compute_eta(self.start,
                                                    replication_count,
                                                    self.job_count)})
-            self.logger.info(_('%(success)s successes, %(failure)s failures')
-                             % dict(success=stats.success,
-                                    failure=stats.failure))
+            self.logger.info('%(success)s successes, %(failure)s failures',
+                             dict(success=stats.success,
+                                  failure=stats.failure))

             if stats.suffix_count:
                 self.logger.info(
-                    _("%(checked)d suffixes checked - "
-                      "%(hashed).2f%% hashed, %(synced).2f%% synced"),
+                    "%(checked)d suffixes checked - "
+                    "%(hashed).2f%% hashed, %(synced).2f%% synced",
                     {'checked': stats.suffix_count,
                      'hashed':
                      (stats.suffix_hash * 100.0) / stats.suffix_count,
@@ -784,15 +783,15 @@ class ObjectReplicator(Daemon):
                     (stats.suffix_sync * 100.0) / stats.suffix_count})
                 self.partition_times.sort()
                 self.logger.info(
-                    _("Partition times: max %(max).4fs, "
-                      "min %(min).4fs, med %(med).4fs"),
+                    "Partition times: max %(max).4fs, "
+                    "min %(min).4fs, med %(med).4fs",
                     {'max': self.partition_times[-1],
                      'min': self.partition_times[0],
                      'med': self.partition_times[
                          len(self.partition_times) // 2]})
             else:
                 self.logger.info(
-                    _("Nothing replicated for %s seconds."),
+                    "Nothing replicated for %s seconds.",
                     (time.time() - self.start))

     def heartbeat(self):
@@ -919,7 +918,7 @@ class ObjectReplicator(Daemon):
                 policy.object_ring, 'next_part_power', None)
             if next_part_power is not None:
                 self.logger.warning(
-                    _("next_part_power set in policy '%s'. Skipping"),
+                    "next_part_power set in policy '%s'. Skipping",
                     policy.name)
                 continue

@@ -979,14 +978,14 @@ class ObjectReplicator(Daemon):
                     # in handoffs first mode, we won't process primary
                     # partitions until rebalance was successful!
                     if self.handoffs_remaining:
-                        self.logger.warning(_(
+                        self.logger.warning(
                             "Handoffs first mode still has handoffs "
                             "remaining. Aborting current "
-                            "replication pass."))
+                            "replication pass.")
                         break
                 if not self.check_ring(job['policy'].object_ring):
-                    self.logger.info(_("Ring change detected. Aborting "
-                                       "current replication pass."))
+                    self.logger.info("Ring change detected. Aborting "
+                                     "current replication pass.")
                     return

                 try:
@@ -1016,7 +1015,7 @@ class ObjectReplicator(Daemon):
             else:
                 dev_stats.add_failure_stats(self.all_devs_info)
             self.logger.exception(
-                _("Exception in top-level replication loop: %s"), err)
+                "Exception in top-level replication loop: %s", err)
         finally:
             stats.kill()
             self.stats_line()
@@ -1083,7 +1082,7 @@ class ObjectReplicator(Daemon):

         rsync_reaper = eventlet.spawn(self._child_process_reaper)
         self._zero_stats()
-        self.logger.info(_("Running object replicator in script mode."))
+        self.logger.info("Running object replicator in script mode.")

         override_opts = parse_override_options(once=True, **kwargs)
         devices = override_opts.devices or None
@@ -1099,7 +1098,7 @@ class ObjectReplicator(Daemon):
         end_time = time.time()
         total = (end_time - start_time) / 60
         self.logger.info(
-            _("Object replication complete (once). (%.02f minutes)"), total)
+            "Object replication complete (once). (%.02f minutes)", total)

         # If we've been manually run on a subset of
         # policies/devices/partitions, then our recon stats are not
@@ -1126,19 +1125,19 @@ class ObjectReplicator(Daemon):
         if multiprocess_worker_index is not None:
             self.is_multiprocess_worker = True
             self._emplace_log_prefix(multiprocess_worker_index)
-        self.logger.info(_("Starting object replicator in daemon mode."))
+        self.logger.info("Starting object replicator in daemon mode.")
         eventlet.spawn_n(self._child_process_reaper)
         # Run the replicator continually
        while True:
             self._zero_stats()
-            self.logger.info(_("Starting object replication pass."))
+            self.logger.info("Starting object replication pass.")
             # Run the replicator
             start = time.time()
             self.replicate(override_devices=override_devices)
             end = time.time()
             total = (end - start) / 60
             self.logger.info(
-                _("Object replication complete. (%.02f minutes)"), total)
+                "Object replication complete. (%.02f minutes)", total)
             self.update_recon(total, end, override_devices)
             self.logger.debug('Replication sleeping for %s seconds.',
                               self.interval)
@@ -25,7 +25,6 @@ import time
 import traceback
 import socket
 import math
-from swift import gettext_ as _

 from eventlet import sleep, wsgi, Timeout, tpool
 from eventlet.greenthread import spawn
@@ -330,16 +329,16 @@ class ObjectController(BaseStorageServer):
                     'Container update failed for %r; problem with '
                     'redirect location: %s' % (obj, err))
             else:
-                self.logger.error(_(
+                self.logger.error(
                     'ERROR Container update failed '
                     '(saving for async update later): %(status)d '
-                    'response from %(ip)s:%(port)s/%(dev)s'),
+                    'response from %(ip)s:%(port)s/%(dev)s',
                     {'status': response.status, 'ip': ip, 'port': port,
                      'dev': contdevice})
         except (Exception, Timeout):
-            self.logger.exception(_(
+            self.logger.exception(
                 'ERROR container update failed with '
-                '%(ip)s:%(port)s/%(dev)s (saving for async update later)'),
+                '%(ip)s:%(port)s/%(dev)s (saving for async update later)',
                 {'ip': ip, 'port': port, 'dev': contdevice})
         data = {'op': op, 'account': account, 'container': container,
                 'obj': obj, 'headers': headers_out}
@@ -380,10 +379,10 @@ class ObjectController(BaseStorageServer):
         if len(conthosts) != len(contdevices):
             # This shouldn't happen unless there's a bug in the proxy,
             # but if there is, we want to know about it.
-            self.logger.error(_(
+            self.logger.error(
                 'ERROR Container update failed: different '
                 'numbers of hosts and devices in request: '
-                '"%(hosts)s" vs "%(devices)s"') % {
+                '"%(hosts)s" vs "%(devices)s"', {
                     'hosts': headers_in.get('X-Container-Host', ''),
                     'devices': headers_in.get('X-Container-Device', '')})
             return
@@ -1341,9 +1340,9 @@ class ObjectController(BaseStorageServer):
         except HTTPException as error_response:
             res = error_response
         except (Exception, Timeout):
-            self.logger.exception(_(
+            self.logger.exception(
                 'ERROR __call__ error with %(method)s'
-                ' %(path)s '), {'method': req.method, 'path': req.path})
+                ' %(path)s ', {'method': req.method, 'path': req.path})
             res = HTTPInternalServerError(body=traceback.format_exc())
         trans_time = time.time() - start_time
         res.fix_conditional_response()
@@ -19,7 +19,6 @@ import os
 import signal
 import sys
 import time
-from swift import gettext_ as _
 from random import random, shuffle

 from eventlet import spawn, Timeout
@@ -125,8 +124,8 @@ class ObjectUpdater(Daemon):
         except OSError as e:
             self.stats.errors += 1
             self.logger.increment('errors')
-            self.logger.error(_('ERROR: Unable to access %(path)s: '
-                                '%(error)s') %
+            self.logger.error('ERROR: Unable to access %(path)s: '
+                              '%(error)s',
                               {'path': path, 'error': e})
             return []

@@ -140,7 +139,7 @@ class ObjectUpdater(Daemon):
         """Run the updater continuously."""
         time.sleep(random() * self.interval)
         while True:
-            self.logger.info(_('Begin object update sweep'))
+            self.logger.info('Begin object update sweep')
             begin = time.time()
             pids = []
             # read from container ring to ensure it's fresh
@@ -176,7 +175,7 @@ class ObjectUpdater(Daemon):
             while pids:
                 pids.remove(os.wait()[0])
             elapsed = time.time() - begin
-            self.logger.info(_('Object update sweep completed: %.02fs'),
+            self.logger.info('Object update sweep completed: %.02fs',
                              elapsed)
             dump_recon_cache({'object_updater_sweep': elapsed},
                              self.rcache, self.logger)
@@ -185,7 +184,7 @@ class ObjectUpdater(Daemon):

     def run_once(self, *args, **kwargs):
         """Run the updater once."""
-        self.logger.info(_('Begin object update single threaded sweep'))
+        self.logger.info('Begin object update single threaded sweep')
         begin = time.time()
         self.stats.reset()
         for device in self._listdir(self.devices):
@@ -228,9 +227,9 @@ class ObjectUpdater(Daemon):
             except PolicyError as e:
                 # This isn't an error, but a misconfiguration. Logging a
                 # warning should be sufficient.
-                self.logger.warning(_('Directory %(directory)r does not map '
-                                      'to a valid policy (%(error)s)') % {
-                                    'directory': asyncdir, 'error': e})
+                self.logger.warning('Directory %(directory)r does not map '
+                                    'to a valid policy (%(error)s)', {
+                                        'directory': asyncdir, 'error': e})
                 continue
             prefix_dirs = self._listdir(async_pending)
             shuffle(prefix_dirs)
@@ -249,9 +248,8 @@ class ObjectUpdater(Daemon):
                     self.stats.errors += 1
                     self.logger.increment('errors')
                     self.logger.error(
-                        _('ERROR async pending file with unexpected '
-                          'name %s')
-                        % (update_path))
+                        'ERROR async pending file with unexpected '
+                        'name %s', update_path)
                     continue
                 # Async pendings are stored on disk like this:
                 #
@@ -353,7 +351,7 @@ class ObjectUpdater(Daemon):
             if getattr(e, 'errno', None) == errno.ENOENT:
                 return
             self.logger.exception(
-                _('ERROR Pickle problem, quarantining %s'), update_path)
+                'ERROR Pickle problem, quarantining %s', update_path)
             self.stats.quarantines += 1
             self.logger.increment('quarantines')
             target_path = os.path.join(device, 'quarantined', 'objects',
@@ -487,12 +485,12 @@ class ObjectUpdater(Daemon):
             success = is_success(resp.status)
             if not success:
                 self.logger.debug(
-                    _('Error code %(status)d is returned from remote '
-                      'server %(ip)s: %(port)s / %(device)s'),
+                    'Error code %(status)d is returned from remote '
+                    'server %(ip)s: %(port)s / %(device)s',
                     {'status': resp.status, 'ip': node['ip'],
                      'port': node['port'], 'device': node['device']})
             return success, node['id'], redirect
         except (Exception, Timeout):
-            self.logger.exception(_('ERROR with remote server '
-                                    '%(ip)s:%(port)s/%(device)s'), node)
+            self.logger.exception('ERROR with remote server '
+                                  '%(ip)s:%(port)s/%(device)s', node)
             return HTTP_INTERNAL_SERVER_ERROR, node['id'], redirect