gettext updates

Michael Barton 2010-12-20 21:47:50 +00:00
parent 977a2893ee
commit d7dd3ec065
22 changed files with 306 additions and 285 deletions
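The pattern applied throughout: log and print format strings are wrapped in gettext's _() so xgettext can extract them for translation catalogs, positional %s placeholders become named %(key)s placeholders so translations can reorder values, and interpolation is deferred by passing the values to the logging call instead of %-formatting the string eagerly. Where a bare _ served as a throwaway name (tuple unpacking, mimetypes.guess_type), it is renamed to _junk so it no longer shadows the gettext alias. A minimal sketch of the before/after — an illustrative standalone example, not a file from this commit:

import logging
from gettext import gettext as _

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('demo')
passes, failures = 10, 2

# Before: eager %-interpolation. Translators never see a stable msgid,
# and the formatting work happens even if the record is filtered out.
logger.info('Audits: %s passed, %s failed' % (passes, failures))

# After: _() marks the literal for extraction, named placeholders let a
# translation reorder the values, and logging interpolates lazily (a
# single dict argument is treated as the mapping for %(key)s fields).
logger.info(_('Audits: %(pass)s passed, %(fail)s failed'),
            {'pass': passes, 'fail': failures})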

@@ -16,6 +16,7 @@
import os
import time
from random import random
from gettext import gettext as _
from swift.account import server as account_server
from swift.common.db import AccountBroker
@@ -49,11 +50,11 @@ class AccountAuditor(Daemon):
for path, device, partition in all_locs:
self.account_audit(path)
if time.time() - reported >= 3600: # once an hour
self.logger.info(
'Since %s: Account audits: %s passed audit, '
'%s failed audit' % (time.ctime(reported),
self.account_passes,
self.account_failures))
self.logger.info(_('Since %(time)s: Account audits: '
'%(passed)s passed audit, %(failed)s failed audit'),
{'time': time.ctime(reported),
'passed': self.account_passes,
'failed': self.account_failures})
reported = time.time()
self.account_passes = 0
self.account_failures = 0
@@ -72,17 +73,17 @@ class AccountAuditor(Daemon):
for path, device, partition in all_locs:
self.account_audit(path)
if time.time() - reported >= 3600: # once an hour
self.logger.info(
'Since %s: Account audits: %s passed audit, '
'%s failed audit' % (time.ctime(reported),
self.account_passes,
self.account_failures))
self.logger.info(_('Since %(time)s: Account audits: '
'%(passed)s passed audit, %(failed)s failed audit'),
{'time': time.ctime(reported),
'passed': self.account_passes,
'failed': self.account_failures})
reported = time.time()
self.account_passes = 0
self.account_failures = 0
elapsed = time.time() - begin
self.logger.info(
'Account audit "once" mode completed: %.02fs' % elapsed)
'Account audit "once" mode completed: %.02fs', elapsed)
def account_audit(self, path):
"""
@@ -97,8 +98,8 @@ class AccountAuditor(Daemon):
if not broker.is_deleted():
info = broker.get_info()
self.account_passes += 1
self.logger.debug('Audit passed for %s' % broker.db_file)
self.logger.debug(_('Audit passed for %s') % broker.db_file)
except Exception:
self.account_failures += 1
self.logger.exception('ERROR Could not get account info %s' %
self.logger.exception(_('ERROR Could not get account info %s'),
(broker.db_file))

@@ -18,6 +18,7 @@ import random
from logging import DEBUG
from math import sqrt
from time import time
from gettext import gettext as _
from eventlet import GreenPool, sleep
@@ -77,7 +78,7 @@ class AccountReaper(Daemon):
""" The account :class:`swift.common.ring.Ring` for the cluster. """
if not self.account_ring:
self.logger.debug(
'Loading account ring from %s' % self.account_ring_path)
_('Loading account ring from %s'), self.account_ring_path)
self.account_ring = Ring(self.account_ring_path)
return self.account_ring
@@ -85,7 +86,7 @@ class AccountReaper(Daemon):
""" The container :class:`swift.common.ring.Ring` for the cluster. """
if not self.container_ring:
self.logger.debug(
'Loading container ring from %s' % self.container_ring_path)
_('Loading container ring from %s'), self.container_ring_path)
self.container_ring = Ring(self.container_ring_path)
return self.container_ring
@@ -93,7 +94,7 @@ class AccountReaper(Daemon):
""" The object :class:`swift.common.ring.Ring` for the cluster. """
if not self.object_ring:
self.logger.debug(
'Loading object ring from %s' % self.object_ring_path)
_('Loading object ring from %s'), self.object_ring_path)
self.object_ring = Ring(self.object_ring_path)
return self.object_ring
@@ -103,7 +104,7 @@ class AccountReaper(Daemon):
This repeatedly calls :func:`reap_once` no quicker than the
configuration interval.
"""
self.logger.debug('Daemon started.')
self.logger.debug(_('Daemon started.'))
sleep(random.random() * self.interval)
while True:
begin = time()
@@ -119,17 +120,17 @@ class AccountReaper(Daemon):
repeatedly by :func:`run_forever`. This will call :func:`reap_device`
once for each device on the server.
"""
self.logger.debug('Begin devices pass: %s' % self.devices)
self.logger.debug(_('Begin devices pass: %s'), self.devices)
begin = time()
for device in os.listdir(self.devices):
if self.mount_check and \
not os.path.ismount(os.path.join(self.devices, device)):
self.logger.debug(
'Skipping %s as it is not mounted' % device)
_('Skipping %s as it is not mounted'), device)
continue
self.reap_device(device)
elapsed = time() - begin
self.logger.info('Devices pass completed: %.02fs' % elapsed)
self.logger.info(_('Devices pass completed: %.02fs'), elapsed)
def reap_device(self, device):
"""
@@ -212,7 +213,7 @@ class AccountReaper(Daemon):
"""
begin = time()
account = broker.get_info()['account']
self.logger.info('Beginning pass on account %s' % account)
self.logger.info(_('Beginning pass on account %s'), account)
self.stats_return_codes = {}
self.stats_containers_deleted = 0
self.stats_objects_deleted = 0
@@ -235,12 +236,12 @@ class AccountReaper(Daemon):
self.container_pool.waitall()
except Exception:
self.logger.exception(
'Exception with containers for account %s' % account)
_('Exception with containers for account %s'), account)
marker = containers[-1][0]
log = 'Completed pass on account %s' % account
except Exception:
self.logger.exception(
'Exception with account %s' % account)
_('Exception with account %s'), account)
log = 'Incomplete pass on account %s' % account
if self.stats_containers_deleted:
log += ', %s containers deleted' % self.stats_containers_deleted
@@ -317,7 +318,7 @@ class AccountReaper(Daemon):
except ClientException, err:
if self.logger.getEffectiveLevel() <= DEBUG:
self.logger.exception(
'Exception with %(ip)s:%(port)s/%(device)s' % node)
_('Exception with %(ip)s:%(port)s/%(device)s'), node)
self.stats_return_codes[err.http_status / 100] = \
self.stats_return_codes.get(err.http_status / 100, 0) + 1
if not objects:
@@ -330,8 +331,9 @@ class AccountReaper(Daemon):
nodes, obj['name'])
pool.waitall()
except Exception:
self.logger.exception('Exception with objects for container '
'%s for account %s' % (container, account))
self.logger.exception(_('Exception with objects for container '
'%(container)s for account %(account)s'),
{'container': container, 'account': account})
marker = objects[-1]['name']
successes = 0
failures = 0
@@ -351,7 +353,7 @@ class AccountReaper(Daemon):
except ClientException, err:
if self.logger.getEffectiveLevel() <= DEBUG:
self.logger.exception(
'Exception with %(ip)s:%(port)s/%(device)s' % node)
_('Exception with %(ip)s:%(port)s/%(device)s'), node)
failures += 1
self.stats_return_codes[err.http_status / 100] = \
self.stats_return_codes.get(err.http_status / 100, 0) + 1
@@ -402,7 +404,7 @@ class AccountReaper(Daemon):
except ClientException, err:
if self.logger.getEffectiveLevel() <= DEBUG:
self.logger.exception(
'Exception with %(ip)s:%(port)s/%(device)s' % node)
_('Exception with %(ip)s:%(port)s/%(device)s'), node)
failures += 1
self.stats_return_codes[err.http_status / 100] = \
self.stats_return_codes.get(err.http_status / 100, 0) + 1

@@ -18,15 +18,15 @@ from __future__ import with_statement
import os
import time
import traceback
from urllib import unquote
from xml.sax import saxutils
from gettext import gettext as _
from webob import Request, Response
from webob.exc import HTTPAccepted, HTTPBadRequest, \
HTTPCreated, HTTPForbidden, HTTPInternalServerError, \
HTTPMethodNotAllowed, HTTPNoContent, HTTPNotFound, HTTPPreconditionFailed
import simplejson
from xml.sax import saxutils
from swift.common.db import AccountBroker
from swift.common.utils import get_logger, get_param, hash_path, \
@@ -307,10 +307,8 @@ class AccountController(object):
else:
res = HTTPMethodNotAllowed()
except:
self.logger.exception('ERROR __call__ error with %s %s '
'transaction %s' % (env.get('REQUEST_METHOD', '-'),
env.get('PATH_INFO', '-'), env.get('HTTP_X_CF_TRANS_ID',
'-')))
self.logger.exception(_('ERROR __call__ error with %(method)s'
' %(path)s '), {'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
trans_time = '%.4f' % (time.time() - start_time)
additional_info = ''

@@ -18,6 +18,7 @@ import time
import random
from urlparse import urlparse
from contextlib import contextmanager
from gettext import gettext as _
import eventlet.pools
from eventlet.green.httplib import CannotSendRequest
@@ -82,10 +83,10 @@ class Bench(object):
def _log_status(self, title):
total = time.time() - self.beginbeat
self.logger.info('%s %s [%s failures], %.01f/s' % (
self.complete, title, self.failures,
(float(self.complete) / total),
))
self.logger.info(_('%(complete)s %(title)s [%(fail)s failures], '
'%(rate).01f/s'),
{'title': title, 'complete': self.complete, 'fail': self.failures,
'rate': (float(self.complete) / total)})
@contextmanager
def connection(self):
@@ -94,7 +95,7 @@ class Bench(object):
try:
yield hc
except CannotSendRequest:
self.logger.info("CannotSendRequest. Skipping...")
self.logger.info(_("CannotSendRequest. Skipping..."))
try:
hc.close()
except:

@@ -29,6 +29,7 @@ BufferedHTTPResponse.
from urllib import quote
import logging
import time
from gettext import gettext as _
from eventlet.green.httplib import CONTINUE, HTTPConnection, HTTPMessage, \
HTTPResponse, HTTPSConnection, _UNKNOWN
@@ -82,15 +83,9 @@ class BufferedHTTPConnection(HTTPConnection):
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
self._method = method
self._path = url
self._txn_id = '-'
return HTTPConnection.putrequest(self, method, url, skip_host,
skip_accept_encoding)
def putheader(self, header, value):
if header.lower() == 'x-cf-trans-id':
self._txn_id = value
return HTTPConnection.putheader(self, header, value)
def getexpect(self):
response = BufferedHTTPResponse(self.sock, strict=self.strict,
method=self._method)
@@ -99,9 +94,10 @@ class BufferedHTTPConnection(HTTPConnection):
def getresponse(self):
response = HTTPConnection.getresponse(self)
logging.debug("HTTP PERF: %.5f seconds to %s %s:%s %s (%s)" %
(time.time() - self._connected_time, self._method, self.host,
self.port, self._path, self._txn_id))
logging.debug(_("HTTP PERF: %(time).5f seconds to %(method)s "
"%(host)s:%(port)s %(path)s)"),
{'time': time.time() - self._connected_time, 'method': self._method,
'host': self.host, 'port': self.port, 'path': self._path})
return response

@@ -27,6 +27,7 @@ import cPickle as pickle
import errno
from random import randint
from tempfile import mkstemp
from gettext import gettext as _
from eventlet import sleep
import simplejson as json
@@ -295,7 +296,7 @@ class DatabaseBroker(object):
self.conn = conn
except: # pragma: no cover
logging.exception(
'Broker error trying to rollback locked connection')
_('Broker error trying to rollback locked connection'))
conn.close()
def newid(self, remote_id):
@@ -750,8 +751,8 @@ class ContainerBroker(DatabaseBroker):
'deleted': deleted})
except:
self.logger.exception(
'Invalid pending entry %s: %s'
% (self.pending_file, entry))
_('Invalid pending entry %(file)s: %(entry)s'),
{'file': self.pending_file, 'entry': entry})
if item_list:
self.merge_items(item_list)
try:
@@ -1217,8 +1218,8 @@ class AccountBroker(DatabaseBroker):
'deleted': deleted})
except:
self.logger.exception(
'Invalid pending entry %s: %s'
% (self.pending_file, entry))
_('Invalid pending entry %(file)s: %(entry)s'),
{'file': self.pending_file, 'entry': entry})
if item_list:
self.merge_items(item_list)
try:

@@ -20,6 +20,7 @@ import random
import math
import time
import shutil
from gettext import gettext as _
from eventlet import GreenPool, sleep, Timeout
from eventlet.green import subprocess
@@ -81,7 +82,7 @@ class ReplConnection(BufferedHTTPConnection):
return response
except:
self.logger.exception(
'ERROR reading HTTP response from %s' % self.node)
_('ERROR reading HTTP response from %s'), self.node)
return None
@@ -120,12 +121,14 @@ class Replicator(Daemon):
def _report_stats(self):
"""Report the current stats to the logs."""
self.logger.info(
'Attempted to replicate %d dbs in %.5f seconds (%.5f/s)'
% (self.stats['attempted'], time.time() - self.stats['start'],
self.stats['attempted'] /
(time.time() - self.stats['start'] + 0.0000001)))
self.logger.info('Removed %(remove)d dbs' % self.stats)
self.logger.info('%(success)s successes, %(failure)s failures'
_('Attempted to replicate %(count)d dbs in %(time).5f seconds '
'(%(rate).5f/s)'),
{'count': self.stats['attempted'],
'time': time.time() - self.stats['start'],
'rate': self.stats['attempted'] /
(time.time() - self.stats['start'] + 0.0000001)})
self.logger.info(_('Removed %(remove)d dbs') % self.stats)
self.logger.info(_('%(success)s successes, %(failure)s failures')
% self.stats)
self.logger.info(' '.join(['%s:%s' % item for item in
self.stats.items() if item[0] in
@@ -150,8 +153,8 @@ class Replicator(Daemon):
proc = subprocess.Popen(popen_args)
proc.communicate()
if proc.returncode != 0:
self.logger.error('ERROR rsync failed with %s: %s' %
(proc.returncode, popen_args))
self.logger.error(_('ERROR rsync failed with %(code)s: %(args)s'),
{'code': proc.returncode, 'args': popen_args})
return proc.returncode == 0
def _rsync_db(self, broker, device, http, local_id,
@ -200,7 +203,7 @@ class Replicator(Daemon):
:returns: boolean indicating completion and success
"""
self.stats['diff'] += 1
self.logger.debug('Syncing chunks with %s', http.host)
self.logger.debug(_('Syncing chunks with %s'), http.host)
sync_table = broker.get_syncs()
objects = broker.get_items_since(point, self.per_diff)
while len(objects):
@@ -208,8 +211,9 @@ class Replicator(Daemon):
response = http.replicate('merge_items', objects, local_id)
if not response or response.status >= 300 or response.status < 200:
if response:
self.logger.error('ERROR Bad response %s from %s' %
(response.status, http.host))
self.logger.error(_('ERROR Bad response %(status)s from '
'%(host)s'),
{'status': response.status, 'host': http.host})
return False
point = objects[-1]['ROWID']
objects = broker.get_items_since(point, self.per_diff)
@@ -272,7 +276,7 @@ class Replicator(Daemon):
http = self._http_connect(node, partition, broker.db_file)
if not http:
self.logger.error(
'ERROR Unable to connect to remote server: %s' % node)
_('ERROR Unable to connect to remote server: %s'), node)
return False
with Timeout(self.node_timeout):
response = http.replicate('sync', info['max_row'], info['hash'],
@@ -310,7 +314,7 @@ class Replicator(Daemon):
:param object_file: DB file name to be replicated
:param node_id: node id of the node to be replicated to
"""
self.logger.debug('Replicating db %s' % object_file)
self.logger.debug(_('Replicating db %s'), object_file)
self.stats['attempted'] += 1
try:
broker = self.brokerclass(object_file, pending_timeout=30)
@@ -319,10 +323,10 @@ class Replicator(Daemon):
info = broker.get_replication_info()
except Exception, e:
if 'no such table' in str(e):
self.logger.error('Quarantining DB %s' % object_file)
self.logger.error(_('Quarantining DB %s'), object_file)
quarantine_db(broker.db_file, broker.db_type)
else:
self.logger.exception('ERROR reading db %s' % object_file)
self.logger.exception(_('ERROR reading db %s'), object_file)
self.stats['failure'] += 1
return
# The db is considered deleted if the delete_timestamp value is greater
@@ -355,10 +359,10 @@ class Replicator(Daemon):
success = self._repl_to_node(node, broker, partition, info)
except DriveNotMounted:
repl_nodes.append(more_nodes.next())
self.logger.error('ERROR Remote drive not mounted %s' % node)
self.logger.error(_('ERROR Remote drive not mounted %s'), node)
except:
self.logger.exception('ERROR syncing %s with node %s' %
(object_file, node))
self.logger.exception(_('ERROR syncing %(file)s with node'
' %(node)s'), {'file': object_file, 'node': node})
self.stats['success' if success else 'failure'] += 1
responses.append(success)
if not shouldbehere and all(responses):
@@ -399,14 +403,14 @@ class Replicator(Daemon):
dirs = []
ips = whataremyips()
if not ips:
self.logger.error('ERROR Failed to get my own IPs?')
self.logger.error(_('ERROR Failed to get my own IPs?'))
return
for node in self.ring.devs:
if node and node['ip'] in ips and node['port'] == self.port:
if self.mount_check and not os.path.ismount(
os.path.join(self.root, node['device'])):
self.logger.warn(
'Skipping %(device)s as it is not mounted' % node)
_('Skipping %(device)s as it is not mounted') % node)
continue
unlink_older_than(
os.path.join(self.root, node['device'], 'tmp'),
@@ -414,12 +418,12 @@ class Replicator(Daemon):
datadir = os.path.join(self.root, node['device'], self.datadir)
if os.path.isdir(datadir):
dirs.append((datadir, node['id']))
self.logger.info('Beginning replication run')
self.logger.info(_('Beginning replication run'))
for part, object_file, node_id in self.roundrobin_datadirs(dirs):
self.cpool.spawn_n(
self._replicate_object, part, object_file, node_id)
self.cpool.waitall()
self.logger.info('Replication run OVER')
self.logger.info(_('Replication run OVER'))
self._report_stats()
def run_forever(self):
@@ -430,7 +434,7 @@ class Replicator(Daemon):
try:
self.run_once()
except:
self.logger.exception('ERROR trying to replicate')
self.logger.exception(_('ERROR trying to replicate'))
sleep(self.run_pause)
@@ -473,7 +477,7 @@ class ReplicatorRpc(object):
except Exception, e:
if 'no such table' in str(e):
# TODO(unknown): find a real logger
print "Quarantining DB %s" % broker.db_file
print _("Quarantining DB %s") % broker.db_file
quarantine_db(broker.db_file, broker.db_type)
return HTTPNotFound()
raise

@@ -26,7 +26,7 @@ import socket
import time
from bisect import bisect
from hashlib import md5
from gettext import gettext as _
CONN_TIMEOUT = 0.3
IO_TIMEOUT = 2.0
@@ -67,9 +67,11 @@ class MemcacheRing(object):
def _exception_occurred(self, server, e, action='talking'):
if isinstance(e, socket.timeout):
logging.error("Timeout %s to memcached: %s" % (action, server))
logging.error(_("Timeout %(action)s to memcached: %(server)s"),
{'action': action, 'server': server})
else:
logging.exception("Error %s to memcached: %s" % (action, server))
logging.exception(_("Error %(action)s to memcached: %(server)s"),
{'action': action, 'server': server})
now = time.time()
self._errors[server].append(time.time())
if len(self._errors[server]) > ERROR_LIMIT_COUNT:
@@ -77,7 +79,7 @@ class MemcacheRing(object):
if err > now - ERROR_LIMIT_TIME]
if len(self._errors[server]) > ERROR_LIMIT_COUNT:
self._error_limited[server] = now + ERROR_LIMIT_DURATION
logging.error('Error limiting server %s' % server)
logging.error(_('Error limiting server %s'), server)
def _get_conns(self, key):
"""

@@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from gettext import gettext as _
from webob import Request
from webob.exc import HTTPServerError
@@ -32,7 +34,7 @@ class CatchErrorMiddleware(object):
try:
return self.app(env, start_response)
except Exception, err:
self.logger.exception('Error: %s' % err)
self.logger.exception(_('Error: %s'), err)
resp = HTTPServerError(request=Request(env),
body='An error occurred',
content_type='text/plain')

@@ -15,6 +15,7 @@ import time
import eventlet
from webob import Request, Response
from webob.exc import HTTPNotFound
from gettext import gettext as _
from swift.common.utils import split_path, cache_from_env, get_logger
from swift.proxy.server import get_container_memcache_key
@@ -167,7 +168,7 @@ class RateLimitMiddleware(object):
:param obj_name: object name from path
'''
if account_name in self.ratelimit_blacklist:
self.logger.error('Returning 497 because of blacklisting')
self.logger.error(_('Returning 497 because of blacklisting'))
return Response(status='497 Blacklisted',
body='Your account has been blacklisted', request=req)
if account_name in self.ratelimit_whitelist:
@@ -181,14 +182,15 @@ class RateLimitMiddleware(object):
need_to_sleep = self._get_sleep_time(key, max_rate)
if self.log_sleep_time_seconds and \
need_to_sleep > self.log_sleep_time_seconds:
self.logger.info("Ratelimit sleep log: %s for %s/%s/%s" % (
need_to_sleep, account_name,
container_name, obj_name))
self.logger.info(_("Ratelimit sleep log: %(sleep)s for "
"%(account)s/%(container)s/%(object)s"),
{'sleep': need_to_sleep, 'account': account_name,
'container': container_name, 'object': obj_name})
if need_to_sleep > 0:
eventlet.sleep(need_to_sleep)
except MaxSleepTimeHit, e:
self.logger.error('Returning 498 because of ops ' + \
'rate limiting (Max Sleep) %s' % e)
self.logger.error(_('Returning 498 because of ops rate '
'limiting (Max Sleep) %s') % str(e))
error_resp = Response(status='498 Rate Limited',
body='Slow down', request=req)
return error_resp
@@ -207,7 +209,7 @@ class RateLimitMiddleware(object):
self.memcache_client = cache_from_env(env)
if not self.memcache_client:
self.logger.warning(
'Warning: Cannot ratelimit without a memcached client')
_('Warning: Cannot ratelimit without a memcached client'))
return self.app(env, start_response)
try:
version, account, container, obj = split_path(req.path, 1, 4, True)

@@ -34,7 +34,7 @@ from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from optparse import OptionParser
from tempfile import mkstemp
import cPickle as pickle
from gettext import gettext as _
import eventlet
from eventlet import greenio, GreenPool, sleep, Timeout, listen
@@ -85,8 +85,8 @@ def load_libc_function(func_name):
libc = ctypes.CDLL(ctypes.util.find_library('c'))
return getattr(libc, func_name)
except AttributeError:
logging.warn("Unable to locate %s in libc. Leaving as a no-op."
% func_name)
logging.warn(_("Unable to locate %s in libc. Leaving as a no-op."),
func_name)
def noop_libc_function(*args):
return 0
@@ -252,12 +252,12 @@ class LoggerFileObject(object):
value = value.strip()
if value:
if 'Connection reset by peer' in value:
self.logger.error('STDOUT: Connection reset by peer')
self.logger.error(_('STDOUT: Connection reset by peer'))
else:
self.logger.error('STDOUT: %s' % value)
self.logger.error(_('STDOUT: %s'), value)
def writelines(self, values):
self.logger.error('STDOUT: %s' % '#012'.join(values))
self.logger.error(_('STDOUT: %s'), '#012'.join(values))
def close(self):
pass
@@ -462,12 +462,12 @@ def parse_options(usage="%prog CONFIG [options]", once=False, test_args=None):
if not args:
parser.print_usage()
print "Error: missing config file argument"
print _("Error: missing config file argument")
sys.exit(1)
config = os.path.abspath(args.pop(0))
if not os.path.exists(config):
parser.print_usage()
print "Error: unable to locate %s" % config
print _("Error: unable to locate %s") % config
sys.exit(1)
extra_args = []
@@ -690,14 +690,14 @@ def readconf(conf, section_name=None, log_name=None, defaults=None):
defaults = {}
c = ConfigParser(defaults)
if not c.read(conf):
print "Unable to read config file %s" % conf
print _("Unable to read config file %s") % conf
sys.exit(1)
if section_name:
if c.has_section(section_name):
conf = dict(c.items(section_name))
else:
print "Unable to find %s config section in %s" % (section_name,
conf)
print _("Unable to find %s config section in %s") % \
(section_name, conf)
sys.exit(1)
if "log_name" not in conf:
if log_name is not None:
@@ -749,7 +749,7 @@ def audit_location_generator(devices, datadir, mount_check=True, logger=None):
os.path.ismount(os.path.join(devices, device)):
if logger:
logger.debug(
'Skipping %s as it is not mounted' % device)
_('Skipping %s as it is not mounted'), device)
continue
datadir = os.path.join(devices, device, datadir)
if not os.path.exists(datadir):

@@ -16,6 +16,7 @@
import os
import time
from random import random
from gettext import gettext as _
from swift.container import server as container_server
from swift.common.db import ContainerBroker
@@ -51,10 +52,11 @@ class ContainerAuditor(Daemon):
self.container_audit(path)
if time.time() - reported >= 3600: # once an hour
self.logger.info(
'Since %s: Container audits: %s passed audit, '
'%s failed audit' % (time.ctime(reported),
self.container_passes,
self.container_failures))
_('Since %(time)s: Container audits: %(pass)s passed '
'audit, %(fail)s failed audit'),
{'time': time.ctime(reported),
'pass': self.container_passes,
'fail': self.container_failures})
reported = time.time()
self.container_passes = 0
self.container_failures = 0
@@ -64,7 +66,7 @@ class ContainerAuditor(Daemon):
def run_once(self):
"""Run the container audit once."""
self.logger.info('Begin container audit "once" mode')
self.logger.info(_('Begin container audit "once" mode'))
begin = reported = time.time()
all_locs = audit_location_generator(self.devices,
container_server.DATADIR,
@@ -74,16 +76,17 @@ class ContainerAuditor(Daemon):
self.container_audit(path)
if time.time() - reported >= 3600: # once an hour
self.logger.info(
'Since %s: Container audits: %s passed audit, '
'%s failed audit' % (time.ctime(reported),
self.container_passes,
self.container_failures))
_('Since %(time)s: Container audits: %(pass)s passed '
'audit, %(fail)s failed audit'),
{'time': time.ctime(reported),
'pass': self.container_passes,
'fail': self.container_failures})
reported = time.time()
self.container_passes = 0
self.container_failures = 0
elapsed = time.time() - begin
self.logger.info(
'Container audit "once" mode completed: %.02fs' % elapsed)
_('Container audit "once" mode completed: %.02fs'), elapsed)
def container_audit(self, path):
"""
@@ -98,8 +101,8 @@ class ContainerAuditor(Daemon):
if not broker.is_deleted():
info = broker.get_info()
self.container_passes += 1
self.logger.debug('Audit passed for %s' % broker.db_file)
self.logger.debug(_('Audit passed for %s'), broker.db_file)
except Exception:
self.container_failures += 1
self.logger.exception('ERROR Could not get container info %s' %
self.logger.exception(_('ERROR Could not get container info %s'),
(broker.db_file))

@@ -21,6 +21,7 @@ import traceback
from urllib import unquote
from xml.sax import saxutils
from datetime import datetime
from gettext import gettext as _
import simplejson
from eventlet.timeout import Timeout
@@ -111,18 +112,18 @@ class ContainerController(object):
return HTTPNotFound(request=req)
elif account_response.status < 200 or \
account_response.status > 299:
self.logger.error('ERROR Account update failed '
'with %s:%s/%s transaction %s (will retry '
'later): Response %s %s' % (account_ip,
account_port, account_device,
req.headers.get('x-cf-trans-id'),
account_response.status,
account_response.reason))
self.logger.error(_('ERROR Account update failed '
'with %(ip)s:%(port)s/%(device)s (will retry '
'later): Response %(status)s %(reason)s'),
{'ip': account_ip, 'port': account_port,
'device': account_device,
'status': account_response.status,
'reason': account_response.reason})
except:
self.logger.exception('ERROR account update failed with '
'%s:%s/%s transaction %s (will retry later)' %
(account_ip, account_port, account_device,
req.headers.get('x-cf-trans-id', '-')))
self.logger.exception(_('ERROR account update failed with '
'%(ip)s:%(port)s/%(device)s (will retry later)'),
{'ip': account_ip, 'port': account_port,
'device': account_device})
return None
def DELETE(self, req):
@@ -394,10 +395,8 @@ class ContainerController(object):
else:
res = HTTPMethodNotAllowed()
except:
self.logger.exception('ERROR __call__ error with %s %s '
'transaction %s' % (env.get('REQUEST_METHOD', '-'),
env.get('PATH_INFO', '-'), env.get('HTTP_X_CF_TRANS_ID',
'-')))
self.logger.exception(_('ERROR __call__ error with %(method)s'
' %(path)s '), {'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
trans_time = '%.4f' % (time.time() - start_time)
log_message = '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %s' % (

@@ -19,6 +19,7 @@ import signal
import sys
import time
from random import random, shuffle
from gettext import gettext as _
from eventlet import spawn, patcher, Timeout
@@ -56,7 +57,7 @@ class ContainerUpdater(Daemon):
"""Get the account ring. Load it if it hasn't been yet."""
if not self.account_ring:
self.logger.debug(
'Loading account ring from %s' % self.account_ring_path)
_('Loading account ring from %s'), self.account_ring_path)
self.account_ring = Ring(self.account_ring_path)
return self.account_ring
@@ -70,7 +71,7 @@ class ContainerUpdater(Daemon):
for device in os.listdir(self.devices):
dev_path = os.path.join(self.devices, device)
if self.mount_check and not os.path.ismount(dev_path):
self.logger.warn('%s is not mounted' % device)
self.logger.warn(_('%s is not mounted'), device)
continue
con_path = os.path.join(dev_path, DATADIR)
if not os.path.exists(con_path):
@@ -86,7 +87,7 @@ class ContainerUpdater(Daemon):
"""
time.sleep(random() * self.interval)
while True:
self.logger.info('Begin container update sweep')
self.logger.info(_('Begin container update sweep'))
begin = time.time()
pids = []
# read from account ring to ensure it's fresh
@@ -107,15 +108,17 @@ class ContainerUpdater(Daemon):
self.container_sweep(path)
elapsed = time.time() - forkbegin
self.logger.debug(
'Container update sweep of %s completed: '
'%.02fs, %s successes, %s failures, %s with no changes'
% (path, elapsed, self.successes, self.failures,
self.no_changes))
_('Container update sweep of %(path)s completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s '
'failures, %(no_change)s with no changes'),
{'path': path, 'elapsed': elapsed,
'success': self.successes, 'fail': self.failures,
'no_change': self.no_changes})
sys.exit()
while pids:
pids.remove(os.wait()[0])
elapsed = time.time() - begin
self.logger.info('Container update sweep completed: %.02fs' %
self.logger.info(_('Container update sweep completed: %.02fs'),
elapsed)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
@@ -133,9 +136,11 @@ class ContainerUpdater(Daemon):
for path in self.get_paths():
self.container_sweep(path)
elapsed = time.time() - begin
self.logger.info('Container update single threaded sweep completed: '
'%.02fs, %s successes, %s failures, %s with no changes' %
(elapsed, self.successes, self.failures, self.no_changes))
self.logger.info(_('Container update single threaded sweep completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s failures, '
'%(no_change)s with no changes'),
{'elapsed': elapsed, 'success': self.successes,
'fail': self.failures, 'no_change': self.no_changes})
def container_sweep(self, path):
"""
@@ -181,14 +186,16 @@ class ContainerUpdater(Daemon):
if successes > failures:
self.successes += 1
self.logger.debug(
'Update report sent for %s %s' % (container, dbfile))
_('Update report sent for %(container)s %(dbfile)s'),
{'container': container, 'dbfile': dbfile})
broker.reported(info['put_timestamp'],
info['delete_timestamp'], info['object_count'],
info['bytes_used'])
else:
self.failures += 1
self.logger.debug(
'Update report failed for %s %s' % (container, dbfile))
_('Update report failed for %(container)s %(dbfile)s'),
{'container': container, 'dbfile': dbfile})
else:
self.no_changes += 1
@@ -216,8 +223,8 @@ class ContainerUpdater(Daemon):
'X-Bytes-Used': bytes,
'X-Account-Override-Deleted': 'yes'})
except:
self.logger.exception('ERROR account update failed with '
'%(ip)s:%(port)s/%(device)s (will retry later): ' % node)
self.logger.exception(_('ERROR account update failed with '
'%(ip)s:%(port)s/%(device)s (will retry later): '), node)
return 500
with Timeout(self.node_timeout):
try:
@@ -227,5 +234,5 @@ class ContainerUpdater(Daemon):
except:
if self.logger.getEffectiveLevel() <= logging.DEBUG:
self.logger.exception(
'Exception with %(ip)s:%(port)s/%(device)s' % node)
_('Exception with %(ip)s:%(port)s/%(device)s'), node)
return 500

@@ -17,6 +17,7 @@ import os
import time
from hashlib import md5
from random import random
from gettext import gettext as _
from swift.obj import server as object_server
from swift.obj.replicator import invalidate_hash
@@ -52,10 +53,10 @@ class ObjectAuditor(Daemon):
for path, device, partition in all_locs:
self.object_audit(path, device, partition)
if time.time() - reported >= 3600: # once an hour
self.logger.info(
'Since %s: Locally: %d passed audit, %d quarantined, '
'%d errors' % (time.ctime(reported), self.passes,
self.quarantines, self.errors))
self.logger.info(_('Since %(time)s: Locally: %(pass)d '
'passed audit, %(quar)d quarantined, %(error)d errors'),
{'time': time.ctime(reported), 'pass': self.passes,
'quar': self.quarantines, 'error': self.errors})
reported = time.time()
self.passes = 0
self.quarantines = 0
@@ -66,7 +67,7 @@ class ObjectAuditor(Daemon):
def run_once(self):
"""Run the object audit once."""
self.logger.info('Begin object audit "once" mode')
self.logger.info(_('Begin object audit "once" mode'))
begin = reported = time.time()
all_locs = audit_location_generator(self.devices,
object_server.DATADIR,
@@ -75,17 +76,17 @@ class ObjectAuditor(Daemon):
for path, device, partition in all_locs:
self.object_audit(path, device, partition)
if time.time() - reported >= 3600: # once an hour
self.logger.info(
'Since %s: Locally: %d passed audit, %d quarantined, '
'%d errors' % (time.ctime(reported), self.passes,
self.quarantines, self.errors))
self.logger.info(_('Since %(time)s: Locally: %(pass)d '
'passed audit, %(quar)d quarantined, %(error)d errors'),
{'time': time.ctime(reported), 'pass': self.passes,
'quar': self.quarantines, 'error': self.errors})
reported = time.time()
self.passes = 0
self.quarantines = 0
self.errors = 0
elapsed = time.time() - begin
self.logger.info(
'Object audit "once" mode completed: %.02fs' % elapsed)
_('Object audit "once" mode completed: %.02fs'), elapsed)
def object_audit(self, path, device, partition):
"""
@@ -124,8 +125,8 @@ class ObjectAuditor(Daemon):
"%s" % (df.metadata['ETag'], etag))
except AuditException, err:
self.quarantines += 1
self.logger.error('ERROR Object %s failed audit and will be '
'quarantined: %s' % (path, err))
self.logger.error(_('ERROR Object %(obj)s failed audit and will be '
'quarantined: %(err)s'), {'obj': path, 'err': err})
invalidate_hash(os.path.dirname(path))
renamer_path = os.path.dirname(path)
renamer(renamer_path, os.path.join(self.devices, device,
@@ -133,6 +134,6 @@ class ObjectAuditor(Daemon):
return
except Exception:
self.errors += 1
self.logger.exception('ERROR Trying to audit %s' % path)
self.logger.exception(_('ERROR Trying to audit %s'), path)
return
self.passes += 1

@@ -22,6 +22,7 @@ import logging
import hashlib
import itertools
import cPickle as pickle
from gettext import gettext as _
import eventlet
from eventlet import GreenPool, tpool, Timeout, sleep, hubs
@@ -243,26 +244,27 @@ class ObjectReplicator(Daemon):
results = proc.stdout.read()
ret_val = proc.wait()
except Timeout:
self.logger.error("Killing long-running rsync: %s" % str(args))
self.logger.error(_("Killing long-running rsync: %s"), str(args))
proc.kill()
return 1 # failure response code
total_time = time.time() - start_time
if results:
for result in results.split('\n'):
if result == '':
continue
if result.startswith('cd+'):
continue
self.logger.info(result)
for result in results.split('\n'):
if result == '':
continue
if result.startswith('cd+'):
continue
self.logger.info(result)
if ret_val:
self.logger.error(_('Bad rsync return code: %s -> %d'),
str(args), ret_val)
elif results:
self.logger.info(
"Sync of %s at %s complete (%.03f) [%d]" % (
args[-2], args[-1], total_time, ret_val))
_("Successful rsync of %(src)s at %(dst)s (%(time).03f)"),
{'src': args[-2], 'dst': args[-1], 'time': total_time})
else:
self.logger.debug(
"Sync of %s at %s complete (%.03f) [%d]" % (
args[-2], args[-1], total_time, ret_val))
if ret_val:
self.logger.error('Bad rsync return code: %d' % ret_val)
_("Successful rsync of %(src)s at %(dst)s (%(time).03f)"),
{'src': args[-2], 'dst': args[-1], 'time': total_time})
return ret_val
def rsync(self, node, job, suffixes):
@@ -346,10 +348,10 @@ class ObjectReplicator(Daemon):
responses.append(success)
if not suffixes or (len(responses) == \
self.object_ring.replica_count and all(responses)):
self.logger.info("Removing partition: %s" % job['path'])
self.logger.info(_("Removing partition: %s"), job['path'])
tpool.execute(shutil.rmtree, job['path'], ignore_errors=True)
except (Exception, Timeout):
self.logger.exception("Error syncing handoff partition")
self.logger.exception(_("Error syncing handoff partition"))
finally:
self.partition_times.append(time.time() - begin)
@@ -379,13 +381,14 @@ class ObjectReplicator(Daemon):
node['device'], job['partition'], 'REPLICATE',
'', headers={'Content-Length': '0'}).getresponse()
if resp.status == 507:
self.logger.error('%s/%s responded as unmounted' %
(node['ip'], node['device']))
self.logger.error(_('%(ip)s/%(device)s responded'
' as unmounted'), node)
attempts_left += 1
continue
if resp.status != 200:
self.logger.error("Invalid response %s from %s" %
(resp.status, node['ip']))
self.logger.error(_("Invalid response %(resp)s "
"from %(ip)s"),
{'resp': resp.status, 'ip': node['ip']})
continue
remote_hash = pickle.loads(resp.read())
del resp
@ -408,7 +411,7 @@ class ObjectReplicator(Daemon):
logging.exception("Error syncing with node: %s" % node)
self.suffix_count += len(local_hash)
except (Exception, Timeout):
self.logger.exception("Error syncing partition")
self.logger.exception(_("Error syncing partition"))
finally:
self.partition_times.append(time.time() - begin)
@@ -418,27 +421,30 @@ class ObjectReplicator(Daemon):
"""
if self.replication_count:
rate = self.replication_count / (time.time() - self.start)
self.logger.info("%d/%d (%.2f%%) partitions replicated in %.2f "
"seconds (%.2f/sec, %s remaining)"
% (self.replication_count, self.job_count,
self.replication_count * 100.0 / self.job_count,
time.time() - self.start, rate,
'%d%s' % compute_eta(self.start,
self.replication_count, self.job_count)))
self.logger.info(_("%(replicated)d/%(total)d (%(percentage).2f%%)"
" partitions replicated in %(time).2fs (%(rate).2f/sec, "
"%(remaining)s remaining)"),
{'replicated': self.replication_count, 'total': self.job_count,
'percentage': self.replication_count * 100.0 / self.job_count,
'time': time.time() - self.start, 'rate': rate,
'remaining': '%d%s' % compute_eta(self.start,
self.replication_count, self.job_count)})
if self.suffix_count:
self.logger.info("%d suffixes checked - %.2f%% hashed, "
"%.2f%% synced" %
(self.suffix_count,
(self.suffix_hash * 100.0) / self.suffix_count,
(self.suffix_sync * 100.0) / self.suffix_count))
self.logger.info(_("%(checked)d suffixes checked - "
"%(hashed).2f%% hashed, %(synced).2f%% synced"),
{'checked': self.suffix_count,
'hashed': (self.suffix_hash * 100.0) / self.suffix_count,
'synced': (self.suffix_sync * 100.0) / self.suffix_count})
self.partition_times.sort()
self.logger.info("Partition times: max %.4fs, min %.4fs, "
"med %.4fs"
% (self.partition_times[-1], self.partition_times[0],
self.partition_times[len(self.partition_times) // 2]))
self.logger.info(_("Partition times: max %(max).4fs, "
"min %(min).4fs, med %(med).4fs"),
{'max': self.partition_times[-1],
'min': self.partition_times[0],
'med': self.partition_times[
len(self.partition_times) // 2]})
else:
self.logger.info("Nothing replicated for %s seconds."
% (time.time() - self.start))
self.logger.info(_("Nothing replicated for %s seconds."),
(time.time() - self.start))
def kill_coros(self):
"""Utility function that kills all coroutines currently running."""
@@ -466,7 +472,7 @@ class ObjectReplicator(Daemon):
while True:
eventlet.sleep(self.lockup_timeout)
if self.replication_count == self.last_replication_count:
self.logger.error("Lockup detected.. killing live coros.")
self.logger.error(_("Lockup detected.. killing live coros."))
self.kill_coros()
self.last_replication_count = self.replication_count
@@ -483,7 +489,7 @@ class ObjectReplicator(Daemon):
obj_path = join(dev_path, 'objects')
tmp_path = join(dev_path, 'tmp')
if self.mount_check and not os.path.ismount(dev_path):
self.logger.warn('%s is not mounted' % local_dev['device'])
self.logger.warn(_('%s is not mounted'), local_dev['device'])
continue
unlink_older_than(tmp_path, time.time() - self.reclaim_age)
if not os.path.exists(obj_path):
@@ -521,8 +527,8 @@ class ObjectReplicator(Daemon):
jobs = self.collect_jobs()
for job in jobs:
if not self.check_ring():
self.logger.info(
"Ring change detected. Aborting current replication pass.")
self.logger.info(_("Ring change detected. Aborting "
"current replication pass."))
return
if job['delete']:
self.run_pool.spawn(self.update_deleted, job)
@ -531,7 +537,7 @@ class ObjectReplicator(Daemon):
with Timeout(self.lockup_timeout):
self.run_pool.waitall()
except (Exception, Timeout):
self.logger.exception("Exception in top-level replication loop")
self.logger.exception(_("Exception in top-level replication loop"))
self.kill_coros()
finally:
stats.kill()
@@ -540,23 +546,23 @@ class ObjectReplicator(Daemon):
def run_once(self):
start = time.time()
self.logger.info("Running object replicator in script mode.")
self.logger.info(_("Running object replicator in script mode."))
self.replicate()
total = (time.time() - start) / 60
self.logger.info(
"Object replication complete. (%.02f minutes)" % total)
_("Object replication complete. (%.02f minutes)"), total)
def run_forever(self):
self.logger.info("Starting object replicator in daemon mode.")
# Run the replicator continually
while True:
start = time.time()
self.logger.info("Starting object replication pass.")
self.logger.info(_("Starting object replication pass."))
# Run the replicator
self.replicate()
total = (time.time() - start) / 60
self.logger.info(
"Object replication complete. (%.02f minutes)" % total)
self.logger.debug('Replication sleeping for %s seconds.' %
_("Object replication complete. (%.02f minutes)"), total)
self.logger.debug(_('Replication sleeping for %s seconds.'),
self.run_pause)
sleep(self.run_pause)

@@ -26,6 +26,7 @@ from hashlib import md5
from tempfile import mkstemp
from urllib import unquote
from contextlib import contextmanager
from gettext import gettext as _
from webob import Request, Response, UTC
from webob.exc import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
@@ -292,13 +293,15 @@ class ObjectController(object):
if 200 <= response.status < 300:
return
else:
self.logger.error('ERROR Container update failed (saving '
'for async update later): %d response from %s:%s/%s' %
(response.status, ip, port, contdevice))
self.logger.error(_('ERROR Container update failed '
'(saving for async update later): %(status)d '
'response from %(ip)s:%(port)s/%(dev)s'),
{'status': response.status, 'ip': ip, 'port': port,
'dev': contdevice})
except:
self.logger.exception('ERROR container update failed with '
'%s:%s/%s transaction %s (saving for async update later)' %
(ip, port, contdevice, headers_in.get('x-cf-trans-id', '-')))
self.logger.exception(_('ERROR container update failed with '
'%(ip)s:%(port)s/%(dev)s (saving for async update later)'),
{'ip': ip, 'port': port, 'dev': contdevice})
async_dir = os.path.join(self.devices, objdevice, ASYNCDIR)
ohash = hash_path(account, container, obj)
write_pickle(
@@ -565,10 +568,8 @@ class ObjectController(object):
else:
res = HTTPMethodNotAllowed()
except:
self.logger.exception('ERROR __call__ error with %s %s '
'transaction %s' % (env.get('REQUEST_METHOD', '-'),
env.get('PATH_INFO', '-'), env.get('HTTP_X_CF_TRANS_ID',
'-')))
self.logger.exception(_('ERROR __call__ error with %(method)s'
' %(path)s '), {'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
trans_time = time.time() - start_time
if self.log_requests:

@@ -19,6 +19,7 @@ import signal
import sys
import time
from random import random
from gettext import gettext as _
from eventlet import patcher, Timeout
@@ -54,7 +55,7 @@ class ObjectUpdater(Daemon):
"""Get the container ring. Load it, if it hasn't been yet."""
if not self.container_ring:
self.logger.debug(
'Loading container ring from %s' % self.container_ring_path)
_('Loading container ring from %s'), self.container_ring_path)
self.container_ring = Ring(self.container_ring_path)
return self.container_ring
@@ -62,7 +63,7 @@ class ObjectUpdater(Daemon):
"""Run the updater continuously."""
time.sleep(random() * self.interval)
while True:
self.logger.info('Begin object update sweep')
self.logger.info(_('Begin object update sweep'))
begin = time.time()
pids = []
# read from container ring to ensure it's fresh
@@ -71,7 +72,7 @@ class ObjectUpdater(Daemon):
if self.mount_check and not \
os.path.ismount(os.path.join(self.devices, device)):
self.logger.warn(
'Skipping %s as it is not mounted' % device)
_('Skipping %s as it is not mounted'), device)
continue
while len(pids) >= self.concurrency:
pids.remove(os.wait()[0])
@@ -86,20 +87,23 @@ class ObjectUpdater(Daemon):
forkbegin = time.time()
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - forkbegin
self.logger.info('Object update sweep of %s completed: '
'%.02fs, %s successes, %s failures' %
(device, elapsed, self.successes, self.failures))
self.logger.info(_('Object update sweep of %(device)s'
' completed: %(elapsed).02fs, %(success)s successes'
', %(fail)s failures'),
{'device': device, 'elapsed': elapsed,
'success': self.successes, 'fail': self.failures})
sys.exit()
while pids:
pids.remove(os.wait()[0])
elapsed = time.time() - begin
self.logger.info('Object update sweep completed: %.02fs' % elapsed)
self.logger.info(_('Object update sweep completed: %.02fs'),
elapsed)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self):
"""Run the updater once"""
self.logger.info('Begin object update single threaded sweep')
self.logger.info(_('Begin object update single threaded sweep'))
begin = time.time()
self.successes = 0
self.failures = 0
@@ -107,13 +111,14 @@ class ObjectUpdater(Daemon):
if self.mount_check and \
not os.path.ismount(os.path.join(self.devices, device)):
self.logger.warn(
'Skipping %s as it is not mounted' % device)
_('Skipping %s as it is not mounted'), device)
continue
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - begin
self.logger.info('Object update single threaded sweep completed: '
'%.02fs, %s successes, %s failures' %
(elapsed, self.successes, self.failures))
self.logger.info(_('Object update single threaded sweep completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s failures'),
{'elapsed': elapsed, 'success': self.successes,
'fail': self.failures})
def object_sweep(self, device):
"""
@@ -150,7 +155,7 @@ class ObjectUpdater(Daemon):
update = pickle.load(open(update_path, 'rb'))
except Exception:
self.logger.exception(
'ERROR Pickle problem, quarantining %s' % update_path)
_('ERROR Pickle problem, quarantining %s'), update_path)
renamer(update_path, os.path.join(device,
'quarantined', 'objects', os.path.basename(update_path)))
return
@@ -170,11 +175,13 @@ class ObjectUpdater(Daemon):
successes.append(node['id'])
if success:
self.successes += 1
self.logger.debug('Update sent for %s %s' % (obj, update_path))
self.logger.debug(_('Update sent for %(obj)s %(path)s'),
{'obj': obj, 'path': update_path})
os.unlink(update_path)
else:
self.failures += 1
self.logger.debug('Update failed for %s %s' % (obj, update_path))
self.logger.debug(_('Update failed for %(obj)s %(path)s'),
{'obj': obj, 'path': update_path})
update['successes'] = successes
write_pickle(update, update_path, os.path.join(device, 'tmp'))
@@ -197,6 +204,6 @@ class ObjectUpdater(Daemon):
resp.read()
return resp.status
except:
self.logger.exception('ERROR with remote server '
'%(ip)s:%(port)s/%(device)s' % node)
self.logger.exception(_('ERROR with remote server '
'%(ip)s:%(port)s/%(device)s'), node)
return 500

@@ -22,6 +22,7 @@ from ConfigParser import ConfigParser
from urllib import unquote, quote
import uuid
import functools
from gettext import gettext as _
from eventlet.timeout import Timeout
from webob.exc import HTTPBadRequest, HTTPMethodNotAllowed, \
@@ -120,8 +121,8 @@ class Controller(object):
:param msg: error message
"""
self.error_increment(node)
self.app.logger.error(
'%s %s:%s' % (msg, node['ip'], node['port']))
self.app.logger.error(_('%(msg)s %(ip)s:%(port)s'),
{'msg': msg, 'ip': node['ip'], 'port': node['port']})
def exception_occurred(self, node, typ, additional_info):
"""
@ -132,9 +133,9 @@ class Controller(object):
:param additional_info: additional information to log
"""
self.app.logger.exception(
'ERROR with %s server %s:%s/%s transaction %s re: %s' % (typ,
node['ip'], node['port'], node['device'], self.trans_id,
additional_info))
_('ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s'),
{'type': typ, 'ip': node['ip'], 'port': node['port'],
'device': node['device'], 'info': additional_info})
def error_limited(self, node):
"""
@ -155,8 +156,7 @@ class Controller(object):
limited = node['errors'] > self.app.error_suppression_limit
if limited:
self.app.logger.debug(
'Node error limited %s:%s (%s)' % (
node['ip'], node['port'], node['device']))
_('Node error limited %(ip)s:%(port)s (%(device)s)'), node)
return limited
def error_limit(self, node):
@@ -380,8 +380,8 @@ class Controller(object):
if etag:
resp.headers['etag'] = etag.strip('"')
return resp
self.app.logger.error('%s returning 503 for %s, transaction %s' %
(server_type, statuses, self.trans_id))
self.app.logger.error(_('%(type)s returning 503 for %(statuses)s'),
{'type': server_type, 'statuses': statuses})
resp.status = '503 Internal Server Error'
return resp
@@ -454,9 +454,7 @@ class Controller(object):
res.bytes_transferred += len(chunk)
except GeneratorExit:
res.client_disconnect = True
self.app.logger.info(
'Client disconnected on read transaction %s' %
self.trans_id)
self.app.logger.info(_('Client disconnected on read'))
except:
self.exception_occurred(node, 'Object',
'Trying to read during GET of %s' % req.path)
@@ -561,7 +559,7 @@ class ObjectController(Controller):
error_response = check_metadata(req, 'object')
if error_response:
return error_response
container_partition, containers, _, req.acl = \
container_partition, containers, _junk, req.acl = \
self.container_info(self.account_name, self.container_name)
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
@@ -603,7 +601,7 @@ class ObjectController(Controller):
@delay_denial
def PUT(self, req):
"""HTTP PUT request handler."""
container_partition, containers, _, req.acl = \
container_partition, containers, _junk, req.acl = \
self.container_info(self.account_name, self.container_name)
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
@@ -618,7 +616,7 @@ class ObjectController(Controller):
req.headers['X-Timestamp'] = normalize_timestamp(time.time())
# Sometimes the 'content-type' header exists, but is set to None.
if not req.headers.get('content-type'):
guessed_type, _ = mimetypes.guess_type(req.path_info)
guessed_type, _junk = mimetypes.guess_type(req.path_info)
if not guessed_type:
req.headers['Content-Type'] = 'application/octet-stream'
else:
@@ -698,9 +696,9 @@ class ObjectController(Controller):
containers.insert(0, container)
if len(conns) <= len(nodes) / 2:
self.app.logger.error(
'Object PUT returning 503, %s/%s required connections, '
'transaction %s' %
(len(conns), len(nodes) / 2 + 1, self.trans_id))
_('Object PUT returning 503, %(conns)s/%(nodes)s '
'required connections'),
{'conns': len(conns), 'nodes': len(nodes) // 2 + 1})
return HTTPServiceUnavailable(request=req)
try:
req.bytes_transferred = 0
@@ -730,27 +728,26 @@ class ObjectController(Controller):
conns.remove(conn)
if len(conns) <= len(nodes) / 2:
self.app.logger.error(
'Object PUT exceptions during send, %s/%s '
'required connections, transaction %s' %
(len(conns), len(nodes) // 2 + 1,
self.trans_id))
_('Object PUT exceptions during send, '
'%(conns)s/%(nodes)s required connections'),
{'conns': len(conns),
'nodes': len(nodes) // 2 + 1})
return HTTPServiceUnavailable(request=req)
if req.headers.get('transfer-encoding') and chunk == '':
break
except ChunkReadTimeout, err:
self.app.logger.info(
'ERROR Client read timeout (%ss)' % err.seconds)
_('ERROR Client read timeout (%ss)'), err.seconds)
return HTTPRequestTimeout(request=req)
except:
req.client_disconnect = True
self.app.logger.exception(
'ERROR Exception causing client disconnect')
_('ERROR Exception causing client disconnect'))
return Response(status='499 Client Disconnect')
if req.content_length and req.bytes_transferred < req.content_length:
req.client_disconnect = True
self.app.logger.info(
'Client disconnected without sending enough data %s' %
self.trans_id)
_('Client disconnected without sending enough data'))
return Response(status='499 Client Disconnect')
statuses = []
reasons = []
@@ -774,7 +771,7 @@ class ObjectController(Controller):
'Trying to get final status of PUT to %s' % req.path)
if len(etags) > 1:
self.app.logger.error(
'Object servers returned %s mismatched etags' % len(etags))
_('Object servers returned %s mismatched etags'), len(etags))
return HTTPServerError(request=req)
etag = len(etags) and etags.pop() or None
while len(statuses) < len(nodes):
@@ -798,7 +795,7 @@ class ObjectController(Controller):
@delay_denial
def DELETE(self, req):
"""HTTP DELETE request handler."""
container_partition, containers, _, req.acl = \
container_partition, containers, _junk, req.acl = \
self.container_info(self.account_name, self.container_name)
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
@@ -848,7 +845,7 @@ class ObjectController(Controller):
if not dest.startswith('/'):
dest = '/' + dest
try:
_, dest_container, dest_object = dest.split('/', 2)
_junk, dest_container, dest_object = dest.split('/', 2)
except ValueError:
return HTTPPreconditionFailed(request=req,
body='Destination header must be of the form '
@@ -1116,9 +1113,8 @@ class ContainerController(Controller):
# If even one node doesn't do the delete, we can't be sure
# what the outcome will be once everything is in sync; so
# we 503.
self.app.logger.error('Returning 503 because not all '
'container nodes confirmed DELETE, transaction %s' %
self.trans_id)
self.app.logger.error(_('Returning 503 because not all '
'container nodes confirmed DELETE'))
return HTTPServiceUnavailable(request=req)
if resp.status_int == 202: # Indicates no server had the container
return HTTPNotFound(request=req)
@@ -1440,7 +1436,7 @@ class BaseApplication(object):
return resp
return handler(req)
except Exception:
self.logger.exception('ERROR Unhandled exception in request')
self.logger.exception(_('ERROR Unhandled exception in request'))
return HTTPServerError(request=req)

@@ -462,7 +462,7 @@ class TestAuthServer(unittest.TestCase):
auth_server.http_connect = fake_http_connect(201)
url = self.controller.create_user('test', 'tester', 'testing')
self.assertEquals(log.getvalue().rsplit(' ', 1)[0],
"auth SUCCESS create_user('test', 'tester', _, False, False) "
"SUCCESS create_user('test', 'tester', _, False, False) "
"= %s" % repr(url))
log.truncate(0)
def start_response(*args):
@@ -491,7 +491,7 @@ class TestAuthServer(unittest.TestCase):
logsegs[1] = '[01/Jan/2001:01:02:03 +0000]'
logsegs[2:] = logsegs[2].split(' ')
logsegs[-1] = '0.1234'
self.assertEquals(' '.join(logsegs), 'auth testhost - - '
self.assertEquals(' '.join(logsegs), 'testhost - - '
'[01/Jan/2001:01:02:03 +0000] "GET /v1/test/auth?test=True '
'HTTP/1.0" 204 - "-" "-" - - - - - - - - - "-" "None" "-" '
'0.1234')
@@ -519,7 +519,7 @@ class TestAuthServer(unittest.TestCase):
logsegs[1] = '[01/Jan/2001:01:02:03 +0000]'
logsegs[2:] = logsegs[2].split(' ')
logsegs[-1] = '0.1234'
self.assertEquals(' '.join(logsegs), 'auth None - - [01/Jan/2001:'
self.assertEquals(' '.join(logsegs), 'None - - [01/Jan/2001:'
'01:02:03 +0000] "GET /v1/test/auth HTTP/1.0" 204 - "-" "-" - '
'- - - - - - - - "-" "None" "Content-Length: 0\n'
'X-Storage-User: tester\nX-Storage-Pass: testing" 0.1234')
@@ -556,7 +556,7 @@ class TestAuthServer(unittest.TestCase):
'HTTP_X_STORAGE_PASS': 'testing'},
start_response)
self.assert_(log.getvalue().startswith(
'auth ERROR Unhandled exception in ReST request'),
'ERROR Unhandled exception in ReST request'),
log.getvalue())
log.truncate(0)
finally:

@@ -50,7 +50,7 @@ class TestDaemon(unittest.TestCase):
def test_create(self):
d = daemon.Daemon({})
self.assertEquals(d.conf, {})
self.assert_(isinstance(d.logger, utils.NamedLogger))
self.assert_(isinstance(d.logger, utils.LogAdapter))
def test_stubs(self):
d = daemon.Daemon({})

@@ -283,35 +283,27 @@ Error: unable to locate %s
utils.sys.stdout = orig_stdout
utils.sys.stderr = orig_stderr
def test_NamedLogger(self):
sio = StringIO()
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sio))
nl = utils.NamedLogger(logger, 'server')
nl.warn('test')
self.assertEquals(sio.getvalue(), 'server test\n')
def test_get_logger(self):
sio = StringIO()
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server')
logger.warn('test1')
self.assertEquals(sio.getvalue(), 'server test1\n')
self.assertEquals(sio.getvalue(), 'test1\n')
logger.debug('test2')
self.assertEquals(sio.getvalue(), 'server test1\n')
self.assertEquals(sio.getvalue(), 'test1\n')
logger = utils.get_logger({'log_level': 'DEBUG'}, 'server')
logger.debug('test3')
self.assertEquals(sio.getvalue(), 'server test1\nserver test3\n')
self.assertEquals(sio.getvalue(), 'test1\ntest3\n')
# Doesn't really test that the log facility is truly being used all the
# way to syslog; but exercises the code.
logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server')
logger.warn('test4')
self.assertEquals(sio.getvalue(),
'server test1\nserver test3\nserver test4\n')
'test1\ntest3\ntest4\n')
logger.debug('test5')
self.assertEquals(sio.getvalue(),
'server test1\nserver test3\nserver test4\n')
'test1\ntest3\ntest4\n')
def test_storage_directory(self):
self.assertEquals(utils.storage_directory('objects', '1', 'ABCDEF'),