Merge "changed TRUE_VALUES references to utils.config_true_value() call"

This commit is contained in:
Jenkins 2012-10-31 18:46:11 +00:00 committed by Gerrit Code Review
commit c7948ec5d9
28 changed files with 444 additions and 376 deletions

View File

@ -23,7 +23,7 @@ from optparse import OptionParser
from swift.common.bench import (BenchController, DistributedBenchController,
create_containers, delete_containers)
from swift.common.utils import readconf, LogAdapter, TRUE_VALUES
from swift.common.utils import readconf, LogAdapter, config_true_value
# The defaults should be sufficient to run swift-bench on a SAIO
CONF_DEFAULTS = {
@ -138,7 +138,7 @@ if __name__ == '__main__':
options.containers = ['%s_%d' % (options.container_name, i)
for i in xrange(int(options.num_containers))]
# check boolean options vs config parameter values
if str(options.delete).lower() in TRUE_VALUES:
if config_true_value(str(options.delete).lower()):
options.delete = 'yes'
else:
options.delete = 'no'
@ -170,5 +170,5 @@ if __name__ == '__main__':
controller = controller_class(logger, options)
controller.run()
if options.delete.lower() in TRUE_VALUES:
if config_true_value(options.delete.lower()):
delete_containers(logger, options)

View File

@ -30,7 +30,7 @@ from eventlet.pools import Pool
from swift.common import direct_client
from swiftclient import ClientException, Connection, get_auth
from swift.common.ring import Ring
from swift.common.utils import compute_eta, get_time_units, TRUE_VALUES
from swift.common.utils import compute_eta, get_time_units, config_true_value
unmounted = []
@ -45,7 +45,8 @@ def get_error_log(prefix):
global debug, unmounted, notfound
if hasattr(msg_or_exc, 'http_status'):
identifier = '%s:%s/%s' % (msg_or_exc.http_host,
msg_or_exc.http_port, msg_or_exc.http_device)
msg_or_exc.http_port,
msg_or_exc.http_device)
if msg_or_exc.http_status == 507:
if identifier not in unmounted:
unmounted.append(identifier)
@ -68,8 +69,8 @@ def get_error_log(prefix):
def container_dispersion_report(coropool, connpool, account, container_ring,
retries):
with connpool.item() as conn:
containers = [c['name'] for c in conn.get_account(prefix='dispersion_',
full_listing=True)[1]]
containers = [c['name'] for c in conn.get_account(
prefix='dispersion_', full_listing=True)[1]]
containers_listed = len(containers)
if not containers_listed:
print >>stderr, 'No containers to query. Has ' \
@ -88,9 +89,8 @@ def container_dispersion_report(coropool, connpool, account, container_ring,
error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
try:
attempts, _junk = direct_client.retry(
direct_client.direct_head_container, node,
part, account, container, error_log=error_log,
retries=retries)
direct_client.direct_head_container, node, part, account,
container, error_log=error_log, retries=retries)
retries_done[0] += attempts - 1
found_count += 1
except ClientException, err:
@ -134,11 +134,12 @@ def container_dispersion_report(coropool, connpool, account, container_ring,
missing_copies = container_ring.replica_count - copies
if container_copies_found[copies]:
print missing_string(container_copies_found[copies],
missing_copies, container_ring.replica_count)
missing_copies,
container_ring.replica_count)
print '%.02f%% of container copies found (%d of %d)' % (
value, copies_found, copies_expected)
value, copies_found, copies_expected)
print 'Sample represents %.02f%% of the container partition space' % (
100.0 * distinct_partitions / container_ring.partition_count)
100.0 * distinct_partitions / container_ring.partition_count)
stdout.flush()
return None
else:
@ -159,8 +160,8 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
container = 'dispersion_objects'
with connpool.item() as conn:
try:
objects = [o['name'] for o in conn.get_container(container,
prefix='dispersion_', full_listing=True)[1]]
objects = [o['name'] for o in conn.get_container(
container, prefix='dispersion_', full_listing=True)[1]]
except ClientException, err:
if err.http_status != 404:
raise
@ -186,9 +187,8 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
try:
attempts, _junk = direct_client.retry(
direct_client.direct_head_object, node, part,
account, container, obj, error_log=error_log,
retries=retries)
direct_client.direct_head_object, node, part, account,
container, obj, error_log=error_log, retries=retries)
retries_done[0] += attempts - 1
found_count += 1
except ClientException, err:
@ -232,11 +232,11 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
missing_copies = object_ring.replica_count - copies
if object_copies_found[copies]:
print missing_string(object_copies_found[copies],
missing_copies, object_ring.replica_count)
missing_copies, object_ring.replica_count)
print '%.02f%% of object copies found (%d of %d)' % \
(value, copies_found, copies_expected)
(value, copies_found, copies_expected)
print 'Sample represents %.02f%% of the object partition space' % (
100.0 * distinct_partitions / object_ring.partition_count)
100.0 * distinct_partitions / object_ring.partition_count)
stdout.flush()
return None
else:
@ -304,7 +304,7 @@ Usage: %prog [options] [conf_file]
dispersion_coverage = int(conf.get('dispersion_coverage', 1))
retries = int(conf.get('retries', 5))
concurrency = int(conf.get('concurrency', 25))
if options.dump_json or conf.get('dump_json', 'no').lower() in TRUE_VALUES:
if options.dump_json or config_true_value(conf.get('dump_json', 'no')):
json_output = True
if options.debug:
debug = True
@ -316,10 +316,9 @@ Usage: %prog [options] [conf_file]
auth_version=conf.get('auth_version', '1.0'))
account = url.rsplit('/', 1)[1]
connpool = Pool(max_size=concurrency)
connpool.create = lambda: Connection(conf['auth_url'],
conf['auth_user'], conf['auth_key'],
retries=retries,
preauthurl=url, preauthtoken=token)
connpool.create = lambda: Connection(
conf['auth_url'], conf['auth_user'], conf['auth_key'], retries=retries,
preauthurl=url, preauthtoken=token)
container_ring = Ring(swift_dir, ring_name='container')
object_ring = Ring(swift_dir, ring_name='object')

View File

@ -21,7 +21,7 @@ import swift.common.db
from swift.account import server as account_server
from swift.common.db import AccountBroker
from swift.common.utils import get_logger, audit_location_generator, \
TRUE_VALUES, dump_recon_cache
config_true_value, dump_recon_cache
from swift.common.daemon import Daemon
from eventlet import Timeout
@ -34,13 +34,12 @@ class AccountAuditor(Daemon):
self.conf = conf
self.logger = get_logger(conf, log_route='account-auditor')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = conf.get('mount_check', 'true').lower() in \
TRUE_VALUES
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.interval = int(conf.get('interval', 1800))
self.account_passes = 0
self.account_failures = 0
swift.common.db.DB_PREALLOCATION = \
conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
config_true_value(conf.get('db_preallocation', 'f'))
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = os.path.join(self.recon_cache_path, "account.recon")

View File

@ -27,7 +27,7 @@ from swift.common.db import AccountBroker
from swift.common.direct_client import ClientException, \
direct_delete_container, direct_delete_object, direct_get_container
from swift.common.ring import Ring
from swift.common.utils import get_logger, whataremyips, TRUE_VALUES
from swift.common.utils import get_logger, whataremyips, config_true_value
from swift.common.daemon import Daemon
@ -56,8 +56,7 @@ class AccountReaper(Daemon):
self.conf = conf
self.logger = get_logger(conf, log_route='account-reaper')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = conf.get('mount_check', 'true').lower() in \
TRUE_VALUES
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.interval = int(conf.get('interval', 3600))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.account_ring = None
@ -71,7 +70,7 @@ class AccountReaper(Daemon):
sqrt(self.concurrency)
self.container_pool = GreenPool(size=self.container_concurrency)
swift.common.db.DB_PREALLOCATION = \
conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
config_true_value(conf.get('db_preallocation', 'f'))
self.delay_reaping = int(conf.get('delay_reaping') or 0)
def get_account_ring(self):

View File

@ -26,7 +26,7 @@ from eventlet import Timeout
import swift.common.db
from swift.common.db import AccountBroker
from swift.common.utils import get_logger, get_param, hash_path, public, \
normalize_timestamp, split_path, storage_directory, TRUE_VALUES, \
normalize_timestamp, split_path, storage_directory, config_true_value, \
validate_device_partition, json
from swift.common.constraints import ACCOUNT_LISTING_LIMIT, \
check_mount, check_float, check_utf8, FORMAT2CONTENT_TYPE
@ -47,15 +47,14 @@ class AccountController(object):
def __init__(self, conf):
self.logger = get_logger(conf, log_route='account-server')
self.root = conf.get('devices', '/srv/node')
self.mount_check = conf.get('mount_check', 'true').lower() in \
TRUE_VALUES
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.replicator_rpc = ReplicatorRpc(self.root, DATADIR, AccountBroker,
self.mount_check,
logger=self.logger)
self.auto_create_account_prefix = \
conf.get('auto_create_account_prefix') or '.'
swift.common.db.DB_PREALLOCATION = \
conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
config_true_value(conf.get('db_preallocation', 'f'))
def _get_account_broker(self, drive, part, account):
hsh = hash_path(account)

View File

@ -28,7 +28,7 @@ import eventlet
import eventlet.pools
from eventlet.green.httplib import CannotSendRequest
from swift.common.utils import TRUE_VALUES, LogAdapter
from swift.common.utils import config_true_value, LogAdapter
import swiftclient as client
from swift.common import direct_client
from swift.common.http import HTTP_CONFLICT
@ -144,7 +144,7 @@ class Bench(object):
self.user = conf.user
self.key = conf.key
self.auth_url = conf.auth
self.use_proxy = conf.use_proxy.lower() in TRUE_VALUES
self.use_proxy = config_true_value(conf.use_proxy)
self.auth_version = conf.auth_version
self.logger.info("Auth version: %s" % self.auth_version)
if self.use_proxy:
@ -314,7 +314,7 @@ class BenchController(object):
self.logger = logger
self.conf = conf
self.names = []
self.delete = conf.delete.lower() in TRUE_VALUES
self.delete = config_true_value(conf.delete)
self.gets = int(conf.num_gets)
self.aborted = False

View File

@ -75,8 +75,7 @@ def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
log_name=kwargs.get('log_name'))
# once on command line (i.e. daemonize=false) will over-ride config
once = once or \
conf.get('daemonize', 'true').lower() not in utils.TRUE_VALUES
once = once or not utils.config_true_value(conf.get('daemonize', 'true'))
# pre-configure logger
if 'logger' in kwargs:
@ -87,7 +86,7 @@ def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
log_route=section_name)
# disable fallocate if desired
if conf.get('disable_fallocate', 'no').lower() in utils.TRUE_VALUES:
if utils.config_true_value(conf.get('disable_fallocate', 'no')):
utils.disable_fallocate()
try:

View File

@ -29,8 +29,8 @@ import simplejson
import swift.common.db
from swift.common.utils import get_logger, whataremyips, storage_directory, \
renamer, mkdirs, lock_parent_directory, TRUE_VALUES, unlink_older_than, \
dump_recon_cache, rsync_ip
renamer, mkdirs, lock_parent_directory, config_true_value, \
unlink_older_than, dump_recon_cache, rsync_ip
from swift.common import ring
from swift.common.http import HTTP_NOT_FOUND, HTTP_INSUFFICIENT_STORAGE
from swift.common.bufferedhttp import BufferedHTTPConnection
@ -53,9 +53,9 @@ def quarantine_db(object_file, server_type):
('container' or 'account')
"""
object_dir = os.path.dirname(object_file)
quarantine_dir = os.path.abspath(os.path.join(object_dir, '..',
'..', '..', '..', 'quarantined', server_type + 's',
os.path.basename(object_dir)))
quarantine_dir = os.path.abspath(
os.path.join(object_dir, '..', '..', '..', '..', 'quarantined',
server_type + 's', os.path.basename(object_dir)))
try:
renamer(object_dir, quarantine_dir)
except OSError, e:
@ -88,7 +88,7 @@ class ReplConnection(BufferedHTTPConnection):
try:
body = simplejson.dumps(args)
self.request('REPLICATE', self.path, body,
{'Content-Type': 'application/json'})
{'Content-Type': 'application/json'})
response = self.getresponse()
response.data = response.read()
return response
@ -107,8 +107,7 @@ class Replicator(Daemon):
self.conf = conf
self.logger = get_logger(conf, log_route='replicator')
self.root = conf.get('devices', '/srv/node')
self.mount_check = conf.get('mount_check', 'true').lower() in \
('true', 't', '1', 'on', 'yes', 'y')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.port = int(conf.get('bind_port', self.default_port))
concurrency = int(conf.get('concurrency', 8))
self.cpool = GreenPool(size=concurrency)
@ -118,13 +117,12 @@ class Replicator(Daemon):
self.max_diffs = int(conf.get('max_diffs') or 100)
self.interval = int(conf.get('interval') or
conf.get('run_pause') or 30)
self.vm_test_mode = conf.get(
'vm_test_mode', 'no').lower() in ('yes', 'true', 'on', '1')
self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no'))
self.node_timeout = int(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
swift.common.db.DB_PREALLOCATION = \
conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
config_true_value(conf.get('db_preallocation', 'f'))
self._zero_stats()
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
@ -149,17 +147,18 @@ class Replicator(Daemon):
{'count': self.stats['attempted'],
'time': time.time() - self.stats['start'],
'rate': self.stats['attempted'] /
(time.time() - self.stats['start'] + 0.0000001)})
(time.time() - self.stats['start'] + 0.0000001)})
self.logger.info(_('Removed %(remove)d dbs') % self.stats)
self.logger.info(_('%(success)s successes, %(failure)s failures')
% self.stats)
dump_recon_cache({'replication_stats': self.stats,
'replication_time': time.time() - self.stats['start']
}, self.rcache, self.logger)
% self.stats)
dump_recon_cache(
{'replication_stats': self.stats,
'replication_time': time.time() - self.stats['start']},
self.rcache, self.logger)
self.logger.info(' '.join(['%s:%s' % item for item in
self.stats.items() if item[0] in
('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl', 'empty',
'diff_capped')]))
self.stats.items() if item[0] in
('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl',
'empty', 'diff_capped')]))
def _rsync_file(self, db_file, remote_file, whole_file=True):
"""
@ -185,7 +184,7 @@ class Replicator(Daemon):
return proc.returncode == 0
def _rsync_db(self, broker, device, http, local_id,
replicate_method='complete_rsync', replicate_timeout=None):
replicate_method='complete_rsync', replicate_timeout=None):
"""
Sync a whole db using rsync.
@ -198,18 +197,18 @@ class Replicator(Daemon):
"""
device_ip = rsync_ip(device['ip'])
if self.vm_test_mode:
remote_file = '%s::%s%s/%s/tmp/%s' % (device_ip,
self.server_type, device['port'], device['device'],
local_id)
remote_file = '%s::%s%s/%s/tmp/%s' % (
device_ip, self.server_type, device['port'], device['device'],
local_id)
else:
remote_file = '%s::%s/%s/tmp/%s' % (device_ip,
self.server_type, device['device'], local_id)
remote_file = '%s::%s/%s/tmp/%s' % (
device_ip, self.server_type, device['device'], local_id)
mtime = os.path.getmtime(broker.db_file)
if not self._rsync_file(broker.db_file, remote_file):
return False
# perform block-level sync if the db was modified during the first sync
if os.path.exists(broker.db_file + '-journal') or \
os.path.getmtime(broker.db_file) > mtime:
os.path.getmtime(broker.db_file) > mtime:
# grab a lock so nobody else can modify it
with broker.lock():
if not self._rsync_file(broker.db_file, remote_file, False):
@ -243,13 +242,15 @@ class Replicator(Daemon):
if not response or response.status >= 300 or response.status < 200:
if response:
self.logger.error(_('ERROR Bad response %(status)s from '
'%(host)s'),
{'status': response.status, 'host': http.host})
'%(host)s'),
{'status': response.status,
'host': http.host})
return False
point = objects[-1]['ROWID']
objects = broker.get_items_since(point, self.per_diff)
if objects:
self.logger.debug(_('Synchronization for %s has fallen more than '
self.logger.debug(_(
'Synchronization for %s has fallen more than '
'%s rows behind; moving on and will try again next pass.') %
(broker.db_file, self.max_diffs * self.per_diff))
self.stats['diff_capped'] += 1
@ -259,7 +260,8 @@ class Replicator(Daemon):
response = http.replicate('merge_syncs', sync_table)
if response and response.status >= 200 and response.status < 300:
broker.merge_syncs([{'remote_id': remote_id,
'sync_point': point}], incoming=False)
'sync_point': point}],
incoming=False)
return True
return False
@ -283,7 +285,8 @@ class Replicator(Daemon):
self.stats['hashmatch'] += 1
self.logger.increment('hashmatches')
broker.merge_syncs([{'remote_id': rinfo['id'],
'sync_point': rinfo['point']}], incoming=False)
'sync_point': rinfo['point']}],
incoming=False)
return True
def _http_connect(self, node, partition, db_file):
@ -297,7 +300,8 @@ class Replicator(Daemon):
:returns: ReplConnection object
"""
return ReplConnection(node, partition,
os.path.basename(db_file).split('.', 1)[0], self.logger)
os.path.basename(db_file).split('.', 1)[0],
self.logger)
def _repl_to_node(self, node, broker, partition, info):
"""
@ -319,8 +323,9 @@ class Replicator(Daemon):
_('ERROR Unable to connect to remote server: %s'), node)
return False
with Timeout(self.node_timeout):
response = http.replicate('sync', info['max_row'], info['hash'],
info['id'], info['created_at'], info['put_timestamp'],
response = http.replicate(
'sync', info['max_row'], info['hash'], info['id'],
info['created_at'], info['put_timestamp'],
info['delete_timestamp'], info['metadata'])
if not response:
return False
@ -341,11 +346,11 @@ class Replicator(Daemon):
self.stats['remote_merge'] += 1
self.logger.increment('remote_merges')
return self._rsync_db(broker, node, http, info['id'],
replicate_method='rsync_then_merge',
replicate_timeout=(info['count'] / 2000))
replicate_method='rsync_then_merge',
replicate_timeout=(info['count'] / 2000))
# else send diffs over to the remote server
return self._usync_db(max(rinfo['point'], local_sync),
broker, http, rinfo['id'], info['id'])
broker, http, rinfo['id'], info['id'])
def _replicate_object(self, partition, object_file, node_id):
"""
@ -412,7 +417,8 @@ class Replicator(Daemon):
self.logger.error(_('ERROR Remote drive not mounted %s'), node)
except (Exception, Timeout):
self.logger.exception(_('ERROR syncing %(file)s with node'
' %(node)s'), {'file': object_file, 'node': node})
' %(node)s'),
{'file': object_file, 'node': node})
self.stats['success' if success else 'failure'] += 1
self.logger.increment('successes' if success else 'failures')
responses.append(success)
@ -542,7 +548,8 @@ class ReplicatorRpc(object):
not os.path.ismount(os.path.join(self.root, drive)):
return Response(status='507 %s is not mounted' % drive)
db_file = os.path.join(self.root, drive,
storage_directory(self.datadir, partition, hsh), hsh + '.db')
storage_directory(self.datadir, partition, hsh),
hsh + '.db')
if op == 'rsync_then_merge':
return self.rsync_then_merge(drive, db_file, args)
if op == 'complete_rsync':
@ -577,23 +584,23 @@ class ReplicatorRpc(object):
timespan = time.time() - timemark
if timespan > DEBUG_TIMINGS_THRESHOLD:
self.logger.debug(_('replicator-rpc-sync time for '
'update_metadata: %.02fs') % timespan)
'update_metadata: %.02fs') % timespan)
if info['put_timestamp'] != put_timestamp or \
info['created_at'] != created_at or \
info['delete_timestamp'] != delete_timestamp:
info['created_at'] != created_at or \
info['delete_timestamp'] != delete_timestamp:
timemark = time.time()
broker.merge_timestamps(
created_at, put_timestamp, delete_timestamp)
timespan = time.time() - timemark
if timespan > DEBUG_TIMINGS_THRESHOLD:
self.logger.debug(_('replicator-rpc-sync time for '
'merge_timestamps: %.02fs') % timespan)
'merge_timestamps: %.02fs') % timespan)
timemark = time.time()
info['point'] = broker.get_sync(id_)
timespan = time.time() - timemark
if timespan > DEBUG_TIMINGS_THRESHOLD:
self.logger.debug(_('replicator-rpc-sync time for get_sync: '
'%.02fs') % timespan)
'%.02fs') % timespan)
if hash_ == info['hash'] and info['point'] < remote_sync:
timemark = time.time()
broker.merge_syncs([{'remote_id': id_,
@ -602,7 +609,7 @@ class ReplicatorRpc(object):
timespan = time.time() - timemark
if timespan > DEBUG_TIMINGS_THRESHOLD:
self.logger.debug(_('replicator-rpc-sync time for '
'merge_syncs: %.02fs') % timespan)
'merge_syncs: %.02fs') % timespan)
return Response(simplejson.dumps(info))
def merge_syncs(self, broker, args):

View File

@ -91,12 +91,12 @@ class KeystoneAuth(object):
self.reseller_admin_role = conf.get('reseller_admin_role',
'ResellerAdmin')
config_is_admin = conf.get('is_admin', "false").lower()
self.is_admin = config_is_admin in swift_utils.TRUE_VALUES
self.is_admin = swift_utils.config_true_value(config_is_admin)
cfg_synchosts = conf.get('allowed_sync_hosts', '127.0.0.1')
self.allowed_sync_hosts = [h.strip() for h in cfg_synchosts.split(',')
if h.strip()]
config_overrides = conf.get('allow_overrides', 't').lower()
self.allow_overrides = config_overrides in swift_utils.TRUE_VALUES
self.allow_overrides = swift_utils.config_true_value(config_overrides)
def __call__(self, environ, start_response):
identity = self._keystone_identity(environ)

View File

@ -42,7 +42,7 @@ from urllib import quote, unquote
from swift.common.swob import Request
from swift.common.utils import (get_logger, get_remote_client,
get_valid_utf8_str, TRUE_VALUES)
get_valid_utf8_str, config_true_value)
class InputProxy(object):
@ -92,7 +92,7 @@ class ProxyLoggingMiddleware(object):
def __init__(self, app, conf):
self.app = app
self.log_hdrs = conf.get('log_headers', 'no').lower() in TRUE_VALUES
self.log_hdrs = config_true_value(conf.get('log_headers', 'no'))
# The leading access_* check is in case someone assumes that
# log_statsd_valid_http_methods behaves like the other log_statsd_*

View File

@ -17,7 +17,7 @@ import errno
import os
from swift.common.swob import Request, Response
from swift.common.utils import split_path, get_logger, TRUE_VALUES
from swift.common.utils import split_path, get_logger, config_true_value
from swift.common.constraints import check_mount
from resource import getpagesize
from hashlib import md5
@ -59,8 +59,7 @@ class ReconMiddleware(object):
self.object_ring_path = os.path.join(swift_dir, 'object.ring.gz')
self.rings = [self.account_ring_path, self.container_ring_path,
self.object_ring_path]
self.mount_check = conf.get('mount_check', 'true').lower() \
in TRUE_VALUES
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
def _from_recon_cache(self, cache_keys, cache_file, openr=open):
"""retrieve values from a recon cache file
@ -159,7 +158,7 @@ class ReconMiddleware(object):
if recon_type == 'object':
return self._from_recon_cache(['object_expiration_pass',
'expired_last_pass'],
self.object_recon_cache)
self.object_recon_cache)
def get_auditor_info(self, recon_type):
"""get auditor info"""
@ -186,8 +185,8 @@ class ReconMiddleware(object):
"""list unmounted (failed?) devices"""
mountlist = []
for entry in os.listdir(self.devices):
mpoint = {'device': entry, \
"mounted": check_mount(self.devices, entry)}
mpoint = {'device': entry,
'mounted': check_mount(self.devices, entry)}
if not mpoint['mounted']:
mountlist.append(mpoint)
return mountlist
@ -202,11 +201,12 @@ class ReconMiddleware(object):
capacity = disk.f_bsize * disk.f_blocks
available = disk.f_bsize * disk.f_bavail
used = disk.f_bsize * (disk.f_blocks - disk.f_bavail)
devices.append({'device': entry, 'mounted': True, \
'size': capacity, 'used': used, 'avail': available})
devices.append({'device': entry, 'mounted': True,
'size': capacity, 'used': used,
'avail': available})
else:
devices.append({'device': entry, 'mounted': False, \
'size': '', 'used': '', 'avail': ''})
devices.append({'device': entry, 'mounted': False,
'size': '', 'used': '', 'avail': ''})
return devices
def get_ring_md5(self, openr=open):

View File

@ -120,9 +120,9 @@ from urllib import unquote, quote as urllib_quote
from swift.common.utils import cache_from_env, get_logger, human_readable, \
split_path, TRUE_VALUES
split_path, config_true_value
from swift.common.wsgi import make_pre_authed_env, make_pre_authed_request, \
WSGIContext
WSGIContext
from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND
from swift.common.swob import Response, HTTPMovedPermanently, HTTPNotFound
@ -179,10 +179,11 @@ class _StaticWebContext(WSGIContext):
save_response_status = self._response_status
save_response_headers = self._response_headers
save_response_exc_info = self._response_exc_info
resp = self._app_call(make_pre_authed_env(env, 'GET',
'/%s/%s/%s/%s%s' % (self.version, self.account, self.container,
self._get_status_int(), self._error),
self.agent))
resp = self._app_call(make_pre_authed_env(
env, 'GET', '/%s/%s/%s/%s%s' % (
self.version, self.account, self.container,
self._get_status_int(), self._error),
self.agent))
if is_success(self._get_status_int()):
start_response(save_response_status, self._response_headers,
self._response_exc_info)
@ -210,9 +211,10 @@ class _StaticWebContext(WSGIContext):
(self._index, self._error, self._listings,
self._listings_css) = cached_data
return
resp = make_pre_authed_request(env, 'HEAD',
'/%s/%s/%s' % (self.version, self.account, self.container),
agent=self.agent).get_response(self.app)
resp = make_pre_authed_request(
env, 'HEAD', '/%s/%s/%s' % (
self.version, self.account, self.container),
agent=self.agent).get_response(self.app)
if is_success(resp.status_int):
self._index = \
resp.headers.get('x-container-meta-web-index', '').strip()
@ -225,9 +227,9 @@ class _StaticWebContext(WSGIContext):
'').strip()
if memcache_client:
memcache_client.set(memcache_key,
(self._index, self._error, self._listings,
self._listings_css),
timeout=self.cache_timeout)
(self._index, self._error, self._listings,
self._listings_css),
timeout=self.cache_timeout)
def _listing(self, env, start_response, prefix=None):
"""
@ -237,12 +239,13 @@ class _StaticWebContext(WSGIContext):
:param start_response: The original WSGI start_response hook.
:param prefix: Any prefix desired for the container listing.
"""
if self._listings.lower() not in TRUE_VALUES:
if not config_true_value(self._listings):
resp = HTTPNotFound()(env, self._start_response)
return self._error_response(resp, env, start_response)
tmp_env = make_pre_authed_env(env, 'GET',
'/%s/%s/%s' % (self.version, self.account, self.container),
self.agent)
tmp_env = make_pre_authed_env(
env, 'GET', '/%s/%s/%s' % (
self.version, self.account, self.container),
self.agent)
tmp_env['QUERY_STRING'] = 'delimiter=/&format=json'
if prefix:
tmp_env['QUERY_STRING'] += '&prefix=%s' % quote(prefix)
@ -260,14 +263,14 @@ class _StaticWebContext(WSGIContext):
return self._error_response(resp, env, start_response)
headers = {'Content-Type': 'text/html; charset=UTF-8'}
body = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 ' \
'Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n' \
'Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n' \
'<html>\n' \
' <head>\n' \
' <title>Listing of %s</title>\n' % \
cgi.escape(env['PATH_INFO'])
if self._listings_css:
body += ' <link rel="stylesheet" type="text/css" ' \
'href="%s" />\n' % (self._build_css_path(prefix))
'href="%s" />\n' % (self._build_css_path(prefix))
else:
body += ' <style type="text/css">\n' \
' h1 {font-size: 1em; font-weight: bold;}\n' \
@ -347,7 +350,7 @@ class _StaticWebContext(WSGIContext):
"""
self._get_container_info(env)
if not self._listings and not self._index:
if env.get('HTTP_X_WEB_MODE', 'f').lower() in TRUE_VALUES:
if config_true_value(env.get('HTTP_X_WEB_MODE', 'f')):
return HTTPNotFound()(env, start_response)
return self.app(env, start_response)
if env['PATH_INFO'][-1] != '/':
@ -366,7 +369,7 @@ class _StaticWebContext(WSGIContext):
if status_int == HTTP_NOT_FOUND:
return self._listing(env, start_response)
elif not is_success(self._get_status_int()) or \
not is_redirection(self._get_status_int()):
not is_redirection(self._get_status_int()):
return self._error_response(resp, env, start_response)
start_response(self._response_status, self._response_headers,
self._response_exc_info)
@ -415,10 +418,10 @@ class _StaticWebContext(WSGIContext):
return resp
if status_int == HTTP_NOT_FOUND:
if env['PATH_INFO'][-1] != '/':
tmp_env = make_pre_authed_env(env, 'GET',
'/%s/%s/%s' % (self.version, self.account,
self.container),
self.agent)
tmp_env = make_pre_authed_env(
env, 'GET', '/%s/%s/%s' % (
self.version, self.account, self.container),
self.agent)
tmp_env['QUERY_STRING'] = 'limit=1&format=json&delimiter' \
'=/&limit=1&prefix=%s' % quote(self.obj + '/')
resp = self._app_call(tmp_env)
@ -427,8 +430,7 @@ class _StaticWebContext(WSGIContext):
not json.loads(body):
resp = HTTPNotFound()(env, self._start_response)
return self._error_response(resp, env, start_response)
resp = HTTPMovedPermanently(location=env['PATH_INFO'] +
'/')
resp = HTTPMovedPermanently(location=env['PATH_INFO'] + '/')
self._log_response(env, resp.status_int)
return resp(env, start_response)
return self._listing(env, start_response, self.obj)
@ -522,7 +524,7 @@ class StaticWeb(object):
self.access_logger = get_logger(access_log_conf,
log_route='staticweb-access')
#: Indicates whether full HTTP headers should be logged or not.
self.log_headers = conf.get('log_headers', 'no').lower() in TRUE_VALUES
self.log_headers = config_true_value(conf.get('log_headers', 'no'))
def __call__(self, env, start_response):
"""
@ -547,7 +549,7 @@ class StaticWeb(object):
if env['REQUEST_METHOD'] not in ('HEAD', 'GET'):
return self.app(env, start_response)
if env.get('REMOTE_USER') and \
env.get('HTTP_X_WEB_MODE', 'f').lower() not in TRUE_VALUES:
not config_true_value(env.get('HTTP_X_WEB_MODE', 'f')):
return self.app(env, start_response)
if not container:
return self.app(env, start_response)

View File

@ -28,7 +28,7 @@ from swift.common.swob import HTTPBadRequest, HTTPForbidden, HTTPNotFound, \
from swift.common.middleware.acl import clean_acl, parse_acl, referrer_allowed
from swift.common.utils import cache_from_env, get_logger, get_remote_client, \
split_path, TRUE_VALUES
split_path, config_true_value
from swift.common.http import HTTP_CLIENT_CLOSED_REQUEST
@ -70,7 +70,7 @@ class TempAuth(object):
self.app = app
self.conf = conf
self.logger = get_logger(conf, log_route='tempauth')
self.log_headers = conf.get('log_headers', 'f').lower() in TRUE_VALUES
self.log_headers = config_true_value(conf.get('log_headers', 'f'))
self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
if self.reseller_prefix and self.reseller_prefix[-1] != '_':
self.reseller_prefix += '_'
@ -88,8 +88,8 @@ class TempAuth(object):
h.strip()
for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
if h.strip()]
self.allow_overrides = \
conf.get('allow_overrides', 't').lower() in TRUE_VALUES
self.allow_overrides = config_true_value(
conf.get('allow_overrides', 't'))
self.users = {}
for conf_key in conf:
if conf_key.startswith('user_') or conf_key.startswith('user64_'):

View File

@ -85,6 +85,15 @@ if hash_conf.read('/etc/swift/swift.conf'):
TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y'))
def config_true_value(value):
    """
    Return True if value is the boolean True, or is a string whose
    lowercased form is in TRUE_VALUES ('true', '1', 'yes', 'on', 't',
    'y'); return False for anything else (including non-string types
    such as ints, and the boolean False).

    Centralizes the ``x.lower() in TRUE_VALUES`` idiom previously
    repeated at every config-parsing call site in this commit.
    """
    # value is True short-circuits first, so a literal boolean never
    # reaches the (Python 2) basestring/string check below.
    return value is True or \
        (isinstance(value, basestring) and value.lower() in TRUE_VALUES)
def noop_libc_function(*args):
    # Accepts any arguments and returns 0 (the C convention for
    # success). NOTE(review): presumably used as a stand-in when a real
    # libc symbol cannot be loaded via ctypes — confirm against the
    # libc-loading code elsewhere in utils, which is outside this hunk.
    return 0

View File

@ -31,7 +31,7 @@ from urllib import unquote
from swift.common.swob import Request
from swift.common.utils import capture_stdio, disable_fallocate, \
drop_privileges, get_logger, NullLogger, TRUE_VALUES, \
drop_privileges, get_logger, NullLogger, config_true_value, \
validate_configuration
@ -66,20 +66,20 @@ def get_socket(conf, default_port=8080):
"""
bind_addr = (conf.get('bind_ip', '0.0.0.0'),
int(conf.get('bind_port', default_port)))
address_family = [addr[0] for addr in socket.getaddrinfo(bind_addr[0],
bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
address_family = [addr[0] for addr in socket.getaddrinfo(
bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
sock = None
retry_until = time.time() + 30
warn_ssl = False
while not sock and time.time() < retry_until:
try:
sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)),
family=address_family)
family=address_family)
if 'cert_file' in conf:
warn_ssl = True
sock = ssl.wrap_socket(sock, certfile=conf['cert_file'],
keyfile=conf['key_file'])
keyfile=conf['key_file'])
except socket.error, err:
if err.args[0] != errno.EADDRINUSE:
raise
@ -124,10 +124,11 @@ def run_wsgi(conf_file, app_section, *args, **kwargs):
logger = kwargs.pop('logger')
else:
logger = get_logger(conf, log_name,
log_to_console=kwargs.pop('verbose', False), log_route='wsgi')
log_to_console=kwargs.pop('verbose', False),
log_route='wsgi')
# disable fallocate if desired
if conf.get('disable_fallocate', 'no').lower() in TRUE_VALUES:
if config_true_value(conf.get('disable_fallocate', 'no')):
disable_fallocate()
# bind to address and port

View File

@ -23,7 +23,7 @@ import swift.common.db
from swift.container import server as container_server
from swift.common.db import ContainerBroker
from swift.common.utils import get_logger, audit_location_generator, \
TRUE_VALUES, dump_recon_cache
config_true_value, dump_recon_cache
from swift.common.daemon import Daemon
@ -34,13 +34,12 @@ class ContainerAuditor(Daemon):
self.conf = conf
self.logger = get_logger(conf, log_route='container-auditor')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = conf.get('mount_check', 'true').lower() in \
('true', 't', '1', 'on', 'yes', 'y')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.interval = int(conf.get('interval', 1800))
self.container_passes = 0
self.container_failures = 0
swift.common.db.DB_PREALLOCATION = \
conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
config_true_value(conf.get('db_preallocation', 'f'))
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = os.path.join(self.recon_cache_path, "container.recon")
@ -59,12 +58,11 @@ class ContainerAuditor(Daemon):
{'time': time.ctime(reported),
'pass': self.container_passes,
'fail': self.container_failures})
dump_recon_cache({'container_audits_since': reported,
'container_audits_passed':
self.container_passes,
'container_audits_failed':
self.container_failures},
self.rcache, self.logger)
dump_recon_cache(
{'container_audits_since': reported,
'container_audits_passed': self.container_passes,
'container_audits_failed': self.container_failures},
self.rcache, self.logger)
reported = time.time()
self.container_passes = 0
self.container_failures = 0
@ -99,7 +97,7 @@ class ContainerAuditor(Daemon):
self.logger.info(
_('Container audit "once" mode completed: %.02fs'), elapsed)
dump_recon_cache({'container_auditor_pass_completed': elapsed},
self.recon_container)
self.recon_container)
def container_audit(self, path):
"""
@ -121,5 +119,5 @@ class ContainerAuditor(Daemon):
self.logger.increment('failures')
self.container_failures += 1
self.logger.exception(_('ERROR Could not get container info %s'),
(broker.db_file))
broker.db_file)
self.logger.timing_since('timing', start_time)

View File

@ -28,7 +28,7 @@ import swift.common.db
from swift.common.db import ContainerBroker
from swift.common.utils import get_logger, get_param, hash_path, public, \
normalize_timestamp, storage_directory, split_path, validate_sync_to, \
TRUE_VALUES, validate_device_partition, json
config_true_value, validate_device_partition, json
from swift.common.constraints import CONTAINER_LISTING_LIMIT, \
check_mount, check_float, check_utf8, FORMAT2CONTENT_TYPE
from swift.common.bufferedhttp import http_connect
@ -53,21 +53,22 @@ class ContainerController(object):
def __init__(self, conf):
self.logger = get_logger(conf, log_route='container-server')
self.root = conf.get('devices', '/srv/node/')
self.mount_check = conf.get('mount_check', 'true').lower() in \
TRUE_VALUES
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.node_timeout = int(conf.get('node_timeout', 3))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.allowed_sync_hosts = [h.strip()
self.allowed_sync_hosts = [
h.strip()
for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
if h.strip()]
self.replicator_rpc = ReplicatorRpc(self.root, DATADIR,
ContainerBroker, self.mount_check, logger=self.logger)
self.replicator_rpc = ReplicatorRpc(
self.root, DATADIR, ContainerBroker, self.mount_check,
logger=self.logger)
self.auto_create_account_prefix = \
conf.get('auto_create_account_prefix') or '.'
if conf.get('allow_versions', 'f').lower() in TRUE_VALUES:
if config_true_value(conf.get('allow_versions', 'f')):
self.save_headers.append('x-versions-location')
swift.common.db.DB_PREALLOCATION = \
conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
config_true_value(conf.get('db_preallocation', 'f'))
def _get_container_broker(self, drive, part, account, container):
"""
@ -103,7 +104,8 @@ class ContainerController(object):
account_ip, account_port = account_host.rsplit(':', 1)
new_path = '/' + '/'.join([account, container])
info = broker.get_info()
account_headers = {'x-put-timestamp': info['put_timestamp'],
account_headers = {
'x-put-timestamp': info['put_timestamp'],
'x-delete-timestamp': info['delete_timestamp'],
'x-object-count': info['object_count'],
'x-bytes-used': info['bytes_used'],
@ -113,16 +115,17 @@ class ContainerController(object):
account_headers['x-account-override-deleted'] = 'yes'
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(account_ip, account_port,
account_device, account_partition, 'PUT', new_path,
account_headers)
conn = http_connect(
account_ip, account_port, account_device,
account_partition, 'PUT', new_path, account_headers)
with Timeout(self.node_timeout):
account_response = conn.getresponse()
account_response.read()
if account_response.status == HTTP_NOT_FOUND:
return HTTPNotFound(request=req)
elif not is_success(account_response.status):
self.logger.error(_('ERROR Account update failed '
self.logger.error(_(
'ERROR Account update failed '
'with %(ip)s:%(port)s/%(device)s (will retry '
'later): Response %(status)s %(reason)s'),
{'ip': account_ip, 'port': account_port,
@ -130,7 +133,8 @@ class ContainerController(object):
'status': account_response.status,
'reason': account_response.reason})
except (Exception, Timeout):
self.logger.exception(_('ERROR account update failed with '
self.logger.exception(_(
'ERROR account update failed with '
'%(ip)s:%(port)s/%(device)s (will retry later)'),
{'ip': account_ip, 'port': account_port,
'device': account_device})
@ -147,12 +151,12 @@ class ContainerController(object):
except ValueError, err:
self.logger.increment('DELETE.errors')
return HTTPBadRequest(body=str(err), content_type='text/plain',
request=req)
request=req)
if 'x-timestamp' not in req.headers or \
not check_float(req.headers['x-timestamp']):
not check_float(req.headers['x-timestamp']):
self.logger.increment('DELETE.errors')
return HTTPBadRequest(body='Missing timestamp', request=req,
content_type='text/plain')
content_type='text/plain')
if self.mount_check and not check_mount(self.root, drive):
self.logger.increment('DELETE.errors')
return HTTPInsufficientStorage(drive=drive, request=req)
@ -174,7 +178,7 @@ class ContainerController(object):
self.logger.increment('DELETE.errors')
return HTTPConflict(request=req)
existed = float(broker.get_info()['put_timestamp']) and \
not broker.is_deleted()
not broker.is_deleted()
broker.delete_db(req.headers['X-Timestamp'])
if not broker.is_deleted():
self.logger.increment('DELETE.errors')
@ -198,12 +202,12 @@ class ContainerController(object):
except ValueError, err:
self.logger.increment('PUT.errors')
return HTTPBadRequest(body=str(err), content_type='text/plain',
request=req)
request=req)
if 'x-timestamp' not in req.headers or \
not check_float(req.headers['x-timestamp']):
not check_float(req.headers['x-timestamp']):
self.logger.increment('PUT.errors')
return HTTPBadRequest(body='Missing timestamp', request=req,
content_type='text/plain')
content_type='text/plain')
if 'x-container-sync-to' in req.headers:
err = validate_sync_to(req.headers['x-container-sync-to'],
self.allowed_sync_hosts)
@ -223,7 +227,8 @@ class ContainerController(object):
self.logger.timing_since('PUT.timing', start_time)
return HTTPNotFound()
broker.put_object(obj, timestamp, int(req.headers['x-size']),
req.headers['x-content-type'], req.headers['x-etag'])
req.headers['x-content-type'],
req.headers['x-etag'])
self.logger.timing_since('PUT.timing', start_time)
return HTTPCreated(request=req)
else: # put container
@ -237,10 +242,11 @@ class ContainerController(object):
self.logger.increment('PUT.errors')
return HTTPConflict(request=req)
metadata = {}
metadata.update((key, (value, timestamp))
metadata.update(
(key, (value, timestamp))
for key, value in req.headers.iteritems()
if key.lower() in self.save_headers or
key.lower().startswith('x-container-meta-'))
key.lower().startswith('x-container-meta-'))
if metadata:
if 'X-Container-Sync-To' in metadata:
if 'X-Container-Sync-To' not in broker.metadata or \
@ -268,7 +274,7 @@ class ContainerController(object):
except ValueError, err:
self.logger.increment('HEAD.errors')
return HTTPBadRequest(body=str(err), content_type='text/plain',
request=req)
request=req)
if self.mount_check and not check_mount(self.root, drive):
self.logger.increment('HEAD.errors')
return HTTPInsufficientStorage(drive=drive, request=req)
@ -285,7 +291,8 @@ class ContainerController(object):
'X-Timestamp': info['created_at'],
'X-PUT-Timestamp': info['put_timestamp'],
}
headers.update((key, value)
headers.update(
(key, value)
for key, (value, timestamp) in broker.metadata.iteritems()
if value != '' and (key.lower() in self.save_headers or
key.lower().startswith('x-container-meta-')))
@ -303,7 +310,7 @@ class ContainerController(object):
except ValueError, err:
self.logger.increment('GET.errors')
return HTTPBadRequest(body=str(err), content_type='text/plain',
request=req)
request=req)
if self.mount_check and not check_mount(self.root, drive):
self.logger.increment('GET.errors')
return HTTPInsufficientStorage(drive=drive, request=req)
@ -320,7 +327,8 @@ class ContainerController(object):
'X-Timestamp': info['created_at'],
'X-PUT-Timestamp': info['put_timestamp'],
}
resp_headers.update((key, value)
resp_headers.update(
(key, value)
for key, (value, timestamp) in broker.metadata.iteritems()
if value != '' and (key.lower() in self.save_headers or
key.lower().startswith('x-container-meta-')))
@ -338,7 +346,8 @@ class ContainerController(object):
if given_limit and given_limit.isdigit():
limit = int(given_limit)
if limit > CONTAINER_LISTING_LIMIT:
return HTTPPreconditionFailed(request=req,
return HTTPPreconditionFailed(
request=req,
body='Maximum limit is %d' % CONTAINER_LISTING_LIMIT)
query_format = get_param(req, 'format')
except UnicodeDecodeError, err:
@ -350,9 +359,9 @@ class ContainerController(object):
FORMAT2CONTENT_TYPE['plain'])
try:
out_content_type = req.accept.best_match(
['text/plain', 'application/json',
'application/xml', 'text/xml'],
default_match='text/plain')
['text/plain', 'application/json', 'application/xml',
'text/xml'],
default_match='text/plain')
except AssertionError, err:
self.logger.increment('GET.errors')
return HTTPBadRequest(body='bad accept header: %s' % req.accept,
@ -389,10 +398,11 @@ class ContainerController(object):
'</subdir>' % (name, name))
else:
content_type = saxutils.escape(content_type)
xml_output.append('<object><name>%s</name><hash>%s</hash>'\
'<bytes>%d</bytes><content_type>%s</content_type>'\
'<last_modified>%s</last_modified></object>' % \
(name, etag, size, content_type, created_at))
xml_output.append(
'<object><name>%s</name><hash>%s</hash>'
'<bytes>%d</bytes><content_type>%s</content_type>'
'<last_modified>%s</last_modified></object>' %
(name, etag, size, content_type, created_at))
container_list = ''.join([
'<?xml version="1.0" encoding="UTF-8"?>\n',
'<container name=%s>' % saxutils.quoteattr(container),
@ -421,7 +431,7 @@ class ContainerController(object):
except ValueError, err:
self.logger.increment('REPLICATE.errors')
return HTTPBadRequest(body=str(err), content_type='text/plain',
request=req)
request=req)
if self.mount_check and not check_mount(self.root, drive):
self.logger.increment('REPLICATE.errors')
return HTTPInsufficientStorage(drive=drive, request=req)
@ -450,7 +460,7 @@ class ContainerController(object):
not check_float(req.headers['x-timestamp']):
self.logger.increment('POST.errors')
return HTTPBadRequest(body='Missing or bad timestamp',
request=req, content_type='text/plain')
request=req, content_type='text/plain')
if 'x-container-sync-to' in req.headers:
err = validate_sync_to(req.headers['x-container-sync-to'],
self.allowed_sync_hosts)
@ -466,10 +476,10 @@ class ContainerController(object):
return HTTPNotFound(request=req)
timestamp = normalize_timestamp(req.headers['x-timestamp'])
metadata = {}
metadata.update((key, (value, timestamp))
for key, value in req.headers.iteritems()
metadata.update(
(key, (value, timestamp)) for key, value in req.headers.iteritems()
if key.lower() in self.save_headers or
key.lower().startswith('x-container-meta-'))
key.lower().startswith('x-container-meta-'))
if metadata:
if 'X-Container-Sync-To' in metadata:
if 'X-Container-Sync-To' not in broker.metadata or \
@ -497,8 +507,9 @@ class ContainerController(object):
else:
res = method(req)
except (Exception, Timeout):
self.logger.exception(_('ERROR __call__ error with %(method)s'
' %(path)s '), {'method': req.method, 'path': req.path})
self.logger.exception(_(
'ERROR __call__ error with %(method)s %(path)s '),
{'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
trans_time = '%.4f' % (time.time() - start_time)
log_message = '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %s' % (

View File

@ -27,7 +27,7 @@ from swift.common.direct_client import direct_get_object
from swift.common.ring import Ring
from swift.common.db import ContainerBroker
from swift.common.utils import audit_location_generator, get_logger, \
hash_path, TRUE_VALUES, validate_sync_to, whataremyips
hash_path, config_true_value, validate_sync_to, whataremyips
from swift.common.daemon import Daemon
from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND
@ -144,8 +144,7 @@ class ContainerSync(Daemon):
self.devices = conf.get('devices', '/srv/node')
#: Indicates whether mount points should be verified as actual mount
#: points (normally true, false for tests and SAIO).
self.mount_check = \
conf.get('mount_check', 'true').lower() in TRUE_VALUES
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
#: Minimum time between full scans. This is to keep the daemon from
#: running wild on near empty systems.
self.interval = int(conf.get('interval', 300))
@ -154,7 +153,8 @@ class ContainerSync(Daemon):
#: it'll just be resumed next scan.
self.container_time = int(conf.get('container_time', 60))
#: The list of hosts we're allowed to send syncs to.
self.allowed_sync_hosts = [h.strip()
self.allowed_sync_hosts = [
h.strip()
for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
if h.strip()]
self.proxy = conf.get('sync_proxy')
@ -174,13 +174,13 @@ class ContainerSync(Daemon):
swift_dir = conf.get('swift_dir', '/etc/swift')
#: swift.common.ring.Ring for locating containers.
self.container_ring = container_ring or Ring(swift_dir,
ring_name='container')
ring_name='container')
#: swift.common.ring.Ring for locating objects.
self.object_ring = object_ring or Ring(swift_dir, ring_name='object')
self._myips = whataremyips()
self._myport = int(conf.get('bind_port', 6001))
swift.common.db.DB_PREALLOCATION = \
conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
config_true_value(conf.get('db_preallocation', 'f'))
def run_forever(self):
"""
@ -351,9 +351,9 @@ class ContainerSync(Daemon):
if row['deleted']:
try:
delete_object(sync_to, name=row['name'],
headers={'x-timestamp': row['created_at'],
'x-container-sync-key': sync_key},
proxy=self.proxy)
headers={'x-timestamp': row['created_at'],
'x-container-sync-key': sync_key},
proxy=self.proxy)
except ClientException, err:
if err.http_status != HTTP_NOT_FOUND:
raise
@ -371,8 +371,8 @@ class ContainerSync(Daemon):
headers = body = None
for node in nodes:
try:
these_headers, this_body = direct_get_object(node,
part, info['account'], info['container'],
these_headers, this_body = direct_get_object(
node, part, info['account'], info['container'],
row['name'], resp_chunk_size=65536)
this_timestamp = float(these_headers['x-timestamp'])
if this_timestamp > timestamp:
@ -389,8 +389,9 @@ class ContainerSync(Daemon):
if timestamp < looking_for_timestamp:
if exc:
raise exc
raise Exception(_('Unknown exception trying to GET: '
'%(node)r %(account)r %(container)r %(object)r'),
raise Exception(
_('Unknown exception trying to GET: %(node)r '
'%(account)r %(container)r %(object)r'),
{'node': node, 'part': part,
'account': info['account'],
'container': info['container'],
@ -403,20 +404,21 @@ class ContainerSync(Daemon):
headers['x-timestamp'] = row['created_at']
headers['x-container-sync-key'] = sync_key
put_object(sync_to, name=row['name'], headers=headers,
contents=_Iter2FileLikeObject(body), proxy=self.proxy)
contents=_Iter2FileLikeObject(body),
proxy=self.proxy)
self.container_puts += 1
self.logger.increment('puts')
self.logger.timing_since('puts.timing', start_time)
except ClientException, err:
if err.http_status == HTTP_UNAUTHORIZED:
self.logger.info(_('Unauth %(sync_from)r '
'=> %(sync_to)r'),
self.logger.info(
_('Unauth %(sync_from)r => %(sync_to)r'),
{'sync_from': '%s/%s' %
(quote(info['account']), quote(info['container'])),
'sync_to': sync_to})
elif err.http_status == HTTP_NOT_FOUND:
self.logger.info(_('Not found %(sync_from)r '
'=> %(sync_to)r'),
self.logger.info(
_('Not found %(sync_from)r => %(sync_to)r'),
{'sync_from': '%s/%s' %
(quote(info['account']), quote(info['container'])),
'sync_to': sync_to})

View File

@ -29,7 +29,7 @@ from swift.common.bufferedhttp import http_connect
from swift.common.db import ContainerBroker
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger, TRUE_VALUES, dump_recon_cache
from swift.common.utils import get_logger, config_true_value, dump_recon_cache
from swift.common.daemon import Daemon
from swift.common.http import is_success, HTTP_INTERNAL_SERVER_ERROR
@ -41,8 +41,7 @@ class ContainerUpdater(Daemon):
self.conf = conf
self.logger = get_logger(conf, log_route='container-updater')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = conf.get('mount_check', 'true').lower() in \
('true', 't', '1', 'on', 'yes', 'y')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.interval = int(conf.get('interval', 300))
self.account_ring = None
@ -58,7 +57,7 @@ class ContainerUpdater(Daemon):
float(conf.get('account_suppression_time', 60))
self.new_account_suppressions = None
swift.common.db.DB_PREALLOCATION = \
conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
config_true_value(conf.get('db_preallocation', 'f'))
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = os.path.join(self.recon_cache_path, "container.recon")
@ -112,7 +111,8 @@ class ContainerUpdater(Daemon):
begin = time.time()
now = time.time()
expired_suppressions = \
[a for a, u in self.account_suppressions.iteritems() if u < now]
[a for a, u in self.account_suppressions.iteritems()
if u < now]
for account in expired_suppressions:
del self.account_suppressions[account]
pid2filename = {}
@ -175,7 +175,8 @@ class ContainerUpdater(Daemon):
for path in self.get_paths():
self.container_sweep(path)
elapsed = time.time() - begin
self.logger.info(_('Container update single threaded sweep completed: '
self.logger.info(_(
'Container update single threaded sweep completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s failures, '
'%(no_change)s with no changes'),
{'elapsed': elapsed, 'success': self.successes,
@ -277,7 +278,8 @@ class ContainerUpdater(Daemon):
'X-Bytes-Used': bytes,
'X-Account-Override-Deleted': 'yes'})
except (Exception, Timeout):
self.logger.exception(_('ERROR account update failed with '
self.logger.exception(_(
'ERROR account update failed with '
'%(ip)s:%(port)s/%(device)s (will retry later): '), node)
return HTTP_INTERNAL_SERVER_ERROR
with Timeout(self.node_timeout):

View File

@ -20,7 +20,7 @@ from eventlet import Timeout
from swift.obj import server as object_server
from swift.common.utils import get_logger, audit_location_generator, \
ratelimit_sleep, TRUE_VALUES, dump_recon_cache
ratelimit_sleep, config_true_value, dump_recon_cache
from swift.common.exceptions import AuditException, DiskFileError, \
DiskFileNotExist
from swift.common.daemon import Daemon
@ -35,8 +35,7 @@ class AuditorWorker(object):
self.conf = conf
self.logger = get_logger(conf, log_route='object-auditor')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = conf.get('mount_check', 'true').lower() in \
TRUE_VALUES
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.max_files_per_second = float(conf.get('files_per_second', 20))
self.max_bytes_per_second = float(conf.get('bytes_per_second',
10000000))

View File

@ -33,7 +33,7 @@ from eventlet.support.greenlets import GreenletExit
from swift.common.ring import Ring
from swift.common.utils import whataremyips, unlink_older_than, lock_path, \
compute_eta, get_logger, write_pickle, renamer, dump_recon_cache, \
rsync_ip, mkdirs, TRUE_VALUES, list_from_csv
rsync_ip, mkdirs, config_true_value, list_from_csv
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
from swift.common.http import HTTP_OK, HTTP_INSUFFICIENT_STORAGE
@ -246,10 +246,8 @@ class ObjectReplicator(Daemon):
self.conf = conf
self.logger = get_logger(conf, log_route='object-replicator')
self.devices_dir = conf.get('devices', '/srv/node')
self.mount_check = conf.get('mount_check', 'true').lower() in \
TRUE_VALUES
self.vm_test_mode = conf.get(
'vm_test_mode', 'no').lower() in TRUE_VALUES
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.port = int(conf.get('bind_port', 6000))
self.concurrency = int(conf.get('concurrency', 1))

View File

@ -33,7 +33,7 @@ from eventlet import sleep, Timeout, tpool
from swift.common.utils import mkdirs, normalize_timestamp, public, \
storage_directory, hash_path, renamer, fallocate, fsync, \
split_path, drop_buffer_cache, get_logger, write_pickle, \
TRUE_VALUES, validate_device_partition
config_true_value, validate_device_partition
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_object_creation, check_mount, \
check_float, check_utf8
@ -114,8 +114,8 @@ class DiskFile(object):
self.iter_hook = iter_hook
self.name = '/' + '/'.join((account, container, obj))
name_hash = hash_path(account, container, obj)
self.datadir = os.path.join(path, device,
storage_directory(DATADIR, partition, name_hash))
self.datadir = os.path.join(
path, device, storage_directory(DATADIR, partition, name_hash))
self.device_path = os.path.join(path, device)
self.tmpdir = os.path.join(path, device, 'tmp')
self.logger = logger
@ -172,7 +172,7 @@ class DiskFile(object):
read += len(chunk)
if read - dropped_cache > (1024 * 1024):
self.drop_cache(self.fp.fileno(), dropped_cache,
read - dropped_cache)
read - dropped_cache)
dropped_cache = read
yield chunk
if self.iter_hook:
@ -180,7 +180,7 @@ class DiskFile(object):
else:
self.read_to_eof = True
self.drop_cache(self.fp.fileno(), dropped_cache,
read - dropped_cache)
read - dropped_cache)
break
finally:
self.close()
@ -212,10 +212,10 @@ class DiskFile(object):
except DiskFileNotExist:
return
if (self.iter_etag and self.started_at_0 and self.read_to_eof and
'ETag' in self.metadata and
self.iter_etag.hexdigest() != self.metadata.get('ETag')):
self.quarantine()
if self.iter_etag and self.started_at_0 and self.read_to_eof and \
'ETag' in self.metadata and \
self.iter_etag.hexdigest() != self.metadata.get('ETag'):
self.quarantine()
def close(self, verify_file=True):
"""
@ -229,10 +229,11 @@ class DiskFile(object):
if verify_file:
self._handle_close_quarantine()
except (Exception, Timeout), e:
self.logger.error(_('ERROR DiskFile %(data_file)s in '
'%(data_dir)s close failure: %(exc)s : %(stack)'),
{'exc': e, 'stack': ''.join(traceback.format_stack()),
'data_file': self.data_file, 'data_dir': self.datadir})
self.logger.error(_(
'ERROR DiskFile %(data_file)s in '
'%(data_dir)s close failure: %(exc)s : %(stack)'),
{'exc': e, 'stack': ''.join(traceback.format_stack()),
'data_file': self.data_file, 'data_dir': self.datadir})
finally:
self.fp.close()
self.fp = None
@ -337,8 +338,9 @@ class DiskFile(object):
if 'Content-Length' in self.metadata:
metadata_size = int(self.metadata['Content-Length'])
if file_size != metadata_size:
raise DiskFileError('Content-Length of %s does not '
'match file size of %s' % (metadata_size, file_size))
raise DiskFileError(
'Content-Length of %s does not match file size '
'of %s' % (metadata_size, file_size))
return file_size
except OSError, err:
if err.errno != errno.ENOENT:
@ -358,17 +360,15 @@ class ObjectController(object):
"""
self.logger = get_logger(conf, log_route='object-server')
self.devices = conf.get('devices', '/srv/node/')
self.mount_check = conf.get('mount_check', 'true').lower() in \
TRUE_VALUES
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.node_timeout = int(conf.get('node_timeout', 3))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.keep_cache_size = int(conf.get('keep_cache_size', 5242880))
self.keep_cache_private = \
conf.get('keep_cache_private', 'false').lower() in TRUE_VALUES
self.log_requests = \
conf.get('log_requests', 'true').lower() in TRUE_VALUES
config_true_value(conf.get('keep_cache_private', 'false'))
self.log_requests = config_true_value(conf.get('log_requests', 'true'))
self.max_upload_time = int(conf.get('max_upload_time', 86400))
self.slow = int(conf.get('slow', 0))
self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024
@ -378,10 +378,10 @@ class ObjectController(object):
x-delete-at,
x-object-manifest,
'''
self.allowed_headers = set(i.strip().lower() for i in
conf.get('allowed_headers',
default_allowed_headers).split(',') if i.strip() and
i.strip().lower() not in DISALLOWED_HEADERS)
self.allowed_headers = set(
i.strip().lower() for i in
conf.get('allowed_headers', default_allowed_headers).split(',')
if i.strip() and i.strip().lower() not in DISALLOWED_HEADERS)
self.expiring_objects_account = \
(conf.get('auto_create_account_prefix') or '.') + \
'expiring_objects'
@ -410,20 +410,22 @@ class ObjectController(object):
with ConnectionTimeout(self.conn_timeout):
ip, port = host.rsplit(':', 1)
conn = http_connect(ip, port, contdevice, partition, op,
full_path, headers_out)
full_path, headers_out)
with Timeout(self.node_timeout):
response = conn.getresponse()
response.read()
if is_success(response.status):
return
else:
self.logger.error(_('ERROR Container update failed '
self.logger.error(_(
'ERROR Container update failed '
'(saving for async update later): %(status)d '
'response from %(ip)s:%(port)s/%(dev)s'),
{'status': response.status, 'ip': ip, 'port': port,
'dev': contdevice})
except (Exception, Timeout):
self.logger.exception(_('ERROR container update failed with '
self.logger.exception(_(
'ERROR container update failed with '
'%(ip)s:%(port)s/%(dev)s (saving for async update later)'),
{'ip': ip, 'port': port, 'dev': contdevice})
async_dir = os.path.join(self.devices, objdevice, ASYNCDIR)
@ -431,9 +433,9 @@ class ObjectController(object):
self.logger.increment('async_pendings')
write_pickle(
{'op': op, 'account': account, 'container': container,
'obj': obj, 'headers': headers_out},
'obj': obj, 'headers': headers_out},
os.path.join(async_dir, ohash[-3:], ohash + '-' +
normalize_timestamp(headers_out['x-timestamp'])),
normalize_timestamp(headers_out['x-timestamp'])),
os.path.join(self.devices, objdevice, 'tmp'))
def container_update(self, op, account, container, obj, headers_in,
@ -484,7 +486,8 @@ class ObjectController(object):
headers_out['x-size'] = '0'
headers_out['x-content-type'] = 'text/plain'
headers_out['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'
self.async_update(op, self.expiring_objects_account,
self.async_update(
op, self.expiring_objects_account,
str(delete_at / self.expiring_objects_container_divisor *
self.expiring_objects_container_divisor),
'%s-%s/%s/%s' % (delete_at, account, container, obj),
@ -501,12 +504,12 @@ class ObjectController(object):
except ValueError, err:
self.logger.increment('POST.errors')
return HTTPBadRequest(body=str(err), request=request,
content_type='text/plain')
content_type='text/plain')
if 'x-timestamp' not in request.headers or \
not check_float(request.headers['x-timestamp']):
not check_float(request.headers['x-timestamp']):
self.logger.increment('POST.errors')
return HTTPBadRequest(body='Missing timestamp', request=request,
content_type='text/plain')
content_type='text/plain')
new_delete_at = int(request.headers.get('X-Delete-At') or 0)
if new_delete_at and new_delete_at < time.time():
self.logger.increment('POST.errors')
@ -533,7 +536,7 @@ class ObjectController(object):
return HTTPNotFound(request=request)
metadata = {'X-Timestamp': request.headers['x-timestamp']}
metadata.update(val for val in request.headers.iteritems()
if val[0].lower().startswith('x-object-meta-'))
if val[0].lower().startswith('x-object-meta-'))
for header_key in self.allowed_headers:
if header_key in request.headers:
header_caps = header_key.title()
@ -562,15 +565,15 @@ class ObjectController(object):
except ValueError, err:
self.logger.increment('PUT.errors')
return HTTPBadRequest(body=str(err), request=request,
content_type='text/plain')
content_type='text/plain')
if self.mount_check and not check_mount(self.devices, device):
self.logger.increment('PUT.errors')
return HTTPInsufficientStorage(drive=device, request=request)
if 'x-timestamp' not in request.headers or \
not check_float(request.headers['x-timestamp']):
not check_float(request.headers['x-timestamp']):
self.logger.increment('PUT.errors')
return HTTPBadRequest(body='Missing timestamp', request=request,
content_type='text/plain')
content_type='text/plain')
error_response = check_object_creation(request, obj)
if error_response:
self.logger.increment('PUT.errors')
@ -616,7 +619,7 @@ class ObjectController(object):
return HTTPClientDisconnect(request=request)
etag = etag.hexdigest()
if 'etag' in request.headers and \
request.headers['etag'].lower() != etag:
request.headers['etag'].lower() != etag:
return HTTPUnprocessableEntity(request=request)
metadata = {
'X-Timestamp': request.headers['x-timestamp'],
@ -625,8 +628,8 @@ class ObjectController(object):
'Content-Length': str(os.fstat(fd).st_size),
}
metadata.update(val for val in request.headers.iteritems()
if val[0].lower().startswith('x-object-meta-') and
len(val[0]) > 14)
if val[0].lower().startswith('x-object-meta-') and
len(val[0]) > 14)
for header_key in self.allowed_headers:
if header_key in request.headers:
header_caps = header_key.title()
@ -634,17 +637,19 @@ class ObjectController(object):
old_delete_at = int(file.metadata.get('X-Delete-At') or 0)
if old_delete_at != new_delete_at:
if new_delete_at:
self.delete_at_update('PUT', new_delete_at, account,
container, obj, request.headers, device)
self.delete_at_update(
'PUT', new_delete_at, account, container, obj,
request.headers, device)
if old_delete_at:
self.delete_at_update('DELETE', old_delete_at, account,
container, obj, request.headers, device)
self.delete_at_update(
'DELETE', old_delete_at, account, container, obj,
request.headers, device)
file.put(fd, tmppath, metadata)
file.unlinkold(metadata['X-Timestamp'])
if not orig_timestamp or \
orig_timestamp < request.headers['x-timestamp']:
self.container_update('PUT', account, container, obj,
request.headers,
self.container_update(
'PUT', account, container, obj, request.headers,
{'x-size': file.metadata['Content-Length'],
'x-content-type': file.metadata['Content-Type'],
'x-timestamp': file.metadata['X-Timestamp'],
@ -666,7 +671,7 @@ class ObjectController(object):
except ValueError, err:
self.logger.increment('GET.errors')
return HTTPBadRequest(body=str(err), request=request,
content_type='text/plain')
content_type='text/plain')
if self.mount_check and not check_mount(self.devices, device):
self.logger.increment('GET.errors')
return HTTPInsufficientStorage(drive=device, request=request)
@ -674,8 +679,9 @@ class ObjectController(object):
obj, self.logger, keep_data_fp=True,
disk_chunk_size=self.disk_chunk_size,
iter_hook=sleep)
if file.is_deleted() or ('X-Delete-At' in file.metadata and
int(file.metadata['X-Delete-At']) <= time.time()):
if file.is_deleted() or \
('X-Delete-At' in file.metadata and
int(file.metadata['X-Delete-At']) <= time.time()):
if request.headers.get('if-match') == '*':
self.logger.timing_since('GET.timing', start_time)
return HTTPPreconditionFailed(request=request)
@ -707,8 +713,9 @@ class ObjectController(object):
self.logger.increment('GET.errors')
return HTTPPreconditionFailed(request=request)
if if_unmodified_since and \
datetime.fromtimestamp(float(file.metadata['X-Timestamp']), UTC) > \
if_unmodified_since:
datetime.fromtimestamp(
float(file.metadata['X-Timestamp']), UTC) > \
if_unmodified_since:
file.close()
self.logger.timing_since('GET.timing', start_time)
return HTTPPreconditionFailed(request=request)
@ -719,15 +726,16 @@ class ObjectController(object):
self.logger.increment('GET.errors')
return HTTPPreconditionFailed(request=request)
if if_modified_since and \
datetime.fromtimestamp(float(file.metadata['X-Timestamp']), UTC) < \
if_modified_since:
datetime.fromtimestamp(
float(file.metadata['X-Timestamp']), UTC) < \
if_modified_since:
file.close()
self.logger.timing_since('GET.timing', start_time)
return HTTPNotModified(request=request)
response = Response(app_iter=file,
request=request, conditional_response=True)
response.headers['Content-Type'] = file.metadata.get('Content-Type',
'application/octet-stream')
request=request, conditional_response=True)
response.headers['Content-Type'] = file.metadata.get(
'Content-Type', 'application/octet-stream')
for key, value in file.metadata.iteritems():
if key.lower().startswith('x-object-meta-') or \
key.lower() in self.allowed_headers:
@ -765,8 +773,9 @@ class ObjectController(object):
return HTTPInsufficientStorage(drive=device, request=request)
file = DiskFile(self.devices, device, partition, account, container,
obj, self.logger, disk_chunk_size=self.disk_chunk_size)
if file.is_deleted() or ('X-Delete-At' in file.metadata and
int(file.metadata['X-Delete-At']) <= time.time()):
if file.is_deleted() or \
('X-Delete-At' in file.metadata and
int(file.metadata['X-Delete-At']) <= time.time()):
self.logger.timing_since('HEAD.timing', start_time)
return HTTPNotFound(request=request)
try:
@ -776,8 +785,8 @@ class ObjectController(object):
self.logger.timing_since('HEAD.timing', start_time)
return HTTPNotFound(request=request)
response = Response(request=request, conditional_response=True)
response.headers['Content-Type'] = file.metadata.get('Content-Type',
'application/octet-stream')
response.headers['Content-Type'] = file.metadata.get(
'Content-Type', 'application/octet-stream')
for key, value in file.metadata.iteritems():
if key.lower().startswith('x-object-meta-') or \
key.lower() in self.allowed_headers:
@ -803,12 +812,12 @@ class ObjectController(object):
except ValueError, e:
self.logger.increment('DELETE.errors')
return HTTPBadRequest(body=str(e), request=request,
content_type='text/plain')
content_type='text/plain')
if 'x-timestamp' not in request.headers or \
not check_float(request.headers['x-timestamp']):
not check_float(request.headers['x-timestamp']):
self.logger.increment('DELETE.errors')
return HTTPBadRequest(body='Missing timestamp', request=request,
content_type='text/plain')
content_type='text/plain')
if self.mount_check and not check_mount(self.devices, device):
self.logger.increment('DELETE.errors')
return HTTPInsufficientStorage(drive=device, request=request)
@ -819,7 +828,8 @@ class ObjectController(object):
int(request.headers['x-if-delete-at']) != \
int(file.metadata.get('X-Delete-At') or 0):
self.logger.timing_since('DELETE.timing', start_time)
return HTTPPreconditionFailed(request=request,
return HTTPPreconditionFailed(
request=request,
body='X-If-Delete-At and X-Delete-At do not match')
orig_timestamp = file.metadata.get('X-Timestamp')
if file.is_deleted():
@ -836,9 +846,10 @@ class ObjectController(object):
file.unlinkold(metadata['X-Timestamp'])
if not orig_timestamp or \
orig_timestamp < request.headers['x-timestamp']:
self.container_update('DELETE', account, container, obj,
request.headers, {'x-timestamp': metadata['X-Timestamp'],
'x-trans-id': request.headers.get('x-trans-id', '-')},
self.container_update(
'DELETE', account, container, obj, request.headers,
{'x-timestamp': metadata['X-Timestamp'],
'x-trans-id': request.headers.get('x-trans-id', '-')},
device)
resp = response_class(request=request)
self.logger.timing_since('DELETE.timing', start_time)
@ -889,7 +900,8 @@ class ObjectController(object):
else:
res = method(req)
except (Exception, Timeout):
self.logger.exception(_('ERROR __call__ error with %(method)s'
self.logger.exception(_(
'ERROR __call__ error with %(method)s'
' %(path)s '), {'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
trans_time = time.time() - start_time

View File

@ -26,7 +26,7 @@ from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger, renamer, write_pickle, \
dump_recon_cache
dump_recon_cache, config_true_value
from swift.common.daemon import Daemon
from swift.obj.server import ASYNCDIR
from swift.common.http import is_success, HTTP_NOT_FOUND, \
@ -40,8 +40,7 @@ class ObjectUpdater(Daemon):
self.conf = conf
self.logger = get_logger(conf, log_route='object-updater')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = conf.get('mount_check', 'true').lower() in \
('true', 't', '1', 'on', 'yes', 'y')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.interval = int(conf.get('interval', 300))
self.container_ring = None

View File

@ -31,7 +31,7 @@ from eventlet import spawn_n, GreenPile, Timeout
from eventlet.queue import Queue, Empty, Full
from eventlet.timeout import Timeout
from swift.common.utils import normalize_timestamp, TRUE_VALUES, public
from swift.common.utils import normalize_timestamp, config_true_value, public
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import MAX_ACCOUNT_NAME_LENGTH
from swift.common.exceptions import ChunkReadTimeout, ConnectionTimeout
@ -604,7 +604,7 @@ class Controller(object):
reasons = []
bodies = []
sources = []
newest = req.headers.get('x-newest', 'f').lower() in TRUE_VALUES
newest = config_true_value(req.headers.get('x-newest', 'f'))
nodes = iter(nodes)
while len(statuses) < attempts:
try:

View File

@ -40,8 +40,8 @@ from eventlet import sleep, GreenPile, Timeout
from eventlet.queue import Queue
from eventlet.timeout import Timeout
from swift.common.utils import ContextPool, normalize_timestamp, TRUE_VALUES, \
public
from swift.common.utils import ContextPool, normalize_timestamp, \
config_true_value, public
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_metadata, check_object_creation, \
CONTAINER_LISTING_LIMIT, MAX_FILE_SIZE
@ -117,12 +117,14 @@ class SegmentedIterable(object):
self.next_get_time = time.time() + \
1.0 / self.controller.app.rate_limit_segments_per_sec
shuffle(nodes)
resp = self.controller.GETorHEAD_base(req, _('Object'), partition,
resp = self.controller.GETorHEAD_base(
req, _('Object'), partition,
self.controller.iter_nodes(partition, nodes,
self.controller.app.object_ring), path,
len(nodes))
self.controller.app.object_ring),
path, len(nodes))
if not is_success(resp.status_int):
raise Exception(_('Could not load object segment %(path)s:' \
raise Exception(_(
'Could not load object segment %(path)s:'
' %(status)s') % {'path': path, 'status': resp.status_int})
self.segment_iter = resp.app_iter
# See NOTE: swift_conn at top of file about this.
@ -131,8 +133,9 @@ class SegmentedIterable(object):
raise
except (Exception, Timeout), err:
if not getattr(err, 'swift_logged', False):
self.controller.app.logger.exception(_('ERROR: While '
'processing manifest /%(acc)s/%(cont)s/%(obj)s'),
self.controller.app.logger.exception(_(
'ERROR: While processing manifest '
'/%(acc)s/%(cont)s/%(obj)s'),
{'acc': self.controller.account_name,
'cont': self.controller.container_name,
'obj': self.controller.object_name})
@ -162,8 +165,9 @@ class SegmentedIterable(object):
raise
except (Exception, Timeout), err:
if not getattr(err, 'swift_logged', False):
self.controller.app.logger.exception(_('ERROR: While '
'processing manifest /%(acc)s/%(cont)s/%(obj)s'),
self.controller.app.logger.exception(_(
'ERROR: While processing manifest '
'/%(acc)s/%(cont)s/%(obj)s'),
{'acc': self.controller.account_name,
'cont': self.controller.container_name,
'obj': self.controller.object_name})
@ -220,8 +224,9 @@ class SegmentedIterable(object):
raise
except (Exception, Timeout), err:
if not getattr(err, 'swift_logged', False):
self.controller.app.logger.exception(_('ERROR: While '
'processing manifest /%(acc)s/%(cont)s/%(obj)s'),
self.controller.app.logger.exception(_(
'ERROR: While processing manifest '
'/%(acc)s/%(cont)s/%(obj)s'),
{'acc': self.controller.account_name,
'cont': self.controller.container_name,
'obj': self.controller.object_name})
@ -255,8 +260,8 @@ class ObjectController(Controller):
'format=json&prefix=%s&marker=%s' % (quote(lprefix),
quote(marker))
shuffle(lnodes)
lresp = self.GETorHEAD_base(lreq, _('Container'),
lpartition, lnodes, lreq.path_info,
lresp = self.GETorHEAD_base(
lreq, _('Container'), lpartition, lnodes, lreq.path_info,
len(lnodes))
if 'swift.authorize' in env:
lreq.acl = lresp.headers.get('x-container-read')
@ -300,9 +305,10 @@ class ObjectController(Controller):
partition, nodes = self.app.object_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
shuffle(nodes)
resp = self.GETorHEAD_base(req, _('Object'), partition,
self.iter_nodes(partition, nodes, self.app.object_ring),
req.path_info, len(nodes))
resp = self.GETorHEAD_base(
req, _('Object'), partition,
self.iter_nodes(partition, nodes, self.app.object_ring),
req.path_info, len(nodes))
if 'x-object-manifest' in resp.headers:
lcontainer, lprefix = \
@ -311,7 +317,7 @@ class ObjectController(Controller):
lprefix = unquote(lprefix)
try:
listing = list(self._listing_iter(lcontainer, lprefix,
req.environ))
req.environ))
except ListingIterNotFound:
return HTTPNotFound(request=req)
except ListingIterNotAuthorized, err:
@ -337,7 +343,8 @@ class ObjectController(Controller):
head_response.status_int = resp.status_int
return head_response
else:
resp.app_iter = SegmentedIterable(self, lcontainer,
resp.app_iter = SegmentedIterable(
self, lcontainer,
self._listing_iter(lcontainer, lprefix, req.environ),
resp)
@ -348,7 +355,7 @@ class ObjectController(Controller):
content_length = sum(o['bytes'] for o in listing)
last_modified = max(o['last_modified'] for o in listing)
last_modified = datetime(*map(int, re.split('[^\d]',
last_modified)[:-1]))
last_modified)[:-1]))
etag = md5(
''.join(o['hash'] for o in listing)).hexdigest()
else:
@ -396,11 +403,11 @@ class ObjectController(Controller):
req.headers['x-delete-at'] = '%d' % (time.time() + x_delete_after)
if self.app.object_post_as_copy:
req.method = 'PUT'
req.path_info = '/%s/%s/%s' % (self.account_name,
self.container_name, self.object_name)
req.path_info = '/%s/%s/%s' % (
self.account_name, self.container_name, self.object_name)
req.headers['Content-Length'] = 0
req.headers['X-Copy-From'] = quote('/%s/%s' % (self.container_name,
self.object_name))
self.object_name))
req.headers['X-Fresh-Metadata'] = 'true'
req.environ['swift_versioned_copy'] = True
resp = self.PUT(req)
@ -430,13 +437,15 @@ class ObjectController(Controller):
try:
x_delete_at = int(req.headers['x-delete-at'])
if x_delete_at < time.time():
return HTTPBadRequest(body='X-Delete-At in past',
request=req, content_type='text/plain')
return HTTPBadRequest(
body='X-Delete-At in past', request=req,
content_type='text/plain')
except ValueError:
return HTTPBadRequest(request=req,
content_type='text/plain',
body='Non-integer X-Delete-At')
delete_at_container = str(x_delete_at /
delete_at_container = str(
x_delete_at /
self.app.expiring_objects_container_divisor *
self.app.expiring_objects_container_divisor)
delete_at_part, delete_at_nodes = \
@ -475,7 +484,7 @@ class ObjectController(Controller):
except (Exception, ChunkWriteTimeout):
conn.failed = True
self.exception_occurred(conn.node, _('Object'),
_('Trying to write to %s') % path)
_('Trying to write to %s') % path)
conn.queue.task_done()
def _connect_put_node(self, nodes, part, path, headers,
@ -485,8 +494,9 @@ class ObjectController(Controller):
for node in nodes:
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'],
node['device'], part, 'PUT', path, headers)
conn = http_connect(
node['ip'], node['port'], node['device'], part, 'PUT',
path, headers)
with Timeout(self.app.node_timeout):
resp = conn.getexpect()
if resp.status == HTTP_CONTINUE:
@ -501,7 +511,7 @@ class ObjectController(Controller):
self.error_limit(node)
except:
self.exception_occurred(node, _('Object'),
_('Expect: 100-continue on %s') % path)
_('Expect: 100-continue on %s') % path)
@public
@delay_denial
@ -533,12 +543,14 @@ class ObjectController(Controller):
try:
x_delete_at = int(req.headers['x-delete-at'])
if x_delete_at < time.time():
return HTTPBadRequest(body='X-Delete-At in past',
request=req, content_type='text/plain')
return HTTPBadRequest(
body='X-Delete-At in past', request=req,
content_type='text/plain')
except ValueError:
return HTTPBadRequest(request=req, content_type='text/plain',
body='Non-integer X-Delete-At')
delete_at_container = str(x_delete_at /
delete_at_container = str(
x_delete_at /
self.app.expiring_objects_container_divisor *
self.app.expiring_objects_container_divisor)
delete_at_part, delete_at_nodes = \
@ -549,12 +561,13 @@ class ObjectController(Controller):
partition, nodes = self.app.object_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
# do a HEAD request for container sync and checking object versions
if 'x-timestamp' in req.headers or (object_versions and not
req.environ.get('swift_versioned_copy')):
if 'x-timestamp' in req.headers or \
(object_versions and not
req.environ.get('swift_versioned_copy')):
hreq = Request.blank(req.path_info, headers={'X-Newest': 'True'},
environ={'REQUEST_METHOD': 'HEAD'})
hresp = self.GETorHEAD_base(hreq, _('Object'), partition, nodes,
hreq.path_info, len(nodes))
hreq.path_info, len(nodes))
# Used by container sync feature
if 'x-timestamp' in req.headers:
try:
@ -565,7 +578,8 @@ class ObjectController(Controller):
float(req.headers['x-timestamp']):
return HTTPAccepted(request=req)
except ValueError:
return HTTPBadRequest(request=req, content_type='text/plain',
return HTTPBadRequest(
request=req, content_type='text/plain',
body='X-Timestamp should be a UNIX timestamp float value; '
'was %r' % req.headers['x-timestamp'])
else:
@ -575,7 +589,7 @@ class ObjectController(Controller):
if not req.headers.get('content-type'):
guessed_type, _junk = mimetypes.guess_type(req.path_info)
req.headers['Content-Type'] = guessed_type or \
'application/octet-stream'
'application/octet-stream'
content_type_manually_set = False
error_response = check_object_creation(req, self.object_name)
if error_response:
@ -602,9 +616,9 @@ class ObjectController(Controller):
'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
copy_environ = {'REQUEST_METHOD': 'COPY',
'swift_versioned_copy': True
}
}
copy_req = Request.blank(req.path_info, headers=copy_headers,
environ=copy_environ)
environ=copy_environ)
copy_resp = self.COPY(copy_req)
if is_client_error(copy_resp.status_int):
# missing container or bad permissions
@ -629,9 +643,10 @@ class ObjectController(Controller):
src_container_name, src_obj_name = \
source_header.split('/', 3)[2:]
except ValueError:
return HTTPPreconditionFailed(request=req,
return HTTPPreconditionFailed(
request=req,
body='X-Copy-From header must be of the form'
'<container name>/<object name>')
'<container name>/<object name>')
source_req = req.copy_get()
source_req.path_info = source_header
source_req.headers['X-Newest'] = 'true'
@ -645,7 +660,7 @@ class ObjectController(Controller):
self.object_name = orig_obj_name
self.container_name = orig_container_name
new_req = Request.blank(req.path_info,
environ=req.environ, headers=req.headers)
environ=req.environ, headers=req.headers)
data_source = source_resp.app_iter
new_req.content_length = source_resp.content_length
if new_req.content_length is None:
@ -660,8 +675,8 @@ class ObjectController(Controller):
if not content_type_manually_set:
new_req.headers['Content-Type'] = \
source_resp.headers['Content-Type']
if new_req.headers.get('x-fresh-metadata', 'false').lower() \
not in TRUE_VALUES:
if not config_true_value(
new_req.headers.get('x-fresh-metadata', 'false')):
for k, v in source_resp.headers.items():
if k.lower().startswith('x-object-meta-'):
new_req.headers[k] = v
@ -692,7 +707,7 @@ class ObjectController(Controller):
if len(conns) <= len(nodes) / 2:
self.app.logger.error(
_('Object PUT returning 503, %(conns)s/%(nodes)s '
'required connections'),
'required connections'),
{'conns': len(conns), 'nodes': len(nodes) // 2 + 1})
return HTTPServiceUnavailable(request=req)
bytes_transferred = 0
@ -715,12 +730,14 @@ class ObjectController(Controller):
return HTTPRequestEntityTooLarge(request=req)
for conn in list(conns):
if not conn.failed:
conn.queue.put('%x\r\n%s\r\n' % (len(chunk), chunk)
if chunked else chunk)
conn.queue.put(
'%x\r\n%s\r\n' % (len(chunk), chunk)
if chunked else chunk)
else:
conns.remove(conn)
if len(conns) <= len(nodes) / 2:
self.app.logger.error(_('Object PUT exceptions during'
self.app.logger.error(_(
'Object PUT exceptions during'
' send, %(conns)s/%(nodes)s required connections'),
{'conns': len(conns), 'nodes': len(nodes) / 2 + 1})
return HTTPServiceUnavailable(request=req)
@ -758,14 +775,17 @@ class ObjectController(Controller):
reasons.append(response.reason)
bodies.append(response.read())
if response.status >= HTTP_INTERNAL_SERVER_ERROR:
self.error_occurred(conn.node,
_('ERROR %(status)d %(body)s From Object Server ' \
're: %(path)s') % {'status': response.status,
'body': bodies[-1][:1024], 'path': req.path})
self.error_occurred(
conn.node,
_('ERROR %(status)d %(body)s From Object Server '
're: %(path)s') %
{'status': response.status,
'body': bodies[-1][:1024], 'path': req.path})
elif is_success(response.status):
etags.add(response.getheader('etag').strip('"'))
except (Exception, Timeout):
self.exception_occurred(conn.node, _('Object'),
self.exception_occurred(
conn.node, _('Object'),
_('Trying to get final status of PUT to %s') % req.path)
if len(etags) > 1:
self.app.logger.error(
@ -777,10 +797,10 @@ class ObjectController(Controller):
reasons.append('')
bodies.append('')
resp = self.best_response(req, statuses, reasons, bodies,
_('Object PUT'), etag=etag)
_('Object PUT'), etag=etag)
if source_header:
resp.headers['X-Copied-From'] = quote(
source_header.split('/', 2)[2])
source_header.split('/', 2)[2])
if 'last-modified' in source_resp.headers:
resp.headers['X-Copied-From-Last-Modified'] = \
source_resp.headers['last-modified']
@ -829,12 +849,12 @@ class ObjectController(Controller):
self.container_name + '/' + self.object_name
copy_headers = {'X-Newest': 'True',
'Destination': orig_container + '/' + orig_obj
}
}
copy_environ = {'REQUEST_METHOD': 'COPY',
'swift_versioned_copy': True
}
}
creq = Request.blank(copy_path, headers=copy_headers,
environ=copy_environ)
environ=copy_environ)
copy_resp = self.COPY(creq)
if is_client_error(copy_resp.status_int):
# some user error, maybe permissions
@ -867,7 +887,8 @@ class ObjectController(Controller):
req.headers['X-Timestamp'] = \
normalize_timestamp(float(req.headers['x-timestamp']))
except ValueError:
return HTTPBadRequest(request=req, content_type='text/plain',
return HTTPBadRequest(
request=req, content_type='text/plain',
body='X-Timestamp should be a UNIX timestamp float value; '
'was %r' % req.headers['x-timestamp'])
else:
@ -881,7 +902,7 @@ class ObjectController(Controller):
nheaders['X-Container-Device'] = container['device']
headers.append(nheaders)
resp = self.make_requests(req, self.app.object_ring,
partition, 'DELETE', req.path_info, headers)
partition, 'DELETE', req.path_info, headers)
return resp
@public
@ -898,9 +919,10 @@ class ObjectController(Controller):
try:
_junk, dest_container, dest_object = dest.split('/', 2)
except ValueError:
return HTTPPreconditionFailed(request=req,
body='Destination header must be of the form '
'<container name>/<object name>')
return HTTPPreconditionFailed(
request=req,
body='Destination header must be of the form '
'<container name>/<object name>')
source = '/' + self.container_name + '/' + self.object_name
self.container_name = dest_container
self.object_name = dest_object

View File

@ -34,7 +34,7 @@ from eventlet import Timeout
from swift.common.ring import Ring
from swift.common.utils import cache_from_env, get_logger, \
get_remote_client, split_path, TRUE_VALUES
get_remote_client, split_path, config_true_value
from swift.common.constraints import check_utf8
from swift.proxy.controllers import AccountController, ObjectController, \
ContainerController, Controller
@ -72,9 +72,9 @@ class Application(object):
self.recheck_account_existence = \
int(conf.get('recheck_account_existence', 60))
self.allow_account_management = \
conf.get('allow_account_management', 'no').lower() in TRUE_VALUES
config_true_value(conf.get('allow_account_management', 'no'))
self.object_post_as_copy = \
conf.get('object_post_as_copy', 'true').lower() in TRUE_VALUES
config_true_value(conf.get('object_post_as_copy', 'true'))
self.resellers_conf = ConfigParser()
self.resellers_conf.read(os.path.join(swift_dir, 'resellers.conf'))
self.object_ring = object_ring or Ring(swift_dir, ring_name='object')
@ -86,7 +86,7 @@ class Application(object):
mimetypes.init(mimetypes.knownfiles +
[os.path.join(swift_dir, 'mime.types')])
self.account_autocreate = \
conf.get('account_autocreate', 'no').lower() in TRUE_VALUES
config_true_value(conf.get('account_autocreate', 'no'))
self.expiring_objects_account = \
(conf.get('auto_create_account_prefix') or '.') + \
'expiring_objects'
@ -105,8 +105,7 @@ class Application(object):
int(conf.get('rate_limit_after_segment', 10))
self.rate_limit_segments_per_sec = \
int(conf.get('rate_limit_segments_per_sec', 1))
self.log_handoffs = \
conf.get('log_handoffs', 'true').lower() in TRUE_VALUES
self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
self.cors_allow_origin = [
a.strip()
for a in conf.get('cors_allow_origin', '').split(',')

View File

@ -14,7 +14,7 @@ from shutil import rmtree
from test import get_config
from ConfigParser import MissingSectionHeaderError
from StringIO import StringIO
from swift.common.utils import readconf, TRUE_VALUES
from swift.common.utils import readconf, config_true_value
from logging import Handler
import logging.handlers
@ -198,7 +198,7 @@ def fake_syslog_handler():
logging.handlers.SysLogHandler = FakeLogger
if get_config('unit_test').get('fake_syslog', 'False').lower() in TRUE_VALUES:
if config_true_value(get_config('unit_test').get('fake_syslog', 'False')):
fake_syslog_handler()

View File

@ -901,6 +901,18 @@ log_name = %(yarr)s'''
for v in utils.TRUE_VALUES:
self.assertEquals(v, v.lower())
def test_config_true_value(self):
orig_trues = utils.TRUE_VALUES
try:
utils.TRUE_VALUES = 'hello world'.split()
for val in 'hello world HELLO WORLD'.split():
self.assertTrue(utils.config_true_value(val) is True)
self.assertTrue(utils.config_true_value(True) is True)
self.assertTrue(utils.config_true_value('foo') is False)
self.assertTrue(utils.config_true_value(False) is False)
finally:
utils.TRUE_VALUES = orig_trues
def test_streq_const_time(self):
self.assertTrue(utils.streq_const_time('abc123', 'abc123'))
self.assertFalse(utils.streq_const_time('a', 'aaaaa'))