Merge "changed TRUE_VALUES references to utils.config_true_value() call"
commit c7948ec5d9
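Every hunk below applies the same mechanical substitution: a hand-rolled truthiness test against the TRUE_VALUES set becomes a call to the new config_true_value() helper in swift.common.utils. In outline (a minimal sketch; conf stands in for any Swift config mapping):

    # before: each call site spells out the membership test and must
    # remember to lower-case the raw config string itself
    mount_check = conf.get('mount_check', 'true').lower() in TRUE_VALUES

    # after: the helper owns the lower-casing and the accepted spellings
    mount_check = config_true_value(conf.get('mount_check', 'true'))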
@@ -23,7 +23,7 @@ from optparse import OptionParser
 
 from swift.common.bench import (BenchController, DistributedBenchController,
                                 create_containers, delete_containers)
-from swift.common.utils import readconf, LogAdapter, TRUE_VALUES
+from swift.common.utils import readconf, LogAdapter, config_true_value
 
 # The defaults should be sufficient to run swift-bench on a SAIO
 CONF_DEFAULTS = {
@@ -138,7 +138,7 @@ if __name__ == '__main__':
     options.containers = ['%s_%d' % (options.container_name, i)
                           for i in xrange(int(options.num_containers))]
     # check boolean options vs config parameter values
-    if str(options.delete).lower() in TRUE_VALUES:
+    if config_true_value(str(options.delete).lower()):
         options.delete = 'yes'
     else:
         options.delete = 'no'
@@ -170,5 +170,5 @@ if __name__ == '__main__':
     controller = controller_class(logger, options)
     controller.run()
 
-    if options.delete.lower() in TRUE_VALUES:
+    if config_true_value(options.delete.lower()):
         delete_containers(logger, options)
@@ -30,7 +30,7 @@ from eventlet.pools import Pool
 from swift.common import direct_client
 from swiftclient import ClientException, Connection, get_auth
 from swift.common.ring import Ring
-from swift.common.utils import compute_eta, get_time_units, TRUE_VALUES
+from swift.common.utils import compute_eta, get_time_units, config_true_value
 
 
 unmounted = []
@@ -45,7 +45,8 @@ def get_error_log(prefix):
         global debug, unmounted, notfound
         if hasattr(msg_or_exc, 'http_status'):
             identifier = '%s:%s/%s' % (msg_or_exc.http_host,
-                msg_or_exc.http_port, msg_or_exc.http_device)
+                                       msg_or_exc.http_port,
+                                       msg_or_exc.http_device)
             if msg_or_exc.http_status == 507:
                 if identifier not in unmounted:
                     unmounted.append(identifier)
@@ -68,8 +69,8 @@ def get_error_log(prefix):
 def container_dispersion_report(coropool, connpool, account, container_ring,
                                 retries):
     with connpool.item() as conn:
-        containers = [c['name'] for c in conn.get_account(prefix='dispersion_',
-            full_listing=True)[1]]
+        containers = [c['name'] for c in conn.get_account(
+            prefix='dispersion_', full_listing=True)[1]]
     containers_listed = len(containers)
     if not containers_listed:
         print >>stderr, 'No containers to query. Has ' \
@@ -88,9 +89,8 @@ def container_dispersion_report(coropool, connpool, account, container_ring,
         error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
         try:
             attempts, _junk = direct_client.retry(
-                direct_client.direct_head_container, node,
-                part, account, container, error_log=error_log,
-                retries=retries)
+                direct_client.direct_head_container, node, part, account,
+                container, error_log=error_log, retries=retries)
             retries_done[0] += attempts - 1
             found_count += 1
         except ClientException, err:
@@ -134,11 +134,12 @@ def container_dispersion_report(coropool, connpool, account, container_ring,
             missing_copies = container_ring.replica_count - copies
             if container_copies_found[copies]:
                 print missing_string(container_copies_found[copies],
-                    missing_copies, container_ring.replica_count)
+                                     missing_copies,
+                                     container_ring.replica_count)
         print '%.02f%% of container copies found (%d of %d)' % (
             value, copies_found, copies_expected)
         print 'Sample represents %.02f%% of the container partition space' % (
             100.0 * distinct_partitions / container_ring.partition_count)
         stdout.flush()
         return None
     else:
@@ -159,8 +160,8 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
     container = 'dispersion_objects'
     with connpool.item() as conn:
         try:
-            objects = [o['name'] for o in conn.get_container(container,
-                prefix='dispersion_', full_listing=True)[1]]
+            objects = [o['name'] for o in conn.get_container(
+                container, prefix='dispersion_', full_listing=True)[1]]
         except ClientException, err:
             if err.http_status != 404:
                 raise
@@ -186,9 +187,8 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
         error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
         try:
             attempts, _junk = direct_client.retry(
-                direct_client.direct_head_object, node, part,
-                account, container, obj, error_log=error_log,
-                retries=retries)
+                direct_client.direct_head_object, node, part, account,
+                container, obj, error_log=error_log, retries=retries)
             retries_done[0] += attempts - 1
             found_count += 1
         except ClientException, err:
@@ -232,11 +232,11 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
             missing_copies = object_ring.replica_count - copies
             if object_copies_found[copies]:
                 print missing_string(object_copies_found[copies],
-                    missing_copies, object_ring.replica_count)
+                                     missing_copies, object_ring.replica_count)
         print '%.02f%% of object copies found (%d of %d)' % \
             (value, copies_found, copies_expected)
         print 'Sample represents %.02f%% of the object partition space' % (
             100.0 * distinct_partitions / object_ring.partition_count)
         stdout.flush()
         return None
     else:
@@ -304,7 +304,7 @@ Usage: %prog [options] [conf_file]
     dispersion_coverage = int(conf.get('dispersion_coverage', 1))
     retries = int(conf.get('retries', 5))
     concurrency = int(conf.get('concurrency', 25))
-    if options.dump_json or conf.get('dump_json', 'no').lower() in TRUE_VALUES:
+    if options.dump_json or config_true_value(conf.get('dump_json', 'no')):
        json_output = True
     if options.debug:
        debug = True
@@ -316,10 +316,9 @@ Usage: %prog [options] [conf_file]
                         auth_version=conf.get('auth_version', '1.0'))
     account = url.rsplit('/', 1)[1]
     connpool = Pool(max_size=concurrency)
-    connpool.create = lambda: Connection(conf['auth_url'],
-        conf['auth_user'], conf['auth_key'],
-        retries=retries,
-        preauthurl=url, preauthtoken=token)
+    connpool.create = lambda: Connection(
+        conf['auth_url'], conf['auth_user'], conf['auth_key'], retries=retries,
+        preauthurl=url, preauthtoken=token)
 
     container_ring = Ring(swift_dir, ring_name='container')
     object_ring = Ring(swift_dir, ring_name='object')
@@ -21,7 +21,7 @@ import swift.common.db
 from swift.account import server as account_server
 from swift.common.db import AccountBroker
 from swift.common.utils import get_logger, audit_location_generator, \
-    TRUE_VALUES, dump_recon_cache
+    config_true_value, dump_recon_cache
 from swift.common.daemon import Daemon
 
 from eventlet import Timeout
@@ -34,13 +34,12 @@ class AccountAuditor(Daemon):
         self.conf = conf
         self.logger = get_logger(conf, log_route='account-auditor')
         self.devices = conf.get('devices', '/srv/node')
-        self.mount_check = conf.get('mount_check', 'true').lower() in \
-            TRUE_VALUES
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
         self.interval = int(conf.get('interval', 1800))
         self.account_passes = 0
         self.account_failures = 0
         swift.common.db.DB_PREALLOCATION = \
-            conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
+            config_true_value(conf.get('db_preallocation', 'f'))
         self.recon_cache_path = conf.get('recon_cache_path',
                                          '/var/cache/swift')
         self.rcache = os.path.join(self.recon_cache_path, "account.recon")
@@ -27,7 +27,7 @@ from swift.common.db import AccountBroker
 from swift.common.direct_client import ClientException, \
     direct_delete_container, direct_delete_object, direct_get_container
 from swift.common.ring import Ring
-from swift.common.utils import get_logger, whataremyips, TRUE_VALUES
+from swift.common.utils import get_logger, whataremyips, config_true_value
 from swift.common.daemon import Daemon
 
 
@@ -56,8 +56,7 @@ class AccountReaper(Daemon):
         self.conf = conf
         self.logger = get_logger(conf, log_route='account-reaper')
         self.devices = conf.get('devices', '/srv/node')
-        self.mount_check = conf.get('mount_check', 'true').lower() in \
-            TRUE_VALUES
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
         self.interval = int(conf.get('interval', 3600))
         self.swift_dir = conf.get('swift_dir', '/etc/swift')
         self.account_ring = None
@@ -71,7 +70,7 @@ class AccountReaper(Daemon):
             sqrt(self.concurrency)
         self.container_pool = GreenPool(size=self.container_concurrency)
         swift.common.db.DB_PREALLOCATION = \
-            conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
+            config_true_value(conf.get('db_preallocation', 'f'))
         self.delay_reaping = int(conf.get('delay_reaping') or 0)
 
     def get_account_ring(self):
@@ -26,7 +26,7 @@ from eventlet import Timeout
 import swift.common.db
 from swift.common.db import AccountBroker
 from swift.common.utils import get_logger, get_param, hash_path, public, \
-    normalize_timestamp, split_path, storage_directory, TRUE_VALUES, \
+    normalize_timestamp, split_path, storage_directory, config_true_value, \
     validate_device_partition, json
 from swift.common.constraints import ACCOUNT_LISTING_LIMIT, \
     check_mount, check_float, check_utf8, FORMAT2CONTENT_TYPE
@@ -47,15 +47,14 @@ class AccountController(object):
     def __init__(self, conf):
         self.logger = get_logger(conf, log_route='account-server')
         self.root = conf.get('devices', '/srv/node')
-        self.mount_check = conf.get('mount_check', 'true').lower() in \
-            TRUE_VALUES
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
         self.replicator_rpc = ReplicatorRpc(self.root, DATADIR, AccountBroker,
                                             self.mount_check,
                                             logger=self.logger)
         self.auto_create_account_prefix = \
             conf.get('auto_create_account_prefix') or '.'
         swift.common.db.DB_PREALLOCATION = \
-            conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
+            config_true_value(conf.get('db_preallocation', 'f'))
 
     def _get_account_broker(self, drive, part, account):
         hsh = hash_path(account)
@@ -28,7 +28,7 @@ import eventlet
 import eventlet.pools
 from eventlet.green.httplib import CannotSendRequest
 
-from swift.common.utils import TRUE_VALUES, LogAdapter
+from swift.common.utils import config_true_value, LogAdapter
 import swiftclient as client
 from swift.common import direct_client
 from swift.common.http import HTTP_CONFLICT
@@ -144,7 +144,7 @@ class Bench(object):
         self.user = conf.user
         self.key = conf.key
         self.auth_url = conf.auth
-        self.use_proxy = conf.use_proxy.lower() in TRUE_VALUES
+        self.use_proxy = config_true_value(conf.use_proxy)
         self.auth_version = conf.auth_version
         self.logger.info("Auth version: %s" % self.auth_version)
         if self.use_proxy:
@@ -314,7 +314,7 @@ class BenchController(object):
         self.logger = logger
         self.conf = conf
         self.names = []
-        self.delete = conf.delete.lower() in TRUE_VALUES
+        self.delete = config_true_value(conf.delete)
         self.gets = int(conf.num_gets)
         self.aborted = False
 
@@ -75,8 +75,7 @@ def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
                             log_name=kwargs.get('log_name'))
 
     # once on command line (i.e. daemonize=false) will over-ride config
-    once = once or \
-        conf.get('daemonize', 'true').lower() not in utils.TRUE_VALUES
+    once = once or not utils.config_true_value(conf.get('daemonize', 'true'))
 
     # pre-configure logger
     if 'logger' in kwargs:
@@ -87,7 +86,7 @@ def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
                                 log_route=section_name)
 
     # disable fallocate if desired
-    if conf.get('disable_fallocate', 'no').lower() in utils.TRUE_VALUES:
+    if utils.config_true_value(conf.get('disable_fallocate', 'no')):
         utils.disable_fallocate()
 
     try:
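One detail worth noting in the run_daemon hunk above: config_true_value() returns a plain bool, so the old double-negative membership test (`not in utils.TRUE_VALUES`) collapses into an ordinary `not`. A standalone illustration (hypothetical conf values, not part of the commit):

    >>> from swift.common.utils import config_true_value
    >>> conf = {'daemonize': 'false'}
    >>> once = False
    >>> once or not config_true_value(conf.get('daemonize', 'true'))
    True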
@@ -29,8 +29,8 @@ import simplejson
 
 import swift.common.db
 from swift.common.utils import get_logger, whataremyips, storage_directory, \
-    renamer, mkdirs, lock_parent_directory, TRUE_VALUES, unlink_older_than, \
-    dump_recon_cache, rsync_ip
+    renamer, mkdirs, lock_parent_directory, config_true_value, \
+    unlink_older_than, dump_recon_cache, rsync_ip
 from swift.common import ring
 from swift.common.http import HTTP_NOT_FOUND, HTTP_INSUFFICIENT_STORAGE
 from swift.common.bufferedhttp import BufferedHTTPConnection
@@ -53,9 +53,9 @@ def quarantine_db(object_file, server_type):
                         ('container' or 'account')
     """
     object_dir = os.path.dirname(object_file)
-    quarantine_dir = os.path.abspath(os.path.join(object_dir, '..',
-        '..', '..', '..', 'quarantined', server_type + 's',
-        os.path.basename(object_dir)))
+    quarantine_dir = os.path.abspath(
+        os.path.join(object_dir, '..', '..', '..', '..', 'quarantined',
+                     server_type + 's', os.path.basename(object_dir)))
     try:
         renamer(object_dir, quarantine_dir)
     except OSError, e:
@@ -88,7 +88,7 @@ class ReplConnection(BufferedHTTPConnection):
         try:
             body = simplejson.dumps(args)
             self.request('REPLICATE', self.path, body,
-                {'Content-Type': 'application/json'})
+                         {'Content-Type': 'application/json'})
             response = self.getresponse()
             response.data = response.read()
             return response
@@ -107,8 +107,7 @@ class Replicator(Daemon):
         self.conf = conf
         self.logger = get_logger(conf, log_route='replicator')
         self.root = conf.get('devices', '/srv/node')
-        self.mount_check = conf.get('mount_check', 'true').lower() in \
-            ('true', 't', '1', 'on', 'yes', 'y')
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
         self.port = int(conf.get('bind_port', self.default_port))
         concurrency = int(conf.get('concurrency', 8))
         self.cpool = GreenPool(size=concurrency)
@@ -118,13 +117,12 @@ class Replicator(Daemon):
         self.max_diffs = int(conf.get('max_diffs') or 100)
         self.interval = int(conf.get('interval') or
                             conf.get('run_pause') or 30)
-        self.vm_test_mode = conf.get(
-            'vm_test_mode', 'no').lower() in ('yes', 'true', 'on', '1')
+        self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no'))
         self.node_timeout = int(conf.get('node_timeout', 10))
         self.conn_timeout = float(conf.get('conn_timeout', 0.5))
         self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
         swift.common.db.DB_PREALLOCATION = \
-            conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
+            config_true_value(conf.get('db_preallocation', 'f'))
         self._zero_stats()
         self.recon_cache_path = conf.get('recon_cache_path',
                                          '/var/cache/swift')
@@ -149,17 +147,18 @@ class Replicator(Daemon):
                          {'count': self.stats['attempted'],
                           'time': time.time() - self.stats['start'],
                           'rate': self.stats['attempted'] /
                           (time.time() - self.stats['start'] + 0.0000001)})
         self.logger.info(_('Removed %(remove)d dbs') % self.stats)
         self.logger.info(_('%(success)s successes, %(failure)s failures')
                          % self.stats)
-        dump_recon_cache({'replication_stats': self.stats,
-            'replication_time': time.time() - self.stats['start']
-            }, self.rcache, self.logger)
+        dump_recon_cache(
+            {'replication_stats': self.stats,
+             'replication_time': time.time() - self.stats['start']},
+            self.rcache, self.logger)
         self.logger.info(' '.join(['%s:%s' % item for item in
             self.stats.items() if item[0] in
-            ('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl', 'empty',
-            'diff_capped')]))
+            ('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl',
+             'empty', 'diff_capped')]))
 
     def _rsync_file(self, db_file, remote_file, whole_file=True):
         """
@@ -185,7 +184,7 @@ class Replicator(Daemon):
         return proc.returncode == 0
 
     def _rsync_db(self, broker, device, http, local_id,
-            replicate_method='complete_rsync', replicate_timeout=None):
+                  replicate_method='complete_rsync', replicate_timeout=None):
         """
         Sync a whole db using rsync.
 
@@ -198,18 +197,18 @@ class Replicator(Daemon):
         """
         device_ip = rsync_ip(device['ip'])
         if self.vm_test_mode:
-            remote_file = '%s::%s%s/%s/tmp/%s' % (device_ip,
-                self.server_type, device['port'], device['device'],
+            remote_file = '%s::%s%s/%s/tmp/%s' % (
+                device_ip, self.server_type, device['port'], device['device'],
                 local_id)
         else:
-            remote_file = '%s::%s/%s/tmp/%s' % (device_ip,
-                self.server_type, device['device'], local_id)
+            remote_file = '%s::%s/%s/tmp/%s' % (
+                device_ip, self.server_type, device['device'], local_id)
         mtime = os.path.getmtime(broker.db_file)
         if not self._rsync_file(broker.db_file, remote_file):
             return False
         # perform block-level sync if the db was modified during the first sync
         if os.path.exists(broker.db_file + '-journal') or \
                 os.path.getmtime(broker.db_file) > mtime:
             # grab a lock so nobody else can modify it
             with broker.lock():
                 if not self._rsync_file(broker.db_file, remote_file, False):
@@ -243,13 +242,15 @@ class Replicator(Daemon):
         if not response or response.status >= 300 or response.status < 200:
             if response:
                 self.logger.error(_('ERROR Bad response %(status)s from '
                                     '%(host)s'),
-                    {'status': response.status, 'host': http.host})
+                                  {'status': response.status,
+                                   'host': http.host})
             return False
         point = objects[-1]['ROWID']
         objects = broker.get_items_since(point, self.per_diff)
         if objects:
-            self.logger.debug(_('Synchronization for %s has fallen more than '
+            self.logger.debug(_(
+                'Synchronization for %s has fallen more than '
                 '%s rows behind; moving on and will try again next pass.') %
                 (broker.db_file, self.max_diffs * self.per_diff))
             self.stats['diff_capped'] += 1
@@ -259,7 +260,8 @@ class Replicator(Daemon):
         response = http.replicate('merge_syncs', sync_table)
         if response and response.status >= 200 and response.status < 300:
             broker.merge_syncs([{'remote_id': remote_id,
-                'sync_point': point}], incoming=False)
+                                 'sync_point': point}],
+                                incoming=False)
             return True
         return False
 
@@ -283,7 +285,8 @@ class Replicator(Daemon):
             self.stats['hashmatch'] += 1
             self.logger.increment('hashmatches')
             broker.merge_syncs([{'remote_id': rinfo['id'],
-                'sync_point': rinfo['point']}], incoming=False)
+                                 'sync_point': rinfo['point']}],
+                                incoming=False)
             return True
 
     def _http_connect(self, node, partition, db_file):
@@ -297,7 +300,8 @@ class Replicator(Daemon):
         :returns: ReplConnection object
         """
         return ReplConnection(node, partition,
-            os.path.basename(db_file).split('.', 1)[0], self.logger)
+                              os.path.basename(db_file).split('.', 1)[0],
+                              self.logger)
 
     def _repl_to_node(self, node, broker, partition, info):
         """
@@ -319,8 +323,9 @@ class Replicator(Daemon):
                 _('ERROR Unable to connect to remote server: %s'), node)
             return False
         with Timeout(self.node_timeout):
-            response = http.replicate('sync', info['max_row'], info['hash'],
-                info['id'], info['created_at'], info['put_timestamp'],
+            response = http.replicate(
+                'sync', info['max_row'], info['hash'], info['id'],
+                info['created_at'], info['put_timestamp'],
                 info['delete_timestamp'], info['metadata'])
         if not response:
             return False
@@ -341,11 +346,11 @@ class Replicator(Daemon):
             self.stats['remote_merge'] += 1
             self.logger.increment('remote_merges')
             return self._rsync_db(broker, node, http, info['id'],
-                replicate_method='rsync_then_merge',
-                replicate_timeout=(info['count'] / 2000))
+                                  replicate_method='rsync_then_merge',
+                                  replicate_timeout=(info['count'] / 2000))
         # else send diffs over to the remote server
         return self._usync_db(max(rinfo['point'], local_sync),
-            broker, http, rinfo['id'], info['id'])
+                              broker, http, rinfo['id'], info['id'])
 
     def _replicate_object(self, partition, object_file, node_id):
         """
@@ -412,7 +417,8 @@ class Replicator(Daemon):
                 self.logger.error(_('ERROR Remote drive not mounted %s'), node)
             except (Exception, Timeout):
                 self.logger.exception(_('ERROR syncing %(file)s with node'
-                    ' %(node)s'), {'file': object_file, 'node': node})
+                                        ' %(node)s'),
+                                      {'file': object_file, 'node': node})
             self.stats['success' if success else 'failure'] += 1
             self.logger.increment('successes' if success else 'failures')
             responses.append(success)
@@ -542,7 +548,8 @@ class ReplicatorRpc(object):
                 not os.path.ismount(os.path.join(self.root, drive)):
             return Response(status='507 %s is not mounted' % drive)
         db_file = os.path.join(self.root, drive,
-            storage_directory(self.datadir, partition, hsh), hsh + '.db')
+                               storage_directory(self.datadir, partition, hsh),
+                               hsh + '.db')
         if op == 'rsync_then_merge':
             return self.rsync_then_merge(drive, db_file, args)
         if op == 'complete_rsync':
@@ -577,23 +584,23 @@ class ReplicatorRpc(object):
             timespan = time.time() - timemark
             if timespan > DEBUG_TIMINGS_THRESHOLD:
                 self.logger.debug(_('replicator-rpc-sync time for '
-                    'update_metadata: %.02fs') % timespan)
+                                    'update_metadata: %.02fs') % timespan)
             if info['put_timestamp'] != put_timestamp or \
                     info['created_at'] != created_at or \
                     info['delete_timestamp'] != delete_timestamp:
                 timemark = time.time()
                 broker.merge_timestamps(
                     created_at, put_timestamp, delete_timestamp)
                 timespan = time.time() - timemark
                 if timespan > DEBUG_TIMINGS_THRESHOLD:
                     self.logger.debug(_('replicator-rpc-sync time for '
-                        'merge_timestamps: %.02fs') % timespan)
+                                        'merge_timestamps: %.02fs') % timespan)
             timemark = time.time()
             info['point'] = broker.get_sync(id_)
             timespan = time.time() - timemark
             if timespan > DEBUG_TIMINGS_THRESHOLD:
                 self.logger.debug(_('replicator-rpc-sync time for get_sync: '
-                    '%.02fs') % timespan)
+                                    '%.02fs') % timespan)
             if hash_ == info['hash'] and info['point'] < remote_sync:
                 timemark = time.time()
                 broker.merge_syncs([{'remote_id': id_,
@@ -602,7 +609,7 @@ class ReplicatorRpc(object):
             timespan = time.time() - timemark
             if timespan > DEBUG_TIMINGS_THRESHOLD:
                 self.logger.debug(_('replicator-rpc-sync time for '
-                    'merge_syncs: %.02fs') % timespan)
+                                    'merge_syncs: %.02fs') % timespan)
             return Response(simplejson.dumps(info))
 
     def merge_syncs(self, broker, args):
@@ -91,12 +91,12 @@ class KeystoneAuth(object):
         self.reseller_admin_role = conf.get('reseller_admin_role',
                                             'ResellerAdmin')
         config_is_admin = conf.get('is_admin', "false").lower()
-        self.is_admin = config_is_admin in swift_utils.TRUE_VALUES
+        self.is_admin = swift_utils.config_true_value(config_is_admin)
         cfg_synchosts = conf.get('allowed_sync_hosts', '127.0.0.1')
         self.allowed_sync_hosts = [h.strip() for h in cfg_synchosts.split(',')
                                    if h.strip()]
         config_overrides = conf.get('allow_overrides', 't').lower()
-        self.allow_overrides = config_overrides in swift_utils.TRUE_VALUES
+        self.allow_overrides = swift_utils.config_true_value(config_overrides)
 
     def __call__(self, environ, start_response):
         identity = self._keystone_identity(environ)
@@ -42,7 +42,7 @@ from urllib import quote, unquote
 
 from swift.common.swob import Request
 from swift.common.utils import (get_logger, get_remote_client,
-                                get_valid_utf8_str, TRUE_VALUES)
+                                get_valid_utf8_str, config_true_value)
 
 
 class InputProxy(object):
@@ -92,7 +92,7 @@ class ProxyLoggingMiddleware(object):
 
     def __init__(self, app, conf):
         self.app = app
-        self.log_hdrs = conf.get('log_headers', 'no').lower() in TRUE_VALUES
+        self.log_hdrs = config_true_value(conf.get('log_headers', 'no'))
 
         # The leading access_* check is in case someone assumes that
         # log_statsd_valid_http_methods behaves like the other log_statsd_*
@@ -17,7 +17,7 @@ import errno
 import os
 
 from swift.common.swob import Request, Response
-from swift.common.utils import split_path, get_logger, TRUE_VALUES
+from swift.common.utils import split_path, get_logger, config_true_value
 from swift.common.constraints import check_mount
 from resource import getpagesize
 from hashlib import md5
@@ -59,8 +59,7 @@ class ReconMiddleware(object):
         self.object_ring_path = os.path.join(swift_dir, 'object.ring.gz')
         self.rings = [self.account_ring_path, self.container_ring_path,
                       self.object_ring_path]
-        self.mount_check = conf.get('mount_check', 'true').lower() \
-            in TRUE_VALUES
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
 
     def _from_recon_cache(self, cache_keys, cache_file, openr=open):
         """retrieve values from a recon cache file
@@ -159,7 +158,7 @@ class ReconMiddleware(object):
         if recon_type == 'object':
             return self._from_recon_cache(['object_expiration_pass',
-                'expired_last_pass'],
-                self.object_recon_cache)
+                                           'expired_last_pass'],
+                                          self.object_recon_cache)
 
     def get_auditor_info(self, recon_type):
         """get auditor info"""
@@ -186,8 +185,8 @@ class ReconMiddleware(object):
         """list unmounted (failed?) devices"""
         mountlist = []
         for entry in os.listdir(self.devices):
-            mpoint = {'device': entry, \
-                "mounted": check_mount(self.devices, entry)}
+            mpoint = {'device': entry,
+                      'mounted': check_mount(self.devices, entry)}
             if not mpoint['mounted']:
                 mountlist.append(mpoint)
         return mountlist
@@ -202,11 +201,12 @@ class ReconMiddleware(object):
             capacity = disk.f_bsize * disk.f_blocks
             available = disk.f_bsize * disk.f_bavail
             used = disk.f_bsize * (disk.f_blocks - disk.f_bavail)
-            devices.append({'device': entry, 'mounted': True, \
-                'size': capacity, 'used': used, 'avail': available})
+            devices.append({'device': entry, 'mounted': True,
+                            'size': capacity, 'used': used,
+                            'avail': available})
         else:
-            devices.append({'device': entry, 'mounted': False, \
-                'size': '', 'used': '', 'avail': ''})
+            devices.append({'device': entry, 'mounted': False,
+                            'size': '', 'used': '', 'avail': ''})
         return devices
 
     def get_ring_md5(self, openr=open):
@@ -120,9 +120,9 @@ from urllib import unquote, quote as urllib_quote
 
 
 from swift.common.utils import cache_from_env, get_logger, human_readable, \
-    split_path, TRUE_VALUES
+    split_path, config_true_value
 from swift.common.wsgi import make_pre_authed_env, make_pre_authed_request, \
     WSGIContext
 from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND
 from swift.common.swob import Response, HTTPMovedPermanently, HTTPNotFound
 
@@ -179,10 +179,11 @@ class _StaticWebContext(WSGIContext):
         save_response_status = self._response_status
         save_response_headers = self._response_headers
         save_response_exc_info = self._response_exc_info
-        resp = self._app_call(make_pre_authed_env(env, 'GET',
-            '/%s/%s/%s/%s%s' % (self.version, self.account, self.container,
-            self._get_status_int(), self._error),
-            self.agent))
+        resp = self._app_call(make_pre_authed_env(
+            env, 'GET', '/%s/%s/%s/%s%s' % (
+                self.version, self.account, self.container,
+                self._get_status_int(), self._error),
+            self.agent))
         if is_success(self._get_status_int()):
             start_response(save_response_status, self._response_headers,
                            self._response_exc_info)
@@ -210,9 +211,10 @@ class _StaticWebContext(WSGIContext):
             (self._index, self._error, self._listings,
              self._listings_css) = cached_data
             return
-        resp = make_pre_authed_request(env, 'HEAD',
-            '/%s/%s/%s' % (self.version, self.account, self.container),
-            agent=self.agent).get_response(self.app)
+        resp = make_pre_authed_request(
+            env, 'HEAD', '/%s/%s/%s' % (
+                self.version, self.account, self.container),
+            agent=self.agent).get_response(self.app)
         if is_success(resp.status_int):
             self._index = \
                 resp.headers.get('x-container-meta-web-index', '').strip()
@@ -225,9 +227,9 @@ class _StaticWebContext(WSGIContext):
                                                       '').strip()
         if memcache_client:
             memcache_client.set(memcache_key,
-                (self._index, self._error, self._listings,
-                self._listings_css),
-                timeout=self.cache_timeout)
+                                (self._index, self._error, self._listings,
+                                 self._listings_css),
+                                timeout=self.cache_timeout)
 
     def _listing(self, env, start_response, prefix=None):
         """
@@ -237,12 +239,13 @@ class _StaticWebContext(WSGIContext):
         :param start_response: The original WSGI start_response hook.
         :param prefix: Any prefix desired for the container listing.
         """
-        if self._listings.lower() not in TRUE_VALUES:
+        if not config_true_value(self._listings):
             resp = HTTPNotFound()(env, self._start_response)
             return self._error_response(resp, env, start_response)
-        tmp_env = make_pre_authed_env(env, 'GET',
-            '/%s/%s/%s' % (self.version, self.account, self.container),
-            self.agent)
+        tmp_env = make_pre_authed_env(
+            env, 'GET', '/%s/%s/%s' % (
+                self.version, self.account, self.container),
+            self.agent)
         tmp_env['QUERY_STRING'] = 'delimiter=/&format=json'
         if prefix:
             tmp_env['QUERY_STRING'] += '&prefix=%s' % quote(prefix)
@@ -260,14 +263,14 @@ class _StaticWebContext(WSGIContext):
             return self._error_response(resp, env, start_response)
         headers = {'Content-Type': 'text/html; charset=UTF-8'}
         body = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 ' \
-            'Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n' \
-            '<html>\n' \
-            ' <head>\n' \
-            ' <title>Listing of %s</title>\n' % \
-            cgi.escape(env['PATH_INFO'])
+               'Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n' \
+               '<html>\n' \
+               ' <head>\n' \
+               ' <title>Listing of %s</title>\n' % \
+               cgi.escape(env['PATH_INFO'])
         if self._listings_css:
             body += ' <link rel="stylesheet" type="text/css" ' \
                     'href="%s" />\n' % (self._build_css_path(prefix))
         else:
             body += ' <style type="text/css">\n' \
                     ' h1 {font-size: 1em; font-weight: bold;}\n' \
@@ -347,7 +350,7 @@ class _StaticWebContext(WSGIContext):
         """
         self._get_container_info(env)
         if not self._listings and not self._index:
-            if env.get('HTTP_X_WEB_MODE', 'f').lower() in TRUE_VALUES:
+            if config_true_value(env.get('HTTP_X_WEB_MODE', 'f')):
                 return HTTPNotFound()(env, start_response)
             return self.app(env, start_response)
         if env['PATH_INFO'][-1] != '/':
@@ -366,7 +369,7 @@ class _StaticWebContext(WSGIContext):
         if status_int == HTTP_NOT_FOUND:
             return self._listing(env, start_response)
         elif not is_success(self._get_status_int()) or \
-            not is_redirection(self._get_status_int()):
+                not is_redirection(self._get_status_int()):
             return self._error_response(resp, env, start_response)
         start_response(self._response_status, self._response_headers,
                        self._response_exc_info)
@@ -415,10 +418,10 @@ class _StaticWebContext(WSGIContext):
             return resp
         if status_int == HTTP_NOT_FOUND:
             if env['PATH_INFO'][-1] != '/':
-                tmp_env = make_pre_authed_env(env, 'GET',
-                    '/%s/%s/%s' % (self.version, self.account,
-                    self.container),
+                tmp_env = make_pre_authed_env(
+                    env, 'GET', '/%s/%s/%s' % (
+                        self.version, self.account, self.container),
                     self.agent)
                 tmp_env['QUERY_STRING'] = 'limit=1&format=json&delimiter' \
                     '=/&limit=1&prefix=%s' % quote(self.obj + '/')
                 resp = self._app_call(tmp_env)
@@ -427,8 +430,7 @@ class _StaticWebContext(WSGIContext):
                     not json.loads(body):
                 resp = HTTPNotFound()(env, self._start_response)
                 return self._error_response(resp, env, start_response)
-            resp = HTTPMovedPermanently(location=env['PATH_INFO'] +
-                '/')
+            resp = HTTPMovedPermanently(location=env['PATH_INFO'] + '/')
             self._log_response(env, resp.status_int)
             return resp(env, start_response)
         return self._listing(env, start_response, self.obj)
@@ -522,7 +524,7 @@ class StaticWeb(object):
         self.access_logger = get_logger(access_log_conf,
                                         log_route='staticweb-access')
         #: Indicates whether full HTTP headers should be logged or not.
-        self.log_headers = conf.get('log_headers', 'no').lower() in TRUE_VALUES
+        self.log_headers = config_true_value(conf.get('log_headers', 'no'))
 
     def __call__(self, env, start_response):
         """
@@ -547,7 +549,7 @@ class StaticWeb(object):
         if env['REQUEST_METHOD'] not in ('HEAD', 'GET'):
             return self.app(env, start_response)
         if env.get('REMOTE_USER') and \
-                env.get('HTTP_X_WEB_MODE', 'f').lower() not in TRUE_VALUES:
+                not config_true_value(env.get('HTTP_X_WEB_MODE', 'f')):
             return self.app(env, start_response)
         if not container:
             return self.app(env, start_response)
@@ -28,7 +28,7 @@ from swift.common.swob import HTTPBadRequest, HTTPForbidden, HTTPNotFound, \
 
 from swift.common.middleware.acl import clean_acl, parse_acl, referrer_allowed
 from swift.common.utils import cache_from_env, get_logger, get_remote_client, \
-    split_path, TRUE_VALUES
+    split_path, config_true_value
 from swift.common.http import HTTP_CLIENT_CLOSED_REQUEST
 
 
@@ -70,7 +70,7 @@ class TempAuth(object):
         self.app = app
         self.conf = conf
         self.logger = get_logger(conf, log_route='tempauth')
-        self.log_headers = conf.get('log_headers', 'f').lower() in TRUE_VALUES
+        self.log_headers = config_true_value(conf.get('log_headers', 'f'))
         self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
         if self.reseller_prefix and self.reseller_prefix[-1] != '_':
             self.reseller_prefix += '_'
@@ -88,8 +88,8 @@ class TempAuth(object):
             h.strip()
             for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
             if h.strip()]
-        self.allow_overrides = \
-            conf.get('allow_overrides', 't').lower() in TRUE_VALUES
+        self.allow_overrides = config_true_value(
+            conf.get('allow_overrides', 't'))
         self.users = {}
         for conf_key in conf:
             if conf_key.startswith('user_') or conf_key.startswith('user64_'):
@@ -85,6 +85,15 @@ if hash_conf.read('/etc/swift/swift.conf'):
 TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y'))
 
 
+def config_true_value(value):
+    """
+    Returns True if the value is either True or a string in TRUE_VALUES.
+    Returns False otherwise.
+    """
+    return value is True or \
+        (isinstance(value, basestring) and value.lower() in TRUE_VALUES)
+
+
 def noop_libc_function(*args):
     return 0
 
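For reference, a few illustrative calls showing how the helper added above behaves (interpreter sketch, not part of the commit):

    >>> from swift.common.utils import config_true_value
    >>> config_true_value('TRUE')    # string matching is case-insensitive
    True
    >>> config_true_value(True)      # the boolean True passes straight through
    True
    >>> config_true_value('0')       # '0' is not in TRUE_VALUES
    False
    >>> config_true_value(None)      # non-strings other than True are False
    False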
@@ -31,7 +31,7 @@ from urllib import unquote
 
 from swift.common.swob import Request
 from swift.common.utils import capture_stdio, disable_fallocate, \
-    drop_privileges, get_logger, NullLogger, TRUE_VALUES, \
+    drop_privileges, get_logger, NullLogger, config_true_value, \
     validate_configuration
 
 
@@ -66,20 +66,20 @@ def get_socket(conf, default_port=8080):
     """
     bind_addr = (conf.get('bind_ip', '0.0.0.0'),
                  int(conf.get('bind_port', default_port)))
-    address_family = [addr[0] for addr in socket.getaddrinfo(bind_addr[0],
-        bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
+    address_family = [addr[0] for addr in socket.getaddrinfo(
+        bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
         if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
     sock = None
     retry_until = time.time() + 30
     warn_ssl = False
     while not sock and time.time() < retry_until:
         try:
             sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)),
                           family=address_family)
             if 'cert_file' in conf:
                 warn_ssl = True
                 sock = ssl.wrap_socket(sock, certfile=conf['cert_file'],
                                        keyfile=conf['key_file'])
         except socket.error, err:
             if err.args[0] != errno.EADDRINUSE:
                 raise
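get_socket() above picks the address family by resolving the bind address first. A standalone sketch of that selection (example addresses, not Swift code):

import socket

def pick_family(ip, port):
    # the first INET/INET6 result from getaddrinfo decides v4 vs v6
    return [addr[0] for addr in socket.getaddrinfo(
        ip, port, socket.AF_UNSPEC, socket.SOCK_STREAM)
        if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]

print(pick_family('127.0.0.1', 8080))  # AF_INET
print(pick_family('::1', 8080))        # AF_INET6 where v6 is configured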
@@ -124,10 +124,11 @@ def run_wsgi(conf_file, app_section, *args, **kwargs):
         logger = kwargs.pop('logger')
     else:
         logger = get_logger(conf, log_name,
-            log_to_console=kwargs.pop('verbose', False), log_route='wsgi')
+                            log_to_console=kwargs.pop('verbose', False),
+                            log_route='wsgi')
 
     # disable fallocate if desired
-    if conf.get('disable_fallocate', 'no').lower() in TRUE_VALUES:
+    if config_true_value(conf.get('disable_fallocate', 'no')):
         disable_fallocate()
 
     # bind to address and port
@@ -23,7 +23,7 @@ import swift.common.db
 from swift.container import server as container_server
 from swift.common.db import ContainerBroker
 from swift.common.utils import get_logger, audit_location_generator, \
-    TRUE_VALUES, dump_recon_cache
+    config_true_value, dump_recon_cache
 from swift.common.daemon import Daemon
 
 
@@ -34,13 +34,12 @@ class ContainerAuditor(Daemon):
         self.conf = conf
         self.logger = get_logger(conf, log_route='container-auditor')
         self.devices = conf.get('devices', '/srv/node')
-        self.mount_check = conf.get('mount_check', 'true').lower() in \
-            ('true', 't', '1', 'on', 'yes', 'y')
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
         self.interval = int(conf.get('interval', 1800))
         self.container_passes = 0
         self.container_failures = 0
         swift.common.db.DB_PREALLOCATION = \
-            conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
+            config_true_value(conf.get('db_preallocation', 'f'))
         self.recon_cache_path = conf.get('recon_cache_path',
                                          '/var/cache/swift')
         self.rcache = os.path.join(self.recon_cache_path, "container.recon")
@@ -59,12 +58,11 @@ class ContainerAuditor(Daemon):
                     {'time': time.ctime(reported),
                      'pass': self.container_passes,
                      'fail': self.container_failures})
-                dump_recon_cache({'container_audits_since': reported,
-                                  'container_audits_passed':
-                                      self.container_passes,
-                                  'container_audits_failed':
-                                      self.container_failures},
-                                 self.rcache, self.logger)
+                dump_recon_cache(
+                    {'container_audits_since': reported,
+                     'container_audits_passed': self.container_passes,
+                     'container_audits_failed': self.container_failures},
+                    self.rcache, self.logger)
                 reported = time.time()
                 self.container_passes = 0
                 self.container_failures = 0
@@ -99,7 +97,7 @@ class ContainerAuditor(Daemon):
         self.logger.info(
             _('Container audit "once" mode completed: %.02fs'), elapsed)
         dump_recon_cache({'container_auditor_pass_completed': elapsed},
                          self.recon_container)
 
     def container_audit(self, path):
         """
@@ -121,5 +119,5 @@ class ContainerAuditor(Daemon):
             self.logger.increment('failures')
             self.container_failures += 1
             self.logger.exception(_('ERROR Could not get container info %s'),
-                (broker.db_file))
+                                  broker.db_file)
         self.logger.timing_since('timing', start_time)
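The mount_check rewrite above is the pattern this commit repeats in every daemon. A standalone sketch of the equivalence, reusing the TRUE_VALUES definition shown earlier (the isinstance check is widened to str here so the sketch also runs outside Python 2):

TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y'))

def config_true_value(value):
    return value is True or \
        (isinstance(value, str) and value.lower() in TRUE_VALUES)

conf = {'mount_check': 'True'}
old_style = conf.get('mount_check', 'true').lower() in TRUE_VALUES
new_style = config_true_value(conf.get('mount_check', 'true'))
assert old_style is True and new_style is True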
@@ -28,7 +28,7 @@ import swift.common.db
 from swift.common.db import ContainerBroker
 from swift.common.utils import get_logger, get_param, hash_path, public, \
     normalize_timestamp, storage_directory, split_path, validate_sync_to, \
-    TRUE_VALUES, validate_device_partition, json
+    config_true_value, validate_device_partition, json
 from swift.common.constraints import CONTAINER_LISTING_LIMIT, \
     check_mount, check_float, check_utf8, FORMAT2CONTENT_TYPE
 from swift.common.bufferedhttp import http_connect
@@ -53,21 +53,22 @@ class ContainerController(object):
     def __init__(self, conf):
         self.logger = get_logger(conf, log_route='container-server')
         self.root = conf.get('devices', '/srv/node/')
-        self.mount_check = conf.get('mount_check', 'true').lower() in \
-            TRUE_VALUES
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
         self.node_timeout = int(conf.get('node_timeout', 3))
         self.conn_timeout = float(conf.get('conn_timeout', 0.5))
-        self.allowed_sync_hosts = [h.strip()
+        self.allowed_sync_hosts = [
+            h.strip()
             for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
             if h.strip()]
-        self.replicator_rpc = ReplicatorRpc(self.root, DATADIR,
-            ContainerBroker, self.mount_check, logger=self.logger)
+        self.replicator_rpc = ReplicatorRpc(
+            self.root, DATADIR, ContainerBroker, self.mount_check,
+            logger=self.logger)
         self.auto_create_account_prefix = \
             conf.get('auto_create_account_prefix') or '.'
-        if conf.get('allow_versions', 'f').lower() in TRUE_VALUES:
+        if config_true_value(conf.get('allow_versions', 'f')):
             self.save_headers.append('x-versions-location')
         swift.common.db.DB_PREALLOCATION = \
-            conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
+            config_true_value(conf.get('db_preallocation', 'f'))
 
     def _get_container_broker(self, drive, part, account, container):
         """
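The allowed_sync_hosts parsing reformatted above splits a comma-separated config value and drops empty entries. A sketch with an example value:

conf = {'allowed_sync_hosts': '127.0.0.1, 10.0.0.1,, 10.0.0.2 '}
allowed_sync_hosts = [
    h.strip()
    for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
    if h.strip()]
print(allowed_sync_hosts)  # ['127.0.0.1', '10.0.0.1', '10.0.0.2']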
@@ -103,7 +104,8 @@ class ContainerController(object):
             account_ip, account_port = account_host.rsplit(':', 1)
             new_path = '/' + '/'.join([account, container])
             info = broker.get_info()
-            account_headers = {'x-put-timestamp': info['put_timestamp'],
+            account_headers = {
+                'x-put-timestamp': info['put_timestamp'],
                 'x-delete-timestamp': info['delete_timestamp'],
                 'x-object-count': info['object_count'],
                 'x-bytes-used': info['bytes_used'],
@@ -113,16 +115,17 @@ class ContainerController(object):
                 account_headers['x-account-override-deleted'] = 'yes'
             try:
                 with ConnectionTimeout(self.conn_timeout):
-                    conn = http_connect(account_ip, account_port,
-                        account_device, account_partition, 'PUT', new_path,
-                        account_headers)
+                    conn = http_connect(
+                        account_ip, account_port, account_device,
+                        account_partition, 'PUT', new_path, account_headers)
                 with Timeout(self.node_timeout):
                     account_response = conn.getresponse()
                     account_response.read()
                     if account_response.status == HTTP_NOT_FOUND:
                         return HTTPNotFound(request=req)
                     elif not is_success(account_response.status):
-                        self.logger.error(_('ERROR Account update failed '
+                        self.logger.error(_(
+                            'ERROR Account update failed '
                             'with %(ip)s:%(port)s/%(device)s (will retry '
                            'later): Response %(status)s %(reason)s'),
                             {'ip': account_ip, 'port': account_port,
@@ -130,7 +133,8 @@ class ContainerController(object):
                              'status': account_response.status,
                              'reason': account_response.reason})
             except (Exception, Timeout):
-                self.logger.exception(_('ERROR account update failed with '
+                self.logger.exception(_(
+                    'ERROR account update failed with '
                     '%(ip)s:%(port)s/%(device)s (will retry later)'),
                     {'ip': account_ip, 'port': account_port,
                      'device': account_device})
@@ -147,12 +151,12 @@ class ContainerController(object):
         except ValueError, err:
             self.logger.increment('DELETE.errors')
             return HTTPBadRequest(body=str(err), content_type='text/plain',
                                   request=req)
         if 'x-timestamp' not in req.headers or \
                 not check_float(req.headers['x-timestamp']):
             self.logger.increment('DELETE.errors')
             return HTTPBadRequest(body='Missing timestamp', request=req,
                                   content_type='text/plain')
         if self.mount_check and not check_mount(self.root, drive):
             self.logger.increment('DELETE.errors')
             return HTTPInsufficientStorage(drive=drive, request=req)
@@ -174,7 +178,7 @@ class ContainerController(object):
             self.logger.increment('DELETE.errors')
             return HTTPConflict(request=req)
         existed = float(broker.get_info()['put_timestamp']) and \
             not broker.is_deleted()
         broker.delete_db(req.headers['X-Timestamp'])
         if not broker.is_deleted():
             self.logger.increment('DELETE.errors')
@@ -198,12 +202,12 @@ class ContainerController(object):
         except ValueError, err:
             self.logger.increment('PUT.errors')
             return HTTPBadRequest(body=str(err), content_type='text/plain',
                                   request=req)
         if 'x-timestamp' not in req.headers or \
                 not check_float(req.headers['x-timestamp']):
             self.logger.increment('PUT.errors')
             return HTTPBadRequest(body='Missing timestamp', request=req,
                                   content_type='text/plain')
         if 'x-container-sync-to' in req.headers:
             err = validate_sync_to(req.headers['x-container-sync-to'],
                                    self.allowed_sync_hosts)
@@ -223,7 +227,8 @@ class ContainerController(object):
                 self.logger.timing_since('PUT.timing', start_time)
                 return HTTPNotFound()
             broker.put_object(obj, timestamp, int(req.headers['x-size']),
-                req.headers['x-content-type'], req.headers['x-etag'])
+                              req.headers['x-content-type'],
+                              req.headers['x-etag'])
             self.logger.timing_since('PUT.timing', start_time)
             return HTTPCreated(request=req)
         else:  # put container
@@ -237,10 +242,11 @@ class ContainerController(object):
                 self.logger.increment('PUT.errors')
                 return HTTPConflict(request=req)
             metadata = {}
-            metadata.update((key, (value, timestamp))
+            metadata.update(
+                (key, (value, timestamp))
                 for key, value in req.headers.iteritems()
                 if key.lower() in self.save_headers or
                 key.lower().startswith('x-container-meta-'))
             if metadata:
                 if 'X-Container-Sync-To' in metadata:
                     if 'X-Container-Sync-To' not in broker.metadata or \
@@ -268,7 +274,7 @@ class ContainerController(object):
         except ValueError, err:
             self.logger.increment('HEAD.errors')
             return HTTPBadRequest(body=str(err), content_type='text/plain',
                                   request=req)
         if self.mount_check and not check_mount(self.root, drive):
             self.logger.increment('HEAD.errors')
             return HTTPInsufficientStorage(drive=drive, request=req)
@@ -285,7 +291,8 @@ class ContainerController(object):
             'X-Timestamp': info['created_at'],
             'X-PUT-Timestamp': info['put_timestamp'],
         }
-        headers.update((key, value)
+        headers.update(
+            (key, value)
             for key, (value, timestamp) in broker.metadata.iteritems()
             if value != '' and (key.lower() in self.save_headers or
                                 key.lower().startswith('x-container-meta-')))
@@ -303,7 +310,7 @@ class ContainerController(object):
         except ValueError, err:
             self.logger.increment('GET.errors')
             return HTTPBadRequest(body=str(err), content_type='text/plain',
                                   request=req)
         if self.mount_check and not check_mount(self.root, drive):
             self.logger.increment('GET.errors')
             return HTTPInsufficientStorage(drive=drive, request=req)
@@ -320,7 +327,8 @@ class ContainerController(object):
             'X-Timestamp': info['created_at'],
             'X-PUT-Timestamp': info['put_timestamp'],
         }
-        resp_headers.update((key, value)
+        resp_headers.update(
+            (key, value)
             for key, (value, timestamp) in broker.metadata.iteritems()
             if value != '' and (key.lower() in self.save_headers or
                                 key.lower().startswith('x-container-meta-')))
@@ -338,7 +346,8 @@ class ContainerController(object):
             if given_limit and given_limit.isdigit():
                 limit = int(given_limit)
                 if limit > CONTAINER_LISTING_LIMIT:
-                    return HTTPPreconditionFailed(request=req,
+                    return HTTPPreconditionFailed(
+                        request=req,
                         body='Maximum limit is %d' % CONTAINER_LISTING_LIMIT)
             query_format = get_param(req, 'format')
         except UnicodeDecodeError, err:
@@ -350,9 +359,9 @@ class ContainerController(object):
                                        FORMAT2CONTENT_TYPE['plain'])
             try:
                 out_content_type = req.accept.best_match(
-                    ['text/plain', 'application/json',
-                     'application/xml', 'text/xml'],
+                    ['text/plain', 'application/json', 'application/xml',
+                     'text/xml'],
                     default_match='text/plain')
             except AssertionError, err:
                 self.logger.increment('GET.errors')
                 return HTTPBadRequest(body='bad accept header: %s' % req.accept,
@@ -389,10 +398,11 @@ class ContainerController(object):
                         '</subdir>' % (name, name))
                 else:
                     content_type = saxutils.escape(content_type)
-                    xml_output.append('<object><name>%s</name><hash>%s</hash>'\
-                        '<bytes>%d</bytes><content_type>%s</content_type>'\
-                        '<last_modified>%s</last_modified></object>' % \
-                        (name, etag, size, content_type, created_at))
+                    xml_output.append(
+                        '<object><name>%s</name><hash>%s</hash>'
+                        '<bytes>%d</bytes><content_type>%s</content_type>'
+                        '<last_modified>%s</last_modified></object>' %
+                        (name, etag, size, content_type, created_at))
             container_list = ''.join([
                 '<?xml version="1.0" encoding="UTF-8"?>\n',
                 '<container name=%s>' % saxutils.quoteattr(container),
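The xml_output.append() rewrite above only joins the string literals differently; the rendered entry is unchanged. A sketch of one entry with purely example values:

from xml.sax import saxutils

name, etag, size = 'photo.jpg', 'd41d8cd98f00b204e9800998ecf8427e', 0
content_type = saxutils.escape('image/jpeg')
created_at = '2012-09-01T00:00:00.000000'
print('<object><name>%s</name><hash>%s</hash>'
      '<bytes>%d</bytes><content_type>%s</content_type>'
      '<last_modified>%s</last_modified></object>' %
      (name, etag, size, content_type, created_at))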
@@ -421,7 +431,7 @@ class ContainerController(object):
         except ValueError, err:
             self.logger.increment('REPLICATE.errors')
             return HTTPBadRequest(body=str(err), content_type='text/plain',
                                   request=req)
         if self.mount_check and not check_mount(self.root, drive):
             self.logger.increment('REPLICATE.errors')
             return HTTPInsufficientStorage(drive=drive, request=req)
@@ -450,7 +460,7 @@ class ContainerController(object):
                 not check_float(req.headers['x-timestamp']):
             self.logger.increment('POST.errors')
             return HTTPBadRequest(body='Missing or bad timestamp',
                                   request=req, content_type='text/plain')
         if 'x-container-sync-to' in req.headers:
             err = validate_sync_to(req.headers['x-container-sync-to'],
                                    self.allowed_sync_hosts)
@@ -466,10 +476,10 @@ class ContainerController(object):
             return HTTPNotFound(request=req)
         timestamp = normalize_timestamp(req.headers['x-timestamp'])
         metadata = {}
-        metadata.update((key, (value, timestamp))
-            for key, value in req.headers.iteritems()
+        metadata.update(
+            (key, (value, timestamp)) for key, value in req.headers.iteritems()
             if key.lower() in self.save_headers or
             key.lower().startswith('x-container-meta-'))
         if metadata:
             if 'X-Container-Sync-To' in metadata:
                 if 'X-Container-Sync-To' not in broker.metadata or \
@@ -497,8 +507,9 @@ class ContainerController(object):
             else:
                 res = method(req)
         except (Exception, Timeout):
-            self.logger.exception(_('ERROR __call__ error with %(method)s'
-                ' %(path)s '), {'method': req.method, 'path': req.path})
+            self.logger.exception(_(
+                'ERROR __call__ error with %(method)s %(path)s '),
+                {'method': req.method, 'path': req.path})
             res = HTTPInternalServerError(body=traceback.format_exc())
         trans_time = '%.4f' % (time.time() - start_time)
         log_message = '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %s' % (
@@ -27,7 +27,7 @@ from swift.common.direct_client import direct_get_object
 from swift.common.ring import Ring
 from swift.common.db import ContainerBroker
 from swift.common.utils import audit_location_generator, get_logger, \
-    hash_path, TRUE_VALUES, validate_sync_to, whataremyips
+    hash_path, config_true_value, validate_sync_to, whataremyips
 from swift.common.daemon import Daemon
 from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND
 
@@ -144,8 +144,7 @@ class ContainerSync(Daemon):
         self.devices = conf.get('devices', '/srv/node')
         #: Indicates whether mount points should be verified as actual mount
         #: points (normally true, false for tests and SAIO).
-        self.mount_check = \
-            conf.get('mount_check', 'true').lower() in TRUE_VALUES
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
         #: Minimum time between full scans. This is to keep the daemon from
         #: running wild on near empty systems.
         self.interval = int(conf.get('interval', 300))
@@ -154,7 +153,8 @@ class ContainerSync(Daemon):
         #: it'll just be resumed next scan.
         self.container_time = int(conf.get('container_time', 60))
         #: The list of hosts we're allowed to send syncs to.
-        self.allowed_sync_hosts = [h.strip()
+        self.allowed_sync_hosts = [
+            h.strip()
             for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
             if h.strip()]
         self.proxy = conf.get('sync_proxy')
@@ -174,13 +174,13 @@ class ContainerSync(Daemon):
         swift_dir = conf.get('swift_dir', '/etc/swift')
         #: swift.common.ring.Ring for locating containers.
         self.container_ring = container_ring or Ring(swift_dir,
                                                      ring_name='container')
         #: swift.common.ring.Ring for locating objects.
         self.object_ring = object_ring or Ring(swift_dir, ring_name='object')
         self._myips = whataremyips()
         self._myport = int(conf.get('bind_port', 6001))
         swift.common.db.DB_PREALLOCATION = \
-            conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
+            config_true_value(conf.get('db_preallocation', 'f'))
 
     def run_forever(self):
         """
@@ -351,9 +351,9 @@ class ContainerSync(Daemon):
                 if row['deleted']:
                     try:
                         delete_object(sync_to, name=row['name'],
                                       headers={'x-timestamp': row['created_at'],
                                                'x-container-sync-key': sync_key},
                                       proxy=self.proxy)
                     except ClientException, err:
                         if err.http_status != HTTP_NOT_FOUND:
                             raise
@@ -371,8 +371,8 @@ class ContainerSync(Daemon):
                 headers = body = None
                 for node in nodes:
                     try:
-                        these_headers, this_body = direct_get_object(node,
-                            part, info['account'], info['container'],
+                        these_headers, this_body = direct_get_object(
+                            node, part, info['account'], info['container'],
                             row['name'], resp_chunk_size=65536)
                         this_timestamp = float(these_headers['x-timestamp'])
                         if this_timestamp > timestamp:
@@ -389,8 +389,9 @@ class ContainerSync(Daemon):
                     if timestamp < looking_for_timestamp:
                         if exc:
                             raise exc
-                        raise Exception(_('Unknown exception trying to GET: '
-                            '%(node)r %(account)r %(container)r %(object)r'),
+                        raise Exception(
+                            _('Unknown exception trying to GET: %(node)r '
+                              '%(account)r %(container)r %(object)r'),
                             {'node': node, 'part': part,
                              'account': info['account'],
                              'container': info['container'],
@@ -403,20 +404,21 @@ class ContainerSync(Daemon):
                 headers['x-timestamp'] = row['created_at']
                 headers['x-container-sync-key'] = sync_key
                 put_object(sync_to, name=row['name'], headers=headers,
-                           contents=_Iter2FileLikeObject(body), proxy=self.proxy)
+                           contents=_Iter2FileLikeObject(body),
+                           proxy=self.proxy)
                 self.container_puts += 1
                 self.logger.increment('puts')
                 self.logger.timing_since('puts.timing', start_time)
             except ClientException, err:
                 if err.http_status == HTTP_UNAUTHORIZED:
-                    self.logger.info(_('Unauth %(sync_from)r '
-                        '=> %(sync_to)r'),
+                    self.logger.info(
+                        _('Unauth %(sync_from)r => %(sync_to)r'),
                         {'sync_from': '%s/%s' %
                         (quote(info['account']), quote(info['container'])),
                          'sync_to': sync_to})
                 elif err.http_status == HTTP_NOT_FOUND:
-                    self.logger.info(_('Not found %(sync_from)r '
-                        '=> %(sync_to)r'),
+                    self.logger.info(
+                        _('Not found %(sync_from)r => %(sync_to)r'),
                         {'sync_from': '%s/%s' %
                         (quote(info['account']), quote(info['container'])),
                          'sync_to': sync_to})
@@ -29,7 +29,7 @@ from swift.common.bufferedhttp import http_connect
 from swift.common.db import ContainerBroker
 from swift.common.exceptions import ConnectionTimeout
 from swift.common.ring import Ring
-from swift.common.utils import get_logger, TRUE_VALUES, dump_recon_cache
+from swift.common.utils import get_logger, config_true_value, dump_recon_cache
 from swift.common.daemon import Daemon
 from swift.common.http import is_success, HTTP_INTERNAL_SERVER_ERROR
 
@@ -41,8 +41,7 @@ class ContainerUpdater(Daemon):
         self.conf = conf
         self.logger = get_logger(conf, log_route='container-updater')
         self.devices = conf.get('devices', '/srv/node')
-        self.mount_check = conf.get('mount_check', 'true').lower() in \
-            ('true', 't', '1', 'on', 'yes', 'y')
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
         self.swift_dir = conf.get('swift_dir', '/etc/swift')
         self.interval = int(conf.get('interval', 300))
         self.account_ring = None
@@ -58,7 +57,7 @@ class ContainerUpdater(Daemon):
             float(conf.get('account_suppression_time', 60))
         self.new_account_suppressions = None
         swift.common.db.DB_PREALLOCATION = \
-            conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
+            config_true_value(conf.get('db_preallocation', 'f'))
         self.recon_cache_path = conf.get('recon_cache_path',
                                          '/var/cache/swift')
         self.rcache = os.path.join(self.recon_cache_path, "container.recon")
@@ -112,7 +111,8 @@ class ContainerUpdater(Daemon):
         begin = time.time()
         now = time.time()
         expired_suppressions = \
-            [a for a, u in self.account_suppressions.iteritems() if u < now]
+            [a for a, u in self.account_suppressions.iteritems()
             if u < now]
        for account in expired_suppressions:
            del self.account_suppressions[account]
        pid2filename = {}
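expired_suppressions above is a simple sweep of accounts whose suppression deadline has passed. A standalone sketch with example data (dict.items() instead of Python 2's iteritems() so it runs anywhere):

import time

account_suppressions = {'AUTH_a': time.time() - 1, 'AUTH_b': time.time() + 60}
now = time.time()
expired_suppressions = [a for a, u in account_suppressions.items()
                        if u < now]
for account in expired_suppressions:
    del account_suppressions[account]
print(sorted(account_suppressions))  # ['AUTH_b']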
@@ -175,7 +175,8 @@ class ContainerUpdater(Daemon):
             for path in self.get_paths():
                 self.container_sweep(path)
             elapsed = time.time() - begin
-            self.logger.info(_('Container update single threaded sweep completed: '
+            self.logger.info(_(
+                'Container update single threaded sweep completed: '
                 '%(elapsed).02fs, %(success)s successes, %(fail)s failures, '
                 '%(no_change)s with no changes'),
                 {'elapsed': elapsed, 'success': self.successes,
@@ -277,7 +278,8 @@ class ContainerUpdater(Daemon):
                              'X-Bytes-Used': bytes,
                              'X-Account-Override-Deleted': 'yes'})
         except (Exception, Timeout):
-            self.logger.exception(_('ERROR account update failed with '
+            self.logger.exception(_(
+                'ERROR account update failed with '
                 '%(ip)s:%(port)s/%(device)s (will retry later): '), node)
             return HTTP_INTERNAL_SERVER_ERROR
         with Timeout(self.node_timeout):
@@ -20,7 +20,7 @@ from eventlet import Timeout
 
 from swift.obj import server as object_server
 from swift.common.utils import get_logger, audit_location_generator, \
-    ratelimit_sleep, TRUE_VALUES, dump_recon_cache
+    ratelimit_sleep, config_true_value, dump_recon_cache
 from swift.common.exceptions import AuditException, DiskFileError, \
     DiskFileNotExist
 from swift.common.daemon import Daemon
@@ -35,8 +35,7 @@ class AuditorWorker(object):
         self.conf = conf
         self.logger = get_logger(conf, log_route='object-auditor')
         self.devices = conf.get('devices', '/srv/node')
-        self.mount_check = conf.get('mount_check', 'true').lower() in \
-            TRUE_VALUES
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
         self.max_files_per_second = float(conf.get('files_per_second', 20))
         self.max_bytes_per_second = float(conf.get('bytes_per_second',
                                                    10000000))
@@ -33,7 +33,7 @@ from eventlet.support.greenlets import GreenletExit
 from swift.common.ring import Ring
 from swift.common.utils import whataremyips, unlink_older_than, lock_path, \
     compute_eta, get_logger, write_pickle, renamer, dump_recon_cache, \
-    rsync_ip, mkdirs, TRUE_VALUES, list_from_csv
+    rsync_ip, mkdirs, config_true_value, list_from_csv
 from swift.common.bufferedhttp import http_connect
 from swift.common.daemon import Daemon
 from swift.common.http import HTTP_OK, HTTP_INSUFFICIENT_STORAGE
@@ -246,10 +246,8 @@ class ObjectReplicator(Daemon):
         self.conf = conf
         self.logger = get_logger(conf, log_route='object-replicator')
         self.devices_dir = conf.get('devices', '/srv/node')
-        self.mount_check = conf.get('mount_check', 'true').lower() in \
-            TRUE_VALUES
-        self.vm_test_mode = conf.get(
-            'vm_test_mode', 'no').lower() in TRUE_VALUES
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
+        self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no'))
         self.swift_dir = conf.get('swift_dir', '/etc/swift')
         self.port = int(conf.get('bind_port', 6000))
         self.concurrency = int(conf.get('concurrency', 1))
@@ -33,7 +33,7 @@ from eventlet import sleep, Timeout, tpool
 from swift.common.utils import mkdirs, normalize_timestamp, public, \
     storage_directory, hash_path, renamer, fallocate, fsync, \
     split_path, drop_buffer_cache, get_logger, write_pickle, \
-    TRUE_VALUES, validate_device_partition
+    config_true_value, validate_device_partition
 from swift.common.bufferedhttp import http_connect
 from swift.common.constraints import check_object_creation, check_mount, \
     check_float, check_utf8
@@ -114,8 +114,8 @@ class DiskFile(object):
         self.iter_hook = iter_hook
         self.name = '/' + '/'.join((account, container, obj))
         name_hash = hash_path(account, container, obj)
-        self.datadir = os.path.join(path, device,
-            storage_directory(DATADIR, partition, name_hash))
+        self.datadir = os.path.join(
+            path, device, storage_directory(DATADIR, partition, name_hash))
         self.device_path = os.path.join(path, device)
         self.tmpdir = os.path.join(path, device, 'tmp')
         self.logger = logger
@@ -172,7 +172,7 @@ class DiskFile(object):
                     read += len(chunk)
                     if read - dropped_cache > (1024 * 1024):
                         self.drop_cache(self.fp.fileno(), dropped_cache,
                                         read - dropped_cache)
                         dropped_cache = read
                     yield chunk
                     if self.iter_hook:
@@ -180,7 +180,7 @@ class DiskFile(object):
                 else:
                     self.read_to_eof = True
                     self.drop_cache(self.fp.fileno(), dropped_cache,
                                     read - dropped_cache)
                     break
         finally:
             self.close()
@@ -212,10 +212,10 @@ class DiskFile(object):
         except DiskFileNotExist:
             return
 
-        if (self.iter_etag and self.started_at_0 and self.read_to_eof and
-                'ETag' in self.metadata and
-                self.iter_etag.hexdigest() != self.metadata.get('ETag')):
+        if self.iter_etag and self.started_at_0 and self.read_to_eof and \
+                'ETag' in self.metadata and \
+                self.iter_etag.hexdigest() != self.metadata.get('ETag'):
             self.quarantine()
 
     def close(self, verify_file=True):
         """
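The quarantine check reformatted above compares an MD5 accumulated while streaming (iter_etag) against the stored ETag. A standalone sketch of that comparison with example data:

from hashlib import md5

iter_etag = md5()
for chunk in (b'some ', b'object ', b'data'):  # checksum built as chunks are yielded
    iter_etag.update(chunk)
stored_etag = md5(b'some object data').hexdigest()  # what the PUT recorded
assert iter_etag.hexdigest() == stored_etag  # a mismatch would quarantine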
@@ -229,10 +229,11 @@ class DiskFile(object):
             if verify_file:
                 self._handle_close_quarantine()
         except (Exception, Timeout), e:
-            self.logger.error(_('ERROR DiskFile %(data_file)s in '
-                '%(data_dir)s close failure: %(exc)s : %(stack)'),
-                {'exc': e, 'stack': ''.join(traceback.format_stack()),
-                 'data_file': self.data_file, 'data_dir': self.datadir})
+            self.logger.error(_(
+                'ERROR DiskFile %(data_file)s in '
+                '%(data_dir)s close failure: %(exc)s : %(stack)'),
+                {'exc': e, 'stack': ''.join(traceback.format_stack()),
+                 'data_file': self.data_file, 'data_dir': self.datadir})
         finally:
             self.fp.close()
             self.fp = None
@@ -337,8 +338,9 @@ class DiskFile(object):
             if 'Content-Length' in self.metadata:
                 metadata_size = int(self.metadata['Content-Length'])
                 if file_size != metadata_size:
-                    raise DiskFileError('Content-Length of %s does not '
-                        'match file size of %s' % (metadata_size, file_size))
+                    raise DiskFileError(
+                        'Content-Length of %s does not match file size '
+                        'of %s' % (metadata_size, file_size))
             return file_size
         except OSError, err:
             if err.errno != errno.ENOENT:
@@ -358,17 +360,15 @@ class ObjectController(object):
         """
         self.logger = get_logger(conf, log_route='object-server')
         self.devices = conf.get('devices', '/srv/node/')
-        self.mount_check = conf.get('mount_check', 'true').lower() in \
-            TRUE_VALUES
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
         self.node_timeout = int(conf.get('node_timeout', 3))
         self.conn_timeout = float(conf.get('conn_timeout', 0.5))
         self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
         self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
         self.keep_cache_size = int(conf.get('keep_cache_size', 5242880))
         self.keep_cache_private = \
-            conf.get('keep_cache_private', 'false').lower() in TRUE_VALUES
-        self.log_requests = \
-            conf.get('log_requests', 'true').lower() in TRUE_VALUES
+            config_true_value(conf.get('keep_cache_private', 'false'))
+        self.log_requests = config_true_value(conf.get('log_requests', 'true'))
         self.max_upload_time = int(conf.get('max_upload_time', 86400))
         self.slow = int(conf.get('slow', 0))
         self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024
@@ -378,10 +378,10 @@ class ObjectController(object):
             x-delete-at,
             x-object-manifest,
         '''
-        self.allowed_headers = set(i.strip().lower() for i in
-            conf.get('allowed_headers',
-                default_allowed_headers).split(',') if i.strip() and
-            i.strip().lower() not in DISALLOWED_HEADERS)
+        self.allowed_headers = set(
+            i.strip().lower() for i in
+            conf.get('allowed_headers', default_allowed_headers).split(',')
+            if i.strip() and i.strip().lower() not in DISALLOWED_HEADERS)
         self.expiring_objects_account = \
             (conf.get('auto_create_account_prefix') or '.') + \
             'expiring_objects'
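The allowed_headers expression reformatted above filters a comma-separated config value against a deny set. A standalone sketch with example values (this DISALLOWED_HEADERS is illustrative, not the real constant):

DISALLOWED_HEADERS = set(['content-length', 'content-type', 'etag'])
default_allowed_headers = 'content-disposition, x-delete-at, x-object-manifest'
conf = {'allowed_headers': 'Content-Encoding, ETag, X-Object-Manifest'}
allowed_headers = set(
    i.strip().lower() for i in
    conf.get('allowed_headers', default_allowed_headers).split(',')
    if i.strip() and i.strip().lower() not in DISALLOWED_HEADERS)
print(sorted(allowed_headers))  # ['content-encoding', 'x-object-manifest']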
@@ -410,20 +410,22 @@ class ObjectController(object):
             with ConnectionTimeout(self.conn_timeout):
                 ip, port = host.rsplit(':', 1)
                 conn = http_connect(ip, port, contdevice, partition, op,
                                     full_path, headers_out)
             with Timeout(self.node_timeout):
                 response = conn.getresponse()
                 response.read()
                 if is_success(response.status):
                     return
                 else:
-                    self.logger.error(_('ERROR Container update failed '
+                    self.logger.error(_(
+                        'ERROR Container update failed '
                         '(saving for async update later): %(status)d '
                         'response from %(ip)s:%(port)s/%(dev)s'),
                         {'status': response.status, 'ip': ip, 'port': port,
                          'dev': contdevice})
         except (Exception, Timeout):
-            self.logger.exception(_('ERROR container update failed with '
+            self.logger.exception(_(
+                'ERROR container update failed with '
                 '%(ip)s:%(port)s/%(dev)s (saving for async update later)'),
                 {'ip': ip, 'port': port, 'dev': contdevice})
         async_dir = os.path.join(self.devices, objdevice, ASYNCDIR)
@@ -431,9 +433,9 @@ class ObjectController(object):
         self.logger.increment('async_pendings')
         write_pickle(
             {'op': op, 'account': account, 'container': container,
              'obj': obj, 'headers': headers_out},
             os.path.join(async_dir, ohash[-3:], ohash + '-' +
                          normalize_timestamp(headers_out['x-timestamp'])),
             os.path.join(self.devices, objdevice, 'tmp'))
 
     def container_update(self, op, account, container, obj, headers_in,
@@ -484,7 +486,8 @@ class ObjectController(object):
             headers_out['x-size'] = '0'
             headers_out['x-content-type'] = 'text/plain'
             headers_out['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'
-        self.async_update(op, self.expiring_objects_account,
+        self.async_update(
+            op, self.expiring_objects_account,
             str(delete_at / self.expiring_objects_container_divisor *
                 self.expiring_objects_container_divisor),
             '%s-%s/%s/%s' % (delete_at, account, container, obj),
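The async_update() call reformatted above buckets each expiring object into a container named for its rounded-down delete_at timestamp. A sketch of the arithmetic (// gives the floor division Python 2's / performs on ints; 86400 is used here purely as an example divisor):

delete_at = 1357015292
expiring_objects_container_divisor = 86400
container = str(delete_at // expiring_objects_container_divisor *
                expiring_objects_container_divisor)
print(container)  # '1356998400' -- every object expiring that day lands here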
@@ -501,12 +504,12 @@ class ObjectController(object):
         except ValueError, err:
             self.logger.increment('POST.errors')
             return HTTPBadRequest(body=str(err), request=request,
                                   content_type='text/plain')
         if 'x-timestamp' not in request.headers or \
                 not check_float(request.headers['x-timestamp']):
             self.logger.increment('POST.errors')
             return HTTPBadRequest(body='Missing timestamp', request=request,
                                   content_type='text/plain')
         new_delete_at = int(request.headers.get('X-Delete-At') or 0)
         if new_delete_at and new_delete_at < time.time():
             self.logger.increment('POST.errors')
@@ -533,7 +536,7 @@ class ObjectController(object):
             return HTTPNotFound(request=request)
         metadata = {'X-Timestamp': request.headers['x-timestamp']}
         metadata.update(val for val in request.headers.iteritems()
                         if val[0].lower().startswith('x-object-meta-'))
         for header_key in self.allowed_headers:
             if header_key in request.headers:
                 header_caps = header_key.title()
@@ -562,15 +565,15 @@ class ObjectController(object):
         except ValueError, err:
             self.logger.increment('PUT.errors')
             return HTTPBadRequest(body=str(err), request=request,
                                   content_type='text/plain')
         if self.mount_check and not check_mount(self.devices, device):
             self.logger.increment('PUT.errors')
             return HTTPInsufficientStorage(drive=device, request=request)
         if 'x-timestamp' not in request.headers or \
                 not check_float(request.headers['x-timestamp']):
             self.logger.increment('PUT.errors')
             return HTTPBadRequest(body='Missing timestamp', request=request,
                                   content_type='text/plain')
         error_response = check_object_creation(request, obj)
         if error_response:
             self.logger.increment('PUT.errors')
@@ -616,7 +619,7 @@ class ObjectController(object):
                     return HTTPClientDisconnect(request=request)
             etag = etag.hexdigest()
             if 'etag' in request.headers and \
                     request.headers['etag'].lower() != etag:
                 return HTTPUnprocessableEntity(request=request)
             metadata = {
                 'X-Timestamp': request.headers['x-timestamp'],
@@ -625,8 +628,8 @@ class ObjectController(object):
                 'Content-Length': str(os.fstat(fd).st_size),
             }
             metadata.update(val for val in request.headers.iteritems()
                             if val[0].lower().startswith('x-object-meta-') and
                             len(val[0]) > 14)
             for header_key in self.allowed_headers:
                 if header_key in request.headers:
                     header_caps = header_key.title()
@@ -634,17 +637,19 @@ class ObjectController(object):
             old_delete_at = int(file.metadata.get('X-Delete-At') or 0)
             if old_delete_at != new_delete_at:
                 if new_delete_at:
-                    self.delete_at_update('PUT', new_delete_at, account,
-                        container, obj, request.headers, device)
+                    self.delete_at_update(
+                        'PUT', new_delete_at, account, container, obj,
+                        request.headers, device)
                 if old_delete_at:
-                    self.delete_at_update('DELETE', old_delete_at, account,
-                        container, obj, request.headers, device)
+                    self.delete_at_update(
+                        'DELETE', old_delete_at, account, container, obj,
+                        request.headers, device)
             file.put(fd, tmppath, metadata)
             file.unlinkold(metadata['X-Timestamp'])
             if not orig_timestamp or \
                     orig_timestamp < request.headers['x-timestamp']:
-                self.container_update('PUT', account, container, obj,
-                    request.headers,
+                self.container_update(
+                    'PUT', account, container, obj, request.headers,
                     {'x-size': file.metadata['Content-Length'],
                      'x-content-type': file.metadata['Content-Type'],
                      'x-timestamp': file.metadata['X-Timestamp'],
@@ -666,7 +671,7 @@ class ObjectController(object):
         except ValueError, err:
             self.logger.increment('GET.errors')
             return HTTPBadRequest(body=str(err), request=request,
                                   content_type='text/plain')
         if self.mount_check and not check_mount(self.devices, device):
             self.logger.increment('GET.errors')
             return HTTPInsufficientStorage(drive=device, request=request)
@@ -674,8 +679,9 @@ class ObjectController(object):
                         obj, self.logger, keep_data_fp=True,
                         disk_chunk_size=self.disk_chunk_size,
                         iter_hook=sleep)
-        if file.is_deleted() or ('X-Delete-At' in file.metadata and
-                int(file.metadata['X-Delete-At']) <= time.time()):
+        if file.is_deleted() or \
+                ('X-Delete-At' in file.metadata and
+                 int(file.metadata['X-Delete-At']) <= time.time()):
             if request.headers.get('if-match') == '*':
                 self.logger.timing_since('GET.timing', start_time)
                 return HTTPPreconditionFailed(request=request)
@@ -707,8 +713,9 @@ class ObjectController(object):
             self.logger.increment('GET.errors')
             return HTTPPreconditionFailed(request=request)
         if if_unmodified_since and \
-                datetime.fromtimestamp(float(file.metadata['X-Timestamp']), UTC) > \
-                if_unmodified_since:
+                datetime.fromtimestamp(
+                    float(file.metadata['X-Timestamp']), UTC) > \
+                if_unmodified_since:
             file.close()
             self.logger.timing_since('GET.timing', start_time)
             return HTTPPreconditionFailed(request=request)
@@ -719,15 +726,16 @@ class ObjectController(object):
             self.logger.increment('GET.errors')
             return HTTPPreconditionFailed(request=request)
         if if_modified_since and \
-                datetime.fromtimestamp(float(file.metadata['X-Timestamp']), UTC) < \
-                if_modified_since:
+                datetime.fromtimestamp(
+                    float(file.metadata['X-Timestamp']), UTC) < \
+                if_modified_since:
             file.close()
             self.logger.timing_since('GET.timing', start_time)
             return HTTPNotModified(request=request)
         response = Response(app_iter=file,
                             request=request, conditional_response=True)
-        response.headers['Content-Type'] = file.metadata.get('Content-Type',
-            'application/octet-stream')
+        response.headers['Content-Type'] = file.metadata.get(
+            'Content-Type', 'application/octet-stream')
         for key, value in file.metadata.iteritems():
if key.lower().startswith('x-object-meta-') or \
|
if key.lower().startswith('x-object-meta-') or \
|
||||||
key.lower() in self.allowed_headers:
|
key.lower() in self.allowed_headers:
|
||||||
@ -765,8 +773,9 @@ class ObjectController(object):
|
|||||||
return HTTPInsufficientStorage(drive=device, request=request)
|
return HTTPInsufficientStorage(drive=device, request=request)
|
||||||
file = DiskFile(self.devices, device, partition, account, container,
|
file = DiskFile(self.devices, device, partition, account, container,
|
||||||
obj, self.logger, disk_chunk_size=self.disk_chunk_size)
|
obj, self.logger, disk_chunk_size=self.disk_chunk_size)
|
||||||
if file.is_deleted() or ('X-Delete-At' in file.metadata and
|
if file.is_deleted() or \
|
||||||
int(file.metadata['X-Delete-At']) <= time.time()):
|
('X-Delete-At' in file.metadata and
|
||||||
|
int(file.metadata['X-Delete-At']) <= time.time()):
|
||||||
self.logger.timing_since('HEAD.timing', start_time)
|
self.logger.timing_since('HEAD.timing', start_time)
|
||||||
return HTTPNotFound(request=request)
|
return HTTPNotFound(request=request)
|
||||||
try:
|
try:
|
||||||
@ -776,8 +785,8 @@ class ObjectController(object):
|
|||||||
self.logger.timing_since('HEAD.timing', start_time)
|
self.logger.timing_since('HEAD.timing', start_time)
|
||||||
return HTTPNotFound(request=request)
|
return HTTPNotFound(request=request)
|
||||||
response = Response(request=request, conditional_response=True)
|
response = Response(request=request, conditional_response=True)
|
||||||
response.headers['Content-Type'] = file.metadata.get('Content-Type',
|
response.headers['Content-Type'] = file.metadata.get(
|
||||||
'application/octet-stream')
|
'Content-Type', 'application/octet-stream')
|
||||||
for key, value in file.metadata.iteritems():
|
for key, value in file.metadata.iteritems():
|
||||||
if key.lower().startswith('x-object-meta-') or \
|
if key.lower().startswith('x-object-meta-') or \
|
||||||
key.lower() in self.allowed_headers:
|
key.lower() in self.allowed_headers:
|
||||||
@ -803,12 +812,12 @@ class ObjectController(object):
|
|||||||
except ValueError, e:
|
except ValueError, e:
|
||||||
self.logger.increment('DELETE.errors')
|
self.logger.increment('DELETE.errors')
|
||||||
return HTTPBadRequest(body=str(e), request=request,
|
return HTTPBadRequest(body=str(e), request=request,
|
||||||
content_type='text/plain')
|
content_type='text/plain')
|
||||||
if 'x-timestamp' not in request.headers or \
|
if 'x-timestamp' not in request.headers or \
|
||||||
not check_float(request.headers['x-timestamp']):
|
not check_float(request.headers['x-timestamp']):
|
||||||
self.logger.increment('DELETE.errors')
|
self.logger.increment('DELETE.errors')
|
||||||
return HTTPBadRequest(body='Missing timestamp', request=request,
|
return HTTPBadRequest(body='Missing timestamp', request=request,
|
||||||
content_type='text/plain')
|
content_type='text/plain')
|
||||||
if self.mount_check and not check_mount(self.devices, device):
|
if self.mount_check and not check_mount(self.devices, device):
|
||||||
self.logger.increment('DELETE.errors')
|
self.logger.increment('DELETE.errors')
|
||||||
return HTTPInsufficientStorage(drive=device, request=request)
|
return HTTPInsufficientStorage(drive=device, request=request)
|
||||||
@ -819,7 +828,8 @@ class ObjectController(object):
|
|||||||
int(request.headers['x-if-delete-at']) != \
|
int(request.headers['x-if-delete-at']) != \
|
||||||
int(file.metadata.get('X-Delete-At') or 0):
|
int(file.metadata.get('X-Delete-At') or 0):
|
||||||
self.logger.timing_since('DELETE.timing', start_time)
|
self.logger.timing_since('DELETE.timing', start_time)
|
||||||
return HTTPPreconditionFailed(request=request,
|
return HTTPPreconditionFailed(
|
||||||
|
request=request,
|
||||||
body='X-If-Delete-At and X-Delete-At do not match')
|
body='X-If-Delete-At and X-Delete-At do not match')
|
||||||
orig_timestamp = file.metadata.get('X-Timestamp')
|
orig_timestamp = file.metadata.get('X-Timestamp')
|
||||||
if file.is_deleted():
|
if file.is_deleted():
|
||||||
@ -836,9 +846,10 @@ class ObjectController(object):
|
|||||||
file.unlinkold(metadata['X-Timestamp'])
|
file.unlinkold(metadata['X-Timestamp'])
|
||||||
if not orig_timestamp or \
|
if not orig_timestamp or \
|
||||||
orig_timestamp < request.headers['x-timestamp']:
|
orig_timestamp < request.headers['x-timestamp']:
|
||||||
self.container_update('DELETE', account, container, obj,
|
self.container_update(
|
||||||
request.headers, {'x-timestamp': metadata['X-Timestamp'],
|
'DELETE', account, container, obj, request.headers,
|
||||||
'x-trans-id': request.headers.get('x-trans-id', '-')},
|
{'x-timestamp': metadata['X-Timestamp'],
|
||||||
|
'x-trans-id': request.headers.get('x-trans-id', '-')},
|
||||||
device)
|
device)
|
||||||
resp = response_class(request=request)
|
resp = response_class(request=request)
|
||||||
self.logger.timing_since('DELETE.timing', start_time)
|
self.logger.timing_since('DELETE.timing', start_time)
|
||||||
@ -889,7 +900,8 @@ class ObjectController(object):
|
|||||||
else:
|
else:
|
||||||
res = method(req)
|
res = method(req)
|
||||||
except (Exception, Timeout):
|
except (Exception, Timeout):
|
||||||
self.logger.exception(_('ERROR __call__ error with %(method)s'
|
self.logger.exception(_(
|
||||||
|
'ERROR __call__ error with %(method)s'
|
||||||
' %(path)s '), {'method': req.method, 'path': req.path})
|
' %(path)s '), {'method': req.method, 'path': req.path})
|
||||||
res = HTTPInternalServerError(body=traceback.format_exc())
|
res = HTTPInternalServerError(body=traceback.format_exc())
|
||||||
trans_time = time.time() - start_time
|
trans_time = time.time() - start_time
|
||||||
|
@@ -26,7 +26,7 @@ from swift.common.bufferedhttp import http_connect
  from swift.common.exceptions import ConnectionTimeout
  from swift.common.ring import Ring
  from swift.common.utils import get_logger, renamer, write_pickle, \
- dump_recon_cache
+ dump_recon_cache, config_true_value
  from swift.common.daemon import Daemon
  from swift.obj.server import ASYNCDIR
  from swift.common.http import is_success, HTTP_NOT_FOUND, \
@@ -40,8 +40,7 @@ class ObjectUpdater(Daemon):
  self.conf = conf
  self.logger = get_logger(conf, log_route='object-updater')
  self.devices = conf.get('devices', '/srv/node')
- self.mount_check = conf.get('mount_check', 'true').lower() in \
- ('true', 't', '1', 'on', 'yes', 'y')
+ self.mount_check = config_true_value(conf.get('mount_check', 'true'))
  self.swift_dir = conf.get('swift_dir', '/etc/swift')
  self.interval = int(conf.get('interval', 300))
  self.container_ring = None
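
The hunks above and below keep swapping the open-coded value.lower() in TRUE_VALUES membership test for a single config_true_value() helper. The helper's body is not part of the hunks shown here; the following is a minimal sketch of what it plausibly looks like, inferred from the call sites and from the test_config_true_value test added at the end of this commit. The exact code in swift.common.utils may differ.

    # Hypothetical sketch, not the verbatim swift.common.utils implementation.
    TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y'))


    def config_true_value(value):
        # True passes through unchanged; strings are matched
        # case-insensitively against TRUE_VALUES; everything else
        # (including False) is treated as false.
        return value is True or \
            (isinstance(value, basestring) and value.lower() in TRUE_VALUES)

Centralizing the check also means a value that is already a bool (say, from a config dict built in tests) behaves sensibly instead of failing the string membership test.
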
@@ -31,7 +31,7 @@ from eventlet import spawn_n, GreenPile, Timeout
  from eventlet.queue import Queue, Empty, Full
  from eventlet.timeout import Timeout

- from swift.common.utils import normalize_timestamp, TRUE_VALUES, public
+ from swift.common.utils import normalize_timestamp, config_true_value, public
  from swift.common.bufferedhttp import http_connect
  from swift.common.constraints import MAX_ACCOUNT_NAME_LENGTH
  from swift.common.exceptions import ChunkReadTimeout, ConnectionTimeout
@@ -604,7 +604,7 @@ class Controller(object):
  reasons = []
  bodies = []
  sources = []
- newest = req.headers.get('x-newest', 'f').lower() in TRUE_VALUES
+ newest = config_true_value(req.headers.get('x-newest', 'f'))
  nodes = iter(nodes)
  while len(statuses) < attempts:
  try:
@@ -40,8 +40,8 @@ from eventlet import sleep, GreenPile, Timeout
  from eventlet.queue import Queue
  from eventlet.timeout import Timeout

- from swift.common.utils import ContextPool, normalize_timestamp, TRUE_VALUES, \
- public
+ from swift.common.utils import ContextPool, normalize_timestamp, \
+ config_true_value, public
  from swift.common.bufferedhttp import http_connect
  from swift.common.constraints import check_metadata, check_object_creation, \
  CONTAINER_LISTING_LIMIT, MAX_FILE_SIZE
@@ -117,12 +117,14 @@ class SegmentedIterable(object):
  self.next_get_time = time.time() + \
  1.0 / self.controller.app.rate_limit_segments_per_sec
  shuffle(nodes)
- resp = self.controller.GETorHEAD_base(req, _('Object'), partition,
+ resp = self.controller.GETorHEAD_base(
+ req, _('Object'), partition,
  self.controller.iter_nodes(partition, nodes,
- self.controller.app.object_ring), path,
- len(nodes))
+ self.controller.app.object_ring),
+ path, len(nodes))
  if not is_success(resp.status_int):
- raise Exception(_('Could not load object segment %(path)s:' \
+ raise Exception(_(
+ 'Could not load object segment %(path)s:'
  ' %(status)s') % {'path': path, 'status': resp.status_int})
  self.segment_iter = resp.app_iter
  # See NOTE: swift_conn at top of file about this.
@@ -131,8 +133,9 @@ class SegmentedIterable(object):
  raise
  except (Exception, Timeout), err:
  if not getattr(err, 'swift_logged', False):
- self.controller.app.logger.exception(_('ERROR: While '
- 'processing manifest /%(acc)s/%(cont)s/%(obj)s'),
+ self.controller.app.logger.exception(_(
+ 'ERROR: While processing manifest '
+ '/%(acc)s/%(cont)s/%(obj)s'),
  {'acc': self.controller.account_name,
  'cont': self.controller.container_name,
  'obj': self.controller.object_name})
@@ -162,8 +165,9 @@ class SegmentedIterable(object):
  raise
  except (Exception, Timeout), err:
  if not getattr(err, 'swift_logged', False):
- self.controller.app.logger.exception(_('ERROR: While '
- 'processing manifest /%(acc)s/%(cont)s/%(obj)s'),
+ self.controller.app.logger.exception(_(
+ 'ERROR: While processing manifest '
+ '/%(acc)s/%(cont)s/%(obj)s'),
  {'acc': self.controller.account_name,
  'cont': self.controller.container_name,
  'obj': self.controller.object_name})
@@ -220,8 +224,9 @@ class SegmentedIterable(object):
  raise
  except (Exception, Timeout), err:
  if not getattr(err, 'swift_logged', False):
- self.controller.app.logger.exception(_('ERROR: While '
- 'processing manifest /%(acc)s/%(cont)s/%(obj)s'),
+ self.controller.app.logger.exception(_(
+ 'ERROR: While processing manifest '
+ '/%(acc)s/%(cont)s/%(obj)s'),
  {'acc': self.controller.account_name,
  'cont': self.controller.container_name,
  'obj': self.controller.object_name})
@@ -255,8 +260,8 @@ class ObjectController(Controller):
  'format=json&prefix=%s&marker=%s' % (quote(lprefix),
  quote(marker))
  shuffle(lnodes)
- lresp = self.GETorHEAD_base(lreq, _('Container'),
- lpartition, lnodes, lreq.path_info,
+ lresp = self.GETorHEAD_base(
+ lreq, _('Container'), lpartition, lnodes, lreq.path_info,
  len(lnodes))
  if 'swift.authorize' in env:
  lreq.acl = lresp.headers.get('x-container-read')
@@ -300,9 +305,10 @@ class ObjectController(Controller):
  partition, nodes = self.app.object_ring.get_nodes(
  self.account_name, self.container_name, self.object_name)
  shuffle(nodes)
- resp = self.GETorHEAD_base(req, _('Object'), partition,
- self.iter_nodes(partition, nodes, self.app.object_ring),
- req.path_info, len(nodes))
+ resp = self.GETorHEAD_base(
+ req, _('Object'), partition,
+ self.iter_nodes(partition, nodes, self.app.object_ring),
+ req.path_info, len(nodes))

  if 'x-object-manifest' in resp.headers:
  lcontainer, lprefix = \
@@ -311,7 +317,7 @@ class ObjectController(Controller):
  lprefix = unquote(lprefix)
  try:
  listing = list(self._listing_iter(lcontainer, lprefix,
  req.environ))
  except ListingIterNotFound:
  return HTTPNotFound(request=req)
  except ListingIterNotAuthorized, err:
@@ -337,7 +343,8 @@ class ObjectController(Controller):
  head_response.status_int = resp.status_int
  return head_response
  else:
- resp.app_iter = SegmentedIterable(self, lcontainer,
+ resp.app_iter = SegmentedIterable(
+ self, lcontainer,
  self._listing_iter(lcontainer, lprefix, req.environ),
  resp)

@@ -348,7 +355,7 @@ class ObjectController(Controller):
  content_length = sum(o['bytes'] for o in listing)
  last_modified = max(o['last_modified'] for o in listing)
  last_modified = datetime(*map(int, re.split('[^\d]',
  last_modified)[:-1]))
  etag = md5(
  ''.join(o['hash'] for o in listing)).hexdigest()
  else:
@@ -396,11 +403,11 @@ class ObjectController(Controller):
  req.headers['x-delete-at'] = '%d' % (time.time() + x_delete_after)
  if self.app.object_post_as_copy:
  req.method = 'PUT'
- req.path_info = '/%s/%s/%s' % (self.account_name,
- self.container_name, self.object_name)
+ req.path_info = '/%s/%s/%s' % (
+ self.account_name, self.container_name, self.object_name)
  req.headers['Content-Length'] = 0
  req.headers['X-Copy-From'] = quote('/%s/%s' % (self.container_name,
  self.object_name))
  req.headers['X-Fresh-Metadata'] = 'true'
  req.environ['swift_versioned_copy'] = True
  resp = self.PUT(req)
@@ -430,13 +437,15 @@ class ObjectController(Controller):
  try:
  x_delete_at = int(req.headers['x-delete-at'])
  if x_delete_at < time.time():
- return HTTPBadRequest(body='X-Delete-At in past',
- request=req, content_type='text/plain')
+ return HTTPBadRequest(
+ body='X-Delete-At in past', request=req,
+ content_type='text/plain')
  except ValueError:
  return HTTPBadRequest(request=req,
  content_type='text/plain',
  body='Non-integer X-Delete-At')
- delete_at_container = str(x_delete_at /
+ delete_at_container = str(
+ x_delete_at /
  self.app.expiring_objects_container_divisor *
  self.app.expiring_objects_container_divisor)
  delete_at_part, delete_at_nodes = \
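
A note on the delete_at_container computation in the hunk above: dividing and then re-multiplying by expiring_objects_container_divisor rounds the expiry timestamp down to a divisor-sized bucket, so every object expiring in the same window lands in the same expiring-objects container. A small illustration; the divisor value of 86400 (one day) is an assumption here:

    # Illustration only: bucketing X-Delete-At values the way the code
    # above does.
    divisor = 86400  # assumed value for expiring_objects_container_divisor
    for x_delete_at in (1350000000, 1350043199, 1350086400):
        # '/' is floor division on ints in Python 2
        print str(x_delete_at / divisor * divisor)
    # prints 1350000000, 1350000000, 1350086400 -- the first two expire in
    # the same day-sized window and so share a container name
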
@@ -475,7 +484,7 @@ class ObjectController(Controller):
  except (Exception, ChunkWriteTimeout):
  conn.failed = True
  self.exception_occurred(conn.node, _('Object'),
  _('Trying to write to %s') % path)
  conn.queue.task_done()

  def _connect_put_node(self, nodes, part, path, headers,
@@ -485,8 +494,9 @@ class ObjectController(Controller):
  for node in nodes:
  try:
  with ConnectionTimeout(self.app.conn_timeout):
- conn = http_connect(node['ip'], node['port'],
- node['device'], part, 'PUT', path, headers)
+ conn = http_connect(
+ node['ip'], node['port'], node['device'], part, 'PUT',
+ path, headers)
  with Timeout(self.app.node_timeout):
  resp = conn.getexpect()
  if resp.status == HTTP_CONTINUE:
@@ -501,7 +511,7 @@ class ObjectController(Controller):
  self.error_limit(node)
  except:
  self.exception_occurred(node, _('Object'),
  _('Expect: 100-continue on %s') % path)

  @public
  @delay_denial
@@ -533,12 +543,14 @@ class ObjectController(Controller):
  try:
  x_delete_at = int(req.headers['x-delete-at'])
  if x_delete_at < time.time():
- return HTTPBadRequest(body='X-Delete-At in past',
- request=req, content_type='text/plain')
+ return HTTPBadRequest(
+ body='X-Delete-At in past', request=req,
+ content_type='text/plain')
  except ValueError:
  return HTTPBadRequest(request=req, content_type='text/plain',
  body='Non-integer X-Delete-At')
- delete_at_container = str(x_delete_at /
+ delete_at_container = str(
+ x_delete_at /
  self.app.expiring_objects_container_divisor *
  self.app.expiring_objects_container_divisor)
  delete_at_part, delete_at_nodes = \
@@ -549,12 +561,13 @@ class ObjectController(Controller):
  partition, nodes = self.app.object_ring.get_nodes(
  self.account_name, self.container_name, self.object_name)
  # do a HEAD request for container sync and checking object versions
- if 'x-timestamp' in req.headers or (object_versions and not
- req.environ.get('swift_versioned_copy')):
+ if 'x-timestamp' in req.headers or \
+ (object_versions and not
+ req.environ.get('swift_versioned_copy')):
  hreq = Request.blank(req.path_info, headers={'X-Newest': 'True'},
  environ={'REQUEST_METHOD': 'HEAD'})
  hresp = self.GETorHEAD_base(hreq, _('Object'), partition, nodes,
  hreq.path_info, len(nodes))
  # Used by container sync feature
  if 'x-timestamp' in req.headers:
  try:
@@ -565,7 +578,8 @@ class ObjectController(Controller):
  float(req.headers['x-timestamp']):
  return HTTPAccepted(request=req)
  except ValueError:
- return HTTPBadRequest(request=req, content_type='text/plain',
+ return HTTPBadRequest(
+ request=req, content_type='text/plain',
  body='X-Timestamp should be a UNIX timestamp float value; '
  'was %r' % req.headers['x-timestamp'])
  else:
@@ -575,7 +589,7 @@ class ObjectController(Controller):
  if not req.headers.get('content-type'):
  guessed_type, _junk = mimetypes.guess_type(req.path_info)
  req.headers['Content-Type'] = guessed_type or \
  'application/octet-stream'
  content_type_manually_set = False
  error_response = check_object_creation(req, self.object_name)
  if error_response:
@@ -602,9 +616,9 @@ class ObjectController(Controller):
  'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
  copy_environ = {'REQUEST_METHOD': 'COPY',
  'swift_versioned_copy': True
  }
  copy_req = Request.blank(req.path_info, headers=copy_headers,
  environ=copy_environ)
  copy_resp = self.COPY(copy_req)
  if is_client_error(copy_resp.status_int):
  # missing container or bad permissions
@@ -629,9 +643,10 @@ class ObjectController(Controller):
  src_container_name, src_obj_name = \
  source_header.split('/', 3)[2:]
  except ValueError:
- return HTTPPreconditionFailed(request=req,
+ return HTTPPreconditionFailed(
+ request=req,
  body='X-Copy-From header must be of the form'
  '<container name>/<object name>')
  source_req = req.copy_get()
  source_req.path_info = source_header
  source_req.headers['X-Newest'] = 'true'
@@ -645,7 +660,7 @@ class ObjectController(Controller):
  self.object_name = orig_obj_name
  self.container_name = orig_container_name
  new_req = Request.blank(req.path_info,
  environ=req.environ, headers=req.headers)
  data_source = source_resp.app_iter
  new_req.content_length = source_resp.content_length
  if new_req.content_length is None:
@@ -660,8 +675,8 @@ class ObjectController(Controller):
  if not content_type_manually_set:
  new_req.headers['Content-Type'] = \
  source_resp.headers['Content-Type']
- if new_req.headers.get('x-fresh-metadata', 'false').lower() \
- not in TRUE_VALUES:
+ if not config_true_value(
+ new_req.headers.get('x-fresh-metadata', 'false')):
  for k, v in source_resp.headers.items():
  if k.lower().startswith('x-object-meta-'):
  new_req.headers[k] = v
@@ -692,7 +707,7 @@ class ObjectController(Controller):
  if len(conns) <= len(nodes) / 2:
  self.app.logger.error(
  _('Object PUT returning 503, %(conns)s/%(nodes)s '
  'required connections'),
  {'conns': len(conns), 'nodes': len(nodes) // 2 + 1})
  return HTTPServiceUnavailable(request=req)
  bytes_transferred = 0
@@ -715,12 +730,14 @@ class ObjectController(Controller):
  return HTTPRequestEntityTooLarge(request=req)
  for conn in list(conns):
  if not conn.failed:
- conn.queue.put('%x\r\n%s\r\n' % (len(chunk), chunk)
- if chunked else chunk)
+ conn.queue.put(
+ '%x\r\n%s\r\n' % (len(chunk), chunk)
+ if chunked else chunk)
  else:
  conns.remove(conn)
  if len(conns) <= len(nodes) / 2:
- self.app.logger.error(_('Object PUT exceptions during'
+ self.app.logger.error(_(
+ 'Object PUT exceptions during'
  ' send, %(conns)s/%(nodes)s required connections'),
  {'conns': len(conns), 'nodes': len(nodes) / 2 + 1})
  return HTTPServiceUnavailable(request=req)
@@ -758,14 +775,17 @@ class ObjectController(Controller):
  reasons.append(response.reason)
  bodies.append(response.read())
  if response.status >= HTTP_INTERNAL_SERVER_ERROR:
- self.error_occurred(conn.node,
- _('ERROR %(status)d %(body)s From Object Server ' \
- 're: %(path)s') % {'status': response.status,
- 'body': bodies[-1][:1024], 'path': req.path})
+ self.error_occurred(
+ conn.node,
+ _('ERROR %(status)d %(body)s From Object Server '
+ 're: %(path)s') %
+ {'status': response.status,
+ 'body': bodies[-1][:1024], 'path': req.path})
  elif is_success(response.status):
  etags.add(response.getheader('etag').strip('"'))
  except (Exception, Timeout):
- self.exception_occurred(conn.node, _('Object'),
+ self.exception_occurred(
+ conn.node, _('Object'),
  _('Trying to get final status of PUT to %s') % req.path)
  if len(etags) > 1:
  self.app.logger.error(
@@ -777,10 +797,10 @@ class ObjectController(Controller):
  reasons.append('')
  bodies.append('')
  resp = self.best_response(req, statuses, reasons, bodies,
  _('Object PUT'), etag=etag)
  if source_header:
  resp.headers['X-Copied-From'] = quote(
  source_header.split('/', 2)[2])
  if 'last-modified' in source_resp.headers:
  resp.headers['X-Copied-From-Last-Modified'] = \
  source_resp.headers['last-modified']
@@ -829,12 +849,12 @@ class ObjectController(Controller):
  self.container_name + '/' + self.object_name
  copy_headers = {'X-Newest': 'True',
  'Destination': orig_container + '/' + orig_obj
  }
  copy_environ = {'REQUEST_METHOD': 'COPY',
  'swift_versioned_copy': True
  }
  creq = Request.blank(copy_path, headers=copy_headers,
  environ=copy_environ)
  copy_resp = self.COPY(creq)
  if is_client_error(copy_resp.status_int):
  # some user error, maybe permissions
@@ -867,7 +887,8 @@ class ObjectController(Controller):
  req.headers['X-Timestamp'] = \
  normalize_timestamp(float(req.headers['x-timestamp']))
  except ValueError:
- return HTTPBadRequest(request=req, content_type='text/plain',
+ return HTTPBadRequest(
+ request=req, content_type='text/plain',
  body='X-Timestamp should be a UNIX timestamp float value; '
  'was %r' % req.headers['x-timestamp'])
  else:
@@ -881,7 +902,7 @@ class ObjectController(Controller):
  nheaders['X-Container-Device'] = container['device']
  headers.append(nheaders)
  resp = self.make_requests(req, self.app.object_ring,
  partition, 'DELETE', req.path_info, headers)
  return resp

  @public
@@ -898,9 +919,10 @@ class ObjectController(Controller):
  try:
  _junk, dest_container, dest_object = dest.split('/', 2)
  except ValueError:
- return HTTPPreconditionFailed(request=req,
- body='Destination header must be of the form '
- '<container name>/<object name>')
+ return HTTPPreconditionFailed(
+ request=req,
+ body='Destination header must be of the form '
+ '<container name>/<object name>')
  source = '/' + self.container_name + '/' + self.object_name
  self.container_name = dest_container
  self.object_name = dest_object
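
One detail worth calling out from the PUT hunk above: the expression conn.queue.put('%x\r\n%s\r\n' % (len(chunk), chunk) if chunked else chunk) is HTTP/1.1 chunked transfer-encoding framing, i.e. hex length, CRLF, payload, CRLF. A standalone illustration:

    # Standalone illustration of the chunk framing used in the PUT path
    # above; a zero-length chunk terminates the body.
    def frame_chunk(chunk):
        return '%x\r\n%s\r\n' % (len(chunk), chunk)

    print repr(frame_chunk('hello world'))  # 'b\r\nhello world\r\n' (0xb == 11)
    print repr(frame_chunk(''))             # '0\r\n\r\n', the end-of-body marker
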
@@ -34,7 +34,7 @@ from eventlet import Timeout

  from swift.common.ring import Ring
  from swift.common.utils import cache_from_env, get_logger, \
- get_remote_client, split_path, TRUE_VALUES
+ get_remote_client, split_path, config_true_value
  from swift.common.constraints import check_utf8
  from swift.proxy.controllers import AccountController, ObjectController, \
  ContainerController, Controller
@@ -72,9 +72,9 @@ class Application(object):
  self.recheck_account_existence = \
  int(conf.get('recheck_account_existence', 60))
  self.allow_account_management = \
- conf.get('allow_account_management', 'no').lower() in TRUE_VALUES
+ config_true_value(conf.get('allow_account_management', 'no'))
  self.object_post_as_copy = \
- conf.get('object_post_as_copy', 'true').lower() in TRUE_VALUES
+ config_true_value(conf.get('object_post_as_copy', 'true'))
  self.resellers_conf = ConfigParser()
  self.resellers_conf.read(os.path.join(swift_dir, 'resellers.conf'))
  self.object_ring = object_ring or Ring(swift_dir, ring_name='object')
@@ -86,7 +86,7 @@ class Application(object):
  mimetypes.init(mimetypes.knownfiles +
  [os.path.join(swift_dir, 'mime.types')])
  self.account_autocreate = \
- conf.get('account_autocreate', 'no').lower() in TRUE_VALUES
+ config_true_value(conf.get('account_autocreate', 'no'))
  self.expiring_objects_account = \
  (conf.get('auto_create_account_prefix') or '.') + \
  'expiring_objects'
@@ -105,8 +105,7 @@ class Application(object):
  int(conf.get('rate_limit_after_segment', 10))
  self.rate_limit_segments_per_sec = \
  int(conf.get('rate_limit_segments_per_sec', 1))
- self.log_handoffs = \
- conf.get('log_handoffs', 'true').lower() in TRUE_VALUES
+ self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
  self.cors_allow_origin = [
  a.strip()
  for a in conf.get('cors_allow_origin', '').split(',')
@@ -14,7 +14,7 @@ from shutil import rmtree
  from test import get_config
  from ConfigParser import MissingSectionHeaderError
  from StringIO import StringIO
- from swift.common.utils import readconf, TRUE_VALUES
+ from swift.common.utils import readconf, config_true_value
  from logging import Handler
  import logging.handlers

@@ -198,7 +198,7 @@ def fake_syslog_handler():
  logging.handlers.SysLogHandler = FakeLogger


- if get_config('unit_test').get('fake_syslog', 'False').lower() in TRUE_VALUES:
+ if config_true_value(get_config('unit_test').get('fake_syslog', 'False')):
  fake_syslog_handler()


@@ -901,6 +901,18 @@ log_name = %(yarr)s'''
  for v in utils.TRUE_VALUES:
  self.assertEquals(v, v.lower())

+ def test_config_true_value(self):
+ orig_trues = utils.TRUE_VALUES
+ try:
+ utils.TRUE_VALUES = 'hello world'.split()
+ for val in 'hello world HELLO WORLD'.split():
+ self.assertTrue(utils.config_true_value(val) is True)
+ self.assertTrue(utils.config_true_value(True) is True)
+ self.assertTrue(utils.config_true_value('foo') is False)
+ self.assertTrue(utils.config_true_value(False) is False)
+ finally:
+ utils.TRUE_VALUES = orig_trues
+
  def test_streq_const_time(self):
  self.assertTrue(utils.streq_const_time('abc123', 'abc123'))
  self.assertFalse(utils.streq_const_time('a', 'aaaaa'))
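
Read together with the sketch of config_true_value given earlier, the new test pins down the helper's contract. In a hypothetical interpreter session, assuming the stock TRUE_VALUES (which include 'yes'):

    config_true_value('YES')    # True  -- matching is case-insensitive
    config_true_value(True)     # True  -- booleans pass through untouched
    config_true_value('foo')    # False -- unrecognized strings are false
    config_true_value(False)    # False
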