merge to trunk

David Goetz 2011-01-26 14:38:13 -08:00
commit 3ea09dd0e6
53 changed files with 1515 additions and 973 deletions

@@ -24,9 +24,13 @@ Paul Jimenez
Brian K. Jones
Ed Leafe
Stephen Milton
Russ Nelson
Colin Nicholson
Andrew Clay Shafer
Monty Taylor
Caleb Tennis
FUJITA Tomonori
Kapil Thangavelu
Conrad Weidenkeller
Chris Wedgwood
Cory Wright

bin/st

@@ -80,7 +80,7 @@ except ImportError:
res = []
consts = {'true': True, 'false': False, 'null': None}
string = '(' + comments.sub('', string) + ')'
for type, val, _, _, _ in \
for type, val, _junk, _junk, _junk in \
generate_tokens(StringIO(string).readline):
if (type == OP and val not in '[]{}:,()-') or \
(type == NAME and val not in consts):
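
A note on the recurring `_` to `_junk` renames throughout this commit: swift/__init__.py calls gettext.install('swift'), which binds the translation function to the builtin name `_`, so reusing `_` as a throwaway variable shadows it. A minimal illustration of the clash (hypothetical, not from this commit):

    import gettext

    gettext.install('swift')    # binds the builtin _ to the translation function
    for _, val in [(1, 'a')]:   # this throwaway _ now shadows that builtin
        pass
    print _('hello')            # TypeError: 'int' object is not callable
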
@@ -914,7 +914,7 @@ def st_delete(parser, args, print_queue, error_queue):
segment_queue.put((scontainer, delobj['name']))
if not segment_queue.empty():
segment_threads = [QueueFunctionThread(segment_queue,
_delete_segment, create_connection()) for _ in
_delete_segment, create_connection()) for _junk in
xrange(10)]
for thread in segment_threads:
thread.start()
@@ -972,11 +972,11 @@ def st_delete(parser, args, print_queue, error_queue):
create_connection = lambda: Connection(options.auth, options.user,
options.key, preauthurl=url, preauthtoken=token, snet=options.snet)
object_threads = [QueueFunctionThread(object_queue, _delete_object,
create_connection()) for _ in xrange(10)]
create_connection()) for _junk in xrange(10)]
for thread in object_threads:
thread.start()
container_threads = [QueueFunctionThread(container_queue,
_delete_container, create_connection()) for _ in xrange(10)]
_delete_container, create_connection()) for _junk in xrange(10)]
for thread in container_threads:
thread.start()
if not args:
@@ -1142,11 +1142,11 @@ def st_download(options, args, print_queue, error_queue):
create_connection = lambda: Connection(options.auth, options.user,
options.key, preauthurl=url, preauthtoken=token, snet=options.snet)
object_threads = [QueueFunctionThread(object_queue, _download_object,
create_connection()) for _ in xrange(10)]
create_connection()) for _junk in xrange(10)]
for thread in object_threads:
thread.start()
container_threads = [QueueFunctionThread(container_queue,
_download_container, create_connection()) for _ in xrange(10)]
_download_container, create_connection()) for _junk in xrange(10)]
for thread in container_threads:
thread.start()
if not args:
@@ -1525,7 +1525,8 @@ def st_upload(options, args, print_queue, error_queue):
full_size = getsize(path)
segment_queue = Queue(10000)
segment_threads = [QueueFunctionThread(segment_queue,
_segment_job, create_connection()) for _ in xrange(10)]
_segment_job, create_connection()) for _junk in
xrange(10)]
for thread in segment_threads:
thread.start()
segment = 0
@@ -1569,7 +1570,7 @@ def st_upload(options, args, print_queue, error_queue):
'container': scontainer, 'obj': delobj['name']})
if not segment_queue.empty():
segment_threads = [QueueFunctionThread(segment_queue,
_segment_job, create_connection()) for _ in
_segment_job, create_connection()) for _junk in
xrange(10)]
for thread in segment_threads:
thread.start()
@@ -1603,7 +1604,7 @@ def st_upload(options, args, print_queue, error_queue):
create_connection = lambda: Connection(options.auth, options.user,
options.key, preauthurl=url, preauthtoken=token, snet=options.snet)
object_threads = [QueueFunctionThread(object_queue, _object_job,
create_connection()) for _ in xrange(10)]
create_connection()) for _junk in xrange(10)]
for thread in object_threads:
thread.start()
conn = create_connection()
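
For context, every st_* command above leans on the same fan-out idiom: fill a bounded Queue with work items, spawn ten workers (each built with its own Connection so no two threads share a socket), then drain and join. A minimal sketch of that idiom, with a simplified stand-in for the real QueueFunctionThread in bin/st:

    import threading
    from Queue import Empty, Queue

    class QueueFunctionThread(threading.Thread):
        """Simplified: apply func to each queued item until told to abort."""

        def __init__(self, queue, func, *args):
            threading.Thread.__init__(self)
            self.queue = queue
            self.func = func
            self.args = args
            self.abort = False

        def run(self):
            while not (self.abort and self.queue.empty()):
                try:
                    item = self.queue.get(timeout=0.1)
                except Empty:
                    continue
                self.func(item, *self.args)

    def delete_object(name, conn):
        print 'would delete %s using %r' % (name, conn)

    object_queue = Queue(10000)
    object_threads = [QueueFunctionThread(object_queue, delete_object,
        'connection-%d' % i) for i in xrange(10)]
    for thread in object_threads:
        thread.start()
    for name in ('obj1', 'obj2'):
        object_queue.put(name)
    for thread in object_threads:
        thread.abort = True
        thread.join()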

@@ -73,7 +73,7 @@ class Auditor(object):
def audit_object(self, account, container, name):
path = '/%s/%s/%s' % (account, container, name)
part, nodes = self.object_ring.get_nodes(account, container, name)
part, nodes = self.object_ring.get_nodes(account, container.encode('utf-8'), name.encode('utf-8'))
container_listing = self.audit_container(account, container)
consistent = True
if name not in container_listing:
@@ -109,7 +109,7 @@ class Auditor(object):
etags.append(resp.getheader('ETag'))
else:
conn = http_connect(node['ip'], node['port'],
node['device'], part, 'HEAD', path, {})
node['device'], part, 'HEAD', path.encode('utf-8'), {})
resp = conn.getresponse()
if resp.status // 100 != 2:
self.object_not_found += 1
@@ -144,14 +144,14 @@ class Auditor(object):
if (account, name) in self.list_cache:
return self.list_cache[(account, name)]
self.in_progress[(account, name)] = Event()
print 'Auditing container "%s"...' % name
print 'Auditing container "%s"' % name
path = '/%s/%s' % (account, name)
account_listing = self.audit_account(account)
consistent = True
if name not in account_listing:
consistent = False
print " Container %s not in account listing!" % path
part, nodes = self.container_ring.get_nodes(account, name)
part, nodes = self.container_ring.get_nodes(account, name.encode('utf-8'))
rec_d = {}
responses = {}
for node in nodes:
@@ -161,8 +161,8 @@ class Auditor(object):
node_id = node['id']
try:
conn = http_connect(node['ip'], node['port'], node['device'],
part, 'GET', path, {},
'format=json&marker=%s' % quote(marker))
part, 'GET', path.encode('utf-8'), {},
'format=json&marker=%s' % quote(marker.encode('utf-8')))
resp = conn.getresponse()
if resp.status // 100 != 2:
self.container_not_found += 1
@@ -189,7 +189,7 @@ class Auditor(object):
self.container_obj_mismatch += 1
consistent = False
print " Different versions of %s/%s in container dbs." % \
name, obj['name']
(name, obj['name'])
if obj['last_modified'] > rec_d[obj_name]['last_modified']:
rec_d[obj_name] = obj
obj_counts = [int(header['x-container-object-count'])
@@ -220,7 +220,7 @@ class Auditor(object):
if account in self.list_cache:
return self.list_cache[account]
self.in_progress[account] = Event()
print "Auditing account %s..." % account
print 'Auditing account "%s"' % account
consistent = True
path = '/%s' % account
part, nodes = self.account_ring.get_nodes(account)
@@ -233,19 +233,18 @@ class Auditor(object):
try:
conn = http_connect(node['ip'], node['port'],
node['device'], part, 'GET', path, {},
'format=json&marker=%s' % quote(marker))
'format=json&marker=%s' % quote(marker.encode('utf-8')))
resp = conn.getresponse()
if resp.status // 100 != 2:
self.account_not_found += 1
consistent = False
print " Bad status GETting account %(ip)s:%(device)s" \
% node
print " Bad status GETting account '%s' from %ss:%ss" % (account, node['ip'], node['device'])
break
results = simplejson.loads(resp.read())
except Exception:
self.account_exceptions += 1
consistent = False
print " Exception GETting account %(ip)s:%(device)s" % node
print " Exception GETting account '%s' on %ss:%ss" % (account, node['ip'], node['device'])
break
if node_id not in responses:
responses[node_id] = [dict(resp.getheaders()), []]
@@ -258,15 +257,17 @@ class Auditor(object):
if len(set(cont_counts)) != 1:
self.account_container_mismatch += 1
consistent = False
print " Account databases don't agree on number of containers."
print " Max: %s, Min: %s" % (max(cont_counts), min(cont_counts))
print " Account databases for '%s' don't agree on number of containers." % account
if cont_counts:
print " Max: %s, Min: %s" % (max(cont_counts), min(cont_counts))
obj_counts = [int(header['x-account-object-count'])
for header in headers]
if len(set(obj_counts)) != 1:
self.account_object_mismatch += 1
consistent = False
print " Account databases don't agree on number of objects."
print " Max: %s, Min: %s" % (max(obj_counts), min(obj_counts))
print " Account databases for '%s' don't agree on number of objects." % account
if obj_counts:
print " Max: %s, Min: %s" % (max(obj_counts), min(obj_counts))
containers = set()
for resp in responses.values():
containers.update(container['name'] for container in resp[1])
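
The `.encode('utf-8')` calls added throughout this file matter because the ring and the HTTP layer hash and transmit names as byte strings; on Python 2, feeding a unicode name with non-ASCII characters into a hash triggers an implicit ascii encode and fails. A small illustration (md5 stands in for the ring's internal hashing):

    from hashlib import md5

    name = u'c\u00f6ntainer'                     # a non-ASCII container name
    print md5(name.encode('utf-8')).hexdigest()  # fine: hashing bytes
    md5(name)                                    # UnicodeEncodeError on Python 2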

@@ -25,7 +25,7 @@ if __name__ == '__main__':
gettext.install('swift', unicode=1)
if len(argv) != 4 or argv[1] != '-K':
exit('Syntax: %s -K <super_admin_key> <path to auth.db>' % argv[0])
_, _, super_admin_key, auth_db = argv
_junk, _junk, super_admin_key, auth_db = argv
call(['swauth-prep', '-K', super_admin_key])
conn = sqlite3.connect(auth_db)
for account, cfaccount, user, password, admin, reseller_admin in \

@@ -105,7 +105,7 @@ if __name__ == '__main__':
else:
conf = CONF_DEFAULTS
parser.set_defaults(**conf)
options, _ = parser.parse_args()
options, _junk = parser.parse_args()
if options.concurrency is not '':
options.put_concurrency = options.concurrency
options.get_concurrency = options.concurrency

@@ -32,7 +32,7 @@ GRACEFUL_SHUTDOWN_SERVERS = ['account-server', 'container-server',
MAX_DESCRIPTORS = 32768
MAX_MEMORY = (1024 * 1024 * 1024) * 2 # 2 GB
_, server, command = sys.argv
_junk, server, command = sys.argv
if server == 'all':
servers = ALL_SERVERS
else:
@@ -155,7 +155,7 @@ def do_stop(server, graceful=False):
except OSError:
pass
for pid_file, pid in pfiles:
for _ in xrange(150): # 15 seconds
for _junk in xrange(150): # 15 seconds
if not os.path.exists('/proc/%s' % pid):
break
time.sleep(0.1)

@@ -127,7 +127,7 @@ if __name__ == '__main__':
next_report += 2
while need_to_queue >= 1:
container = 'stats_container_dispersion_%s' % uuid4()
part, _ = container_ring.get_nodes(account, container)
part, _junk = container_ring.get_nodes(account, container)
if part in parts_left:
coropool.spawn(put_container, connpool, container, report)
sleep()
@@ -152,7 +152,7 @@ if __name__ == '__main__':
next_report += 2
while need_to_queue >= 1:
obj = 'stats_object_dispersion_%s' % uuid4()
part, _ = object_ring.get_nodes(account, container, obj)
part, _junk = object_ring.get_nodes(account, container, obj)
if part in parts_left:
coropool.spawn(put_object, connpool, container, obj, report)
sleep()

@@ -107,7 +107,7 @@ def audit(coropool, connpool, account, container_ring, object_ring, options):
found = False
error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
try:
attempts, _ = direct_client.retry(
attempts, _junk = direct_client.retry(
direct_client.direct_head_object, node, part,
account, container, obj, error_log=error_log,
retries=options.retries)
@@ -160,7 +160,7 @@ def audit(coropool, connpool, account, container_ring, object_ring, options):
print 'Containers Missing'
print '-' * 78
for container in sorted(containers_missing_replicas.keys()):
part, _ = container_ring.get_nodes(account, container)
part, _junk = container_ring.get_nodes(account, container)
for node in containers_missing_replicas[container]:
print 'http://%s:%s/%s/%s/%s/%s' % (node['ip'], node['port'],
node['device'], part, account, container)
@@ -170,8 +170,8 @@ def audit(coropool, connpool, account, container_ring, object_ring, options):
print 'Objects Missing'
print '-' * 78
for opath in sorted(objects_missing_replicas.keys()):
_, container, obj = opath.split('/', 2)
part, _ = object_ring.get_nodes(account, container, obj)
_junk, container, obj = opath.split('/', 2)
part, _junk = object_ring.get_nodes(account, container, obj)
for node in objects_missing_replicas[opath]:
print 'http://%s:%s/%s/%s/%s/%s/%s' % (node['ip'],
node['port'], node['device'], part, account, container,
@@ -200,7 +200,7 @@ def container_dispersion_report(coropool, connpool, account, container_ring,
for node in nodes:
error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
try:
attempts, _ = direct_client.retry(
attempts, _junk = direct_client.retry(
direct_client.direct_head_container, node,
part, account, container, error_log=error_log,
retries=options.retries)
@@ -284,7 +284,7 @@ def object_dispersion_report(coropool, connpool, account, object_ring, options):
for node in nodes:
error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
try:
attempts, _ = direct_client.retry(
attempts, _junk = direct_client.retry(
direct_client.direct_head_object, node, part,
account, container, obj, error_log=error_log,
retries=options.retries)

@@ -134,9 +134,80 @@ can be found in the :doc:`Ring Overview <overview_ring>`.
General Server Configuration
----------------------------
Swift uses paste.deploy to manage server configurations. Default configuration
options are set in the `[DEFAULT]` section, and any options specified there
can be overridden in any of the other sections.
Swift uses paste.deploy (http://pythonpaste.org/deploy/) to manage server
configurations. Default configuration options are set in the `[DEFAULT]`
section, and any options specified there can be overridden in any of the other
sections BUT ONLY BY USING THE SYNTAX ``set option_name = value``. This is the
unfortunate way paste.deploy works and I'll try to explain it in full.
First, here's an example paste.deploy configuration file::
[DEFAULT]
name1 = globalvalue
name2 = globalvalue
name3 = globalvalue
set name4 = globalvalue
[pipeline:main]
pipeline = myapp
[app:myapp]
use = egg:mypkg#myapp
name2 = localvalue
set name3 = localvalue
set name5 = localvalue
name6 = localvalue
The resulting configuration that myapp receives is::
global {'__file__': '/etc/mypkg/wsgi.conf', 'here': '/etc/mypkg',
'name1': 'globalvalue',
'name2': 'globalvalue',
'name3': 'localvalue',
'name4': 'globalvalue',
'name5': 'localvalue',
'set name4': 'globalvalue'}
local {'name6': 'localvalue'}
So, `name1` got the global value which is fine since it's only in the `DEFAULT`
section anyway.
`name2` got the global value from `DEFAULT` even though it's seemingly
overridden in the `app:myapp` subsection. This is just the unfortunate way
paste.deploy works (at least at the time of this writing.)
`name3` got the local value from the `app:myapp` subsection because it uses
the special paste.deploy syntax of ``set option_name = value``. So, if you want
a default value for most app/filters but want to override it in one
subsection, this is how you do it.
`name4` got the global value from `DEFAULT` since it's only in that section
anyway. But, since we used the ``set`` syntax in the `DEFAULT` section even
though we shouldn't, notice we also got a ``set name4`` variable. Weird, but
probably not harmful.
`name5` got the local value from the `app:myapp` subsection since it's only
there anyway, but notice that it is in the global configuration and not the
local configuration. This is because we used the ``set`` syntax to set the
value. Again, weird, but not harmful since Swift just treats the two sets of
configuration values as one set anyway.
`name6` got the local value from `app:myapp` subsection since it's only there,
and since we didn't use the ``set`` syntax, it's only in the local
configuration and not the global one. Though, as indicated above, there is no
special distinction with Swift.
That's quite an explanation for something that should be so much simpler, but
it might be important to know how paste.deploy interprets configuration files.
The main rule to remember when working with Swift configuration files is:
.. note::
Use the ``set option_name = value`` syntax in subsections if the option is
also set in the ``[DEFAULT]`` section. Don't get in the habit of always
using the ``set`` syntax or you'll probably mess up your non-paste.deploy
configuration files.
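
To see the split described above from the application's side, this is roughly the factory signature paste.deploy invokes for `app:myapp` (a sketch; `mypkg` would expose it as the `egg:mypkg#myapp` entry point):

    def app_factory(global_conf, **local_conf):
        # global_conf: everything from [DEFAULT] plus any ``set`` options;
        # local_conf: only the plain (non-``set``) options of this section
        def myapp(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return ['global %r\nlocal %r\n' % (global_conf, local_conf)]
        return myapp
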
---------------------------
Object Server Configuration
@@ -170,10 +241,10 @@ Option Default Description
use paste.deploy entry point for the object
server. For most cases, this should be
`egg:swift#object`.
log_name object-server Label used when logging
log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Logging level
log_requests True Whether or not to log each request
set log_name object-server Label used when logging
set log_facility LOG_LOCAL0 Syslog log facility
set log_level INFO Logging level
set log_requests True Whether or not to log each request
user swift User to run as
node_timeout 3 Request timeout to external services
conn_timeout 0.5 Connection timeout to external services
@@ -229,6 +300,7 @@ Option Default Description
log_name object-auditor Label used when logging
log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Logging level
log_time 3600 Frequency of status logs in seconds.
files_per_second 20 Maximum files audited per second. Should
be tuned according to individual system
specs. 0 is unlimited.
@@ -270,9 +342,9 @@ Option Default Description
use paste.deploy entry point for the
container server. For most cases, this
should be `egg:swift#container`.
log_name container-server Label used when logging
log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Logging level
set log_name container-server Label used when logging
set log_facility LOG_LOCAL0 Syslog log facility
set log_level INFO Logging level
node_timeout 3 Request timeout to external services
conn_timeout 0.5 Connection timeout to external services
================== ================ ========================================
@@ -299,19 +371,25 @@ reclaim_age 604800 Time elapsed in seconds before a
[container-updater]
================== ================= =======================================
Option Default Description
------------------ ----------------- ---------------------------------------
log_name container-updater Label used when logging
log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Logging level
interval 300 Minimum time for a pass to take
concurrency 4 Number of updater workers to spawn
node_timeout 3 Request timeout to external services
conn_timeout 0.5 Connection timeout to external services
slowdown 0.01 Time in seconds to wait between
containers
================== ================= =======================================
======================== ================= ==================================
Option Default Description
------------------------ ----------------- ----------------------------------
log_name container-updater Label used when logging
log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Logging level
interval 300 Minimum time for a pass to take
concurrency 4 Number of updater workers to spawn
node_timeout 3 Request timeout to external
services
conn_timeout 0.5 Connection timeout to external
services
slowdown 0.01 Time in seconds to wait between
containers
account_suppression_time 60 Seconds to suppress updating an
account that has generated an
error (timeout, not yet found,
etc.)
======================== ================= ==================================
[container-auditor]
@@ -357,9 +435,9 @@ Option Default Description
use Entry point for paste.deploy for the account
server. For most cases, this should be
`egg:swift#account`.
log_name account-server Label used when logging
log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Logging level
set log_name account-server Label used when logging
set log_facility LOG_LOCAL0 Syslog log facility
set log_level INFO Logging level
================== ============== ==========================================
[account-replicator]
@@ -438,10 +516,10 @@ use Entry point for paste.deploy for
the proxy server. For most
cases, this should be
`egg:swift#proxy`.
log_name proxy-server Label used when logging
log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Log level
log_headers True If True, log headers in each
set log_name proxy-server Label used when logging
set log_facility LOG_LOCAL0 Syslog log facility
set log_level INFO Log level
set log_headers True If True, log headers in each
request
recheck_account_existence 60 Cache timeout in seconds to
send memcached for account
@@ -499,10 +577,10 @@ use Entry point for
auth. To use the swauth
set to:
`egg:swift#swauth`
log_name auth-server Label used when logging
log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Log level
log_headers True If True, log headers in
set log_name auth-server Label used when logging
set log_facility LOG_LOCAL0 Syslog log facility
set log_level INFO Log level
set log_headers True If True, log headers in
each request
reseller_prefix AUTH The naming scope for the
auth service. Swift

@@ -50,7 +50,7 @@ If you are going to use a separate partition for Swift data, be sure to add anot
`/dev/sdb1 /mnt/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0`
#. `mkdir /mnt/sdb1`
#. `mount /mnt/sdb1`
#. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 /mnt/sdb1/test`
#. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4`
#. `chown <your-user-name>:<your-group-name> /mnt/sdb1/*`
#. `mkdir /srv`
#. `for x in {1..4}; do ln -s /mnt/sdb1/$x /srv/$x; done`
@@ -77,7 +77,7 @@ If you want to use a loopback device instead of another partition, follow these
`/srv/swift-disk /mnt/sdb1 xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0`
#. `mkdir /mnt/sdb1`
#. `mount /mnt/sdb1`
#. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 /mnt/sdb1/test`
#. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4`
#. `chown <your-user-name>:<your-group-name> /mnt/sdb1/*`
#. `mkdir /srv`
#. `for x in {1..4}; do ln -s /mnt/sdb1/$x /srv/$x; done`
@@ -204,7 +204,6 @@ Do these commands as you on guest:
#. `cd ~/swift/trunk; sudo python setup.py develop`
#. Edit `~/.bashrc` and add to the end::
export PATH_TO_TEST_XFS=/mnt/sdb1/test
export SWIFT_TEST_CONFIG_FILE=/etc/swift/func_test.conf
export PATH=${PATH}:~/bin
@@ -536,7 +535,7 @@ Setting up scripts for running Swift
sudo umount /mnt/sdb1
sudo mkfs.xfs -f -i size=1024 /dev/sdb1
sudo mount /mnt/sdb1
sudo mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 /mnt/sdb1/test
sudo mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4
sudo chown <your-user-name>:<your-group-name> /mnt/sdb1/*
mkdir -p /srv/1/node/sdb1 /srv/2/node/sdb2 /srv/3/node/sdb3 /srv/4/node/sdb4
sudo rm -f /var/log/debug /var/log/messages /var/log/rsyncd.log /var/log/syslog

@@ -30,6 +30,11 @@ max_sleep_time_seconds 60 App will immediately return a 498 response
log_sleep_time_seconds 0 To allow visibility into rate limiting set
this value > 0 and all sleeps greater than
the number will be logged.
rate_buffer_seconds 5 Number of seconds the rate counter can
drop and be allowed to catch up (at a
faster than listed rate). A larger number
will result in larger spikes in rate but
better average accuracy.
account_ratelimit 0 If set, will limit all requests to
/account_name and PUTs to
/account_name/container_name. Number is in

@@ -7,18 +7,27 @@
# swift_dir = /etc/swift
# devices = /srv/node
# mount_check = true
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
[pipeline:main]
pipeline = account-server
[app:account-server]
use = egg:swift#account
# log_name = account-server
# log_facility = LOG_LOCAL0
# log_level = INFO
# You can override the default log routing for this app here:
# set log_name = account-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_requests = True
[account-replicator]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-replicator
# log_facility = LOG_LOCAL0
# log_level = INFO
# vm_test_mode = no
# log_facility = LOG_LOCAL0
# log_level = INFO
@@ -36,7 +45,10 @@ use = egg:swift#account
# reclaim_age = 86400
[account-stats]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-stats
# log_facility = LOG_LOCAL0
# log_level = INFO
# cf_account = AUTH_7abbc116-8a07-4b63-819d-02715d3e0f31
# container_name = account_stats
# proxy_server_conf = /etc/swift/proxy-server.conf
@@ -44,14 +56,20 @@ use = egg:swift#account
# log_level = INFO
[account-auditor]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-auditor
# log_facility = LOG_LOCAL0
# log_level = INFO
# Will audit, at most, 1 account per device per interval
# interval = 1800
# log_facility = LOG_LOCAL0
# log_level = INFO
[account-reaper]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-reaper
# log_facility = LOG_LOCAL0
# log_level = INFO
# concurrency = 25
# interval = 3600
# node_timeout = 10

@@ -7,6 +7,10 @@
# swift_dir = /etc/swift
# cert_file = Default is no cert; format is path like /etc/swift/auth.crt
# key_file = Default is no key; format is path like /etc/swift/auth.key
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
[pipeline:main]
pipeline = auth-server
@@ -15,11 +19,12 @@ pipeline = auth-server
use = egg:swift#auth
# Highly recommended to change this.
super_admin_key = devauth
# log_name = auth-server
# log_facility = LOG_LOCAL0
# log_level = INFO
# You can override the default log routing for this app here:
# set log_name = proxy-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# reseller_prefix = AUTH
# default_cluster_url = http://127.0.0.1:8080/v1
# token_life = 86400
# log_headers = False
# node_timeout = 10

@@ -7,20 +7,29 @@
# swift_dir = /etc/swift
# devices = /srv/node
# mount_check = true
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
[pipeline:main]
pipeline = container-server
[app:container-server]
use = egg:swift#container
# log_name = container-server
# log_facility = LOG_LOCAL0
# log_level = INFO
# You can override the default log routing for this app here:
# set log_name = container-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_requests = True
# node_timeout = 3
# conn_timeout = 0.5
[container-replicator]
# You can override the default log routing for this app here (don't use set!):
# log_name = container-replicator
# log_facility = LOG_LOCAL0
# log_level = INFO
# vm_test_mode = no
# per_diff = 1000
# concurrency = 8
@@ -31,15 +40,23 @@ use = egg:swift#container
# reclaim_age = 604800
[container-updater]
# You can override the default log routing for this app here (don't use set!):
# log_name = container-updater
# log_facility = LOG_LOCAL0
# log_level = INFO
# interval = 300
# concurrency = 4
# node_timeout = 3
# conn_timeout = 0.5
# slowdown will sleep that amount between containers
# slowdown = 0.01
# Seconds to suppress updating an account that has generated an error
# account_suppression_time = 60
[container-auditor]
# You can override the default log routing for this app here (don't use set!):
# log_name = container-auditor
# log_facility = LOG_LOCAL0
# log_level = INFO
# Will audit, at most, 1 container per device per interval
# interval = 1800

@@ -7,16 +7,21 @@
# swift_dir = /etc/swift
# devices = /srv/node
# mount_check = true
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
[pipeline:main]
pipeline = object-server
[app:object-server]
use = egg:swift#object
# log_name = object-server
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_requests = True
# You can override the default log routing for this app here:
# set log_name = object-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_requests = True
# node_timeout = 3
# conn_timeout = 0.5
# network_chunk_size = 65536
@@ -27,7 +32,10 @@ use = egg:swift#object
# mb_per_sync = 512
[object-replicator]
# You can override the default log routing for this app here (don't use set!):
# log_name = object-replicator
# log_facility = LOG_LOCAL0
# log_level = INFO
# vm_test_mode = no
# daemonize = on
# run_pause = 30
@@ -45,7 +53,10 @@ use = egg:swift#object
# reclaim_age = 604800
[object-updater]
# You can override the default log routing for this app here (don't use set!):
# log_name = object-updater
# log_facility = LOG_LOCAL0
# log_level = INFO
# interval = 300
# concurrency = 1
# node_timeout = 10
@@ -54,6 +65,10 @@ use = egg:swift#object
# slowdown = 0.01
[object-auditor]
# You can override the default log routing for this app here (don't use set!):
# log_name = object-auditor
# log_facility = LOG_LOCAL0
# log_level = INFO
# files_per_second = 20
# bytes_per_second = 10000000
# log_time = 3600

@@ -7,6 +7,10 @@
# user = swift
# cert_file = /etc/swift/proxy.crt
# key_file = /etc/swift/proxy.key
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
[pipeline:main]
# For DevAuth:
@@ -16,10 +20,11 @@ pipeline = catch_errors healthcheck cache ratelimit auth proxy-server
[app:proxy-server]
use = egg:swift#proxy
# log_name = proxy-server
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_headers = False
# You can override the default log routing for this app here:
# set log_name = proxy-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# recheck_account_existence = 60
# recheck_container_existence = 60
# object_chunk_size = 8192
@@ -39,6 +44,11 @@ use = egg:swift#proxy
# Only needed for DevAuth
[filter:auth]
use = egg:swift#auth
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# The reseller prefix will verify a token begins with this prefix before even
# attempting to validate it with the external authentication server. Also, with
# authorization, only Swift storage accounts with this prefix will be
@@ -54,10 +64,11 @@ use = egg:swift#auth
# Only needed for Swauth
[filter:swauth]
use = egg:swift#swauth
# log_name = auth-server
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_headers = False
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# The reseller prefix will verify a token begins with this prefix before even
# attempting to validate it. Also, with authorization, only Swift storage
# accounts with this prefix will be authorized by this middleware. Useful if
@@ -82,15 +93,30 @@ super_admin_key = swauthkey
[filter:healthcheck]
use = egg:swift#healthcheck
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
[filter:cache]
use = egg:swift#memcache
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# Default for memcache_servers is below, but you can specify multiple servers
# with the format: 10.1.2.3:11211,10.1.2.4:11211
# memcache_servers = 127.0.0.1:11211
[filter:ratelimit]
use = egg:swift#ratelimit
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# clock_accuracy should represent how accurate the proxy servers' system clocks
# are with each other. 1000 means that all the proxies' clock are accurate to
# each other within 1 millisecond. No ratelimit should be higher than the
@@ -99,6 +125,8 @@ use = egg:swift#ratelimit
# max_sleep_time_seconds = 60
# log_sleep_time_seconds of 0 means disabled
# log_sleep_time_seconds = 0
# allows for slow rates (e.g. running up to 5 sec's behind) to catch up.
# rate_buffer_seconds = 5
# account_ratelimit of 0 means disabled
# account_ratelimit = 0
@@ -116,14 +144,30 @@ use = egg:swift#ratelimit
[filter:domain_remap]
use = egg:swift#domain_remap
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# storage_domain = example.com
# path_root = v1
# reseller_prefixes = AUTH
[filter:catch_errors]
use = egg:swift#catch_errors
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
[filter:cname_lookup]
# Note: this middleware requires python-dnspython
use = egg:swift#cname_lookup
# You can override the default log routing for this filter here:
# set log_name = auth-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# storage_domain = example.com
# lookup_depth = 1

@@ -1,5 +1,5 @@
import gettext
__version__ = '1.1.0'
__version__ = '1.2.0'
gettext.install('swift')

@@ -229,7 +229,7 @@ class AccountReaper(Daemon):
if not containers:
break
try:
for (container, _, _, _) in containers:
for (container, _junk, _junk, _junk) in containers:
self.container_pool.spawn(self.reap_container, account,
partition, nodes, container)
self.container_pool.waitall()

@@ -149,31 +149,32 @@ class AuthController(object):
previous_prefix = ''
if '_' in row[0]:
previous_prefix = row[0].split('_', 1)[0]
msg = _(('''
msg = (_('''
THERE ARE ACCOUNTS IN YOUR auth.db THAT DO NOT BEGIN WITH YOUR NEW RESELLER
PREFIX OF "%s".
PREFIX OF "%(reseller)s".
YOU HAVE A FEW OPTIONS:
1) RUN "swift-auth-update-reseller-prefixes %s %s",
1. RUN "swift-auth-update-reseller-prefixes %(db_file)s %(reseller)s",
"swift-init auth-server restart", AND
"swift-auth-recreate-accounts -K ..." TO CREATE FRESH ACCOUNTS.
OR
2) REMOVE %s, RUN "swift-init auth-server restart", AND RUN
2. REMOVE %(db_file)s, RUN "swift-init auth-server restart", AND RUN
"swift-auth-add-user ..." TO CREATE BRAND NEW ACCOUNTS THAT WAY.
OR
3) ADD "reseller_prefix = %s" (WITHOUT THE QUOTES) TO YOUR
3. ADD "reseller_prefix = %(previous)s" (WITHOUT THE QUOTES) TO YOUR
proxy-server.conf IN THE [filter:auth] SECTION AND TO YOUR
auth-server.conf IN THE [app:auth-server] SECTION AND RUN
"swift-init proxy-server restart" AND "swift-init auth-server restart"
TO REVERT BACK TO YOUR PREVIOUS RESELLER PREFIX.
%s
''') % (self.reseller_prefix.rstrip('_'), self.db_file,
self.reseller_prefix.rstrip('_'), self.db_file,
previous_prefix, previous_prefix and ' ' or _('''
%(note)s
''') % {'reseller': self.reseller_prefix.rstrip('_'),
'db_file': self.db_file,
'previous': previous_prefix,
'note': previous_prefix and ' ' or _('''
SINCE YOUR PREVIOUS RESELLER PREFIX WAS AN EMPTY STRING, IT IS NOT
RECOMMENDED TO PERFORM OPTION 3 AS THAT WOULD MAKE SUPPORTING MULTIPLE
RESELLERS MORE DIFFICULT.
''').strip())).strip()
''').strip()}).strip()
self.logger.critical(_('CRITICAL: ') + ' '.join(msg.split()))
raise Exception('\n' + msg)
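
The rewrite above follows a pattern applied throughout this commit: translatable strings move from positional `%s` to named `%(name)s` placeholders, so translators can reorder them and a value used twice (here the reseller prefix) appears only once in the mapping. In miniature (values illustrative):

    from gettext import gettext as _

    msg = _('PREFIX OF "%(reseller)s". RUN "swift-auth-update-reseller-'
            'prefixes %(db_file)s %(reseller)s"') % {
        'reseller': 'AUTH', 'db_file': '/etc/swift/auth.db'}
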
@@ -243,7 +244,8 @@ YOU HAVE A FEW OPTIONS:
raise err
def validate_s3_sign(self, request, token):
account, user, sign = request.headers['Authorization'].split(' ')[-1].split(':')
account, user, sign = \
request.headers['Authorization'].split(' ')[-1].split(':')
msg = base64.urlsafe_b64decode(unquote(token))
rv = False
with self.get_conn() as conn:
@@ -253,7 +255,8 @@ YOU HAVE A FEW OPTIONS:
(account, user)).fetchone()
rv = (84000, account, user, row[1])
if rv:
s = base64.encodestring(hmac.new(row[0], msg, sha1).digest()).strip()
s = base64.encodestring(hmac.new(row[0], msg,
sha1).digest()).strip()
self.logger.info("orig %s, calc %s" % (sign, s))
if sign != s:
rv = False
@@ -340,10 +343,14 @@ YOU HAVE A FEW OPTIONS:
'SELECT url FROM account WHERE account = ? AND user = ?',
(account, user)).fetchone()
if row:
self.logger.info(
_('ALREADY EXISTS create_user(%s, %s, _, %s, %s) [%.02f]') %
(repr(account), repr(user), repr(admin),
repr(reseller_admin), time() - begin))
self.logger.info(_('ALREADY EXISTS create_user(%(account)s, '
'%(user)s, _, %(admin)s, %(reseller_admin)s) '
'[%(elapsed).02f]') %
{'account': repr(account),
'user': repr(user),
'admin': repr(admin),
'reseller_admin': repr(reseller_admin),
'elapsed': time() - begin})
return 'already exists'
row = conn.execute(
'SELECT url, cfaccount FROM account WHERE account = ?',
@@ -354,10 +361,14 @@ YOU HAVE A FEW OPTIONS:
else:
account_hash = self.add_storage_account()
if not account_hash:
self.logger.info(
_('FAILED create_user(%s, %s, _, %s, %s) [%.02f]') %
(repr(account), repr(user), repr(admin),
repr(reseller_admin), time() - begin))
self.logger.info(_('FAILED create_user(%(account)s, '
'%(user)s, _, %(admin)s, %(reseller_admin)s) '
'[%(elapsed).02f]') %
{'account': repr(account),
'user': repr(user),
'admin': repr(admin),
'reseller_admin': repr(reseller_admin),
'elapsed': time() - begin})
return False
url = self.default_cluster_url.rstrip('/') + '/' + account_hash
conn.execute('''INSERT INTO account
@@ -367,10 +378,11 @@ YOU HAVE A FEW OPTIONS:
(account, url, account_hash, user, password,
admin and 't' or '', reseller_admin and 't' or ''))
conn.commit()
self.logger.info(
_('SUCCESS create_user(%s, %s, _, %s, %s) = %s [%.02f]') %
(repr(account), repr(user), repr(admin), repr(reseller_admin),
repr(url), time() - begin))
self.logger.info(_('SUCCESS create_user(%(account)s, %(user)s, _, '
'%(admin)s, %(reseller_admin)s) = %(url)s [%(elapsed).02f]') %
{'account': repr(account), 'user': repr(user),
'admin': repr(admin), 'reseller_admin': repr(reseller_admin),
'url': repr(url), 'elapsed': time() - begin})
return url
def recreate_accounts(self):
@@ -435,14 +447,15 @@ YOU HAVE A FEW OPTIONS:
:param request: webob.Request object
"""
try:
_, token = split_path(request.path, minsegs=2)
_junk, token = split_path(request.path, minsegs=2)
except ValueError:
return HTTPBadRequest()
# Retrieves (TTL, account, user, cfaccount) if valid, False otherwise
headers = {}
if 'Authorization' in request.headers:
validation = self.validate_s3_sign(request, token)
headers['X-Auth-Account-Suffix'] = validation[3]
if validation:
headers['X-Auth-Account-Suffix'] = validation[3]
else:
validation = self.validate_token(token)
if not validation:
@@ -477,7 +490,8 @@ YOU HAVE A FEW OPTIONS:
:param request: webob.Request object
"""
try:
_, account_name, user_name = split_path(request.path, minsegs=3)
_junk, account_name, user_name = \
split_path(request.path, minsegs=3)
except ValueError:
return HTTPBadRequest()
create_reseller_admin = \

@@ -118,6 +118,8 @@ def http_connect(ipaddr, port, device, partition, method, path,
:param ssl: set True if SSL should be used (default: False)
:returns: HTTPConnection object
"""
if not port:
port = 443 if ssl else 80
if ssl:
conn = HTTPSConnection('%s:%s' % (ipaddr, port))
else:
@@ -150,6 +152,8 @@ def http_connect_raw(ipaddr, port, method, path, headers=None,
:param ssl: set True if SSL should be used (default: False)
:returns: HTTPConnection object
"""
if not port:
port = 443 if ssl else 80
if ssl:
conn = HTTPSConnection('%s:%s' % (ipaddr, port))
else:

@@ -76,7 +76,7 @@ except ImportError:
res = []
consts = {'true': True, 'false': False, 'null': None}
string = '(' + comments.sub('', string) + ')'
for type, val, _, _, _ in \
for type, val, _junk, _junk, _junk in \
generate_tokens(StringIO(string).readline):
if (type == OP and val not in '[]{}:,()-') or \
(type == NAME and val not in consts):
@@ -696,7 +696,7 @@ class Connection(object):
"""Convenience class to make requests that will also retry the request"""
def __init__(self, authurl, user, key, retries=5, preauthurl=None,
preauthtoken=None, snet=False):
preauthtoken=None, snet=False, starting_backoff=1):
"""
:param authurl: authentication URL
:param user: user name to authenticate as
@@ -716,6 +716,7 @@ class Connection(object):
self.token = preauthtoken
self.attempts = 0
self.snet = snet
self.starting_backoff = starting_backoff
def get_auth(self):
return get_auth(self.authurl, self.user, self.key, snet=self.snet)
@@ -723,9 +724,9 @@ class Connection(object):
def http_connection(self):
return http_connection(self.url)
def _retry(self, func, *args, **kwargs):
def _retry(self, reset_func, func, *args, **kwargs):
self.attempts = 0
backoff = 1
backoff = self.starting_backoff
while self.attempts <= self.retries:
self.attempts += 1
try:
@@ -754,10 +755,12 @@ class Connection(object):
raise
sleep(backoff)
backoff *= 2
if reset_func:
reset_func(func, *args, **kwargs)
def head_account(self):
"""Wrapper for :func:`head_account`"""
return self._retry(head_account)
return self._retry(None, head_account)
def get_account(self, marker=None, limit=None, prefix=None,
full_listing=False):
@@ -765,16 +768,16 @@ class Connection(object):
# TODO(unknown): With full_listing=True this will restart the entire
# listing with each retry. Need to make a better version that just
# retries where it left off.
return self._retry(get_account, marker=marker, limit=limit,
return self._retry(None, get_account, marker=marker, limit=limit,
prefix=prefix, full_listing=full_listing)
def post_account(self, headers):
"""Wrapper for :func:`post_account`"""
return self._retry(post_account, headers)
return self._retry(None, post_account, headers)
def head_container(self, container):
"""Wrapper for :func:`head_container`"""
return self._retry(head_container, container)
return self._retry(None, head_container, container)
def get_container(self, container, marker=None, limit=None, prefix=None,
delimiter=None, full_listing=False):
@@ -782,43 +785,55 @@ class Connection(object):
# TODO(unknown): With full_listing=True this will restart the entire
# listing with each retry. Need to make a better version that just
# retries where it left off.
return self._retry(get_container, container, marker=marker,
return self._retry(None, get_container, container, marker=marker,
limit=limit, prefix=prefix, delimiter=delimiter,
full_listing=full_listing)
def put_container(self, container, headers=None):
"""Wrapper for :func:`put_container`"""
return self._retry(put_container, container, headers=headers)
return self._retry(None, put_container, container, headers=headers)
def post_container(self, container, headers):
"""Wrapper for :func:`post_container`"""
return self._retry(post_container, container, headers)
return self._retry(None, post_container, container, headers)
def delete_container(self, container):
"""Wrapper for :func:`delete_container`"""
return self._retry(delete_container, container)
return self._retry(None, delete_container, container)
def head_object(self, container, obj):
"""Wrapper for :func:`head_object`"""
return self._retry(head_object, container, obj)
return self._retry(None, head_object, container, obj)
def get_object(self, container, obj, resp_chunk_size=None):
"""Wrapper for :func:`get_object`"""
return self._retry(get_object, container, obj,
return self._retry(None, get_object, container, obj,
resp_chunk_size=resp_chunk_size)
def put_object(self, container, obj, contents, content_length=None,
etag=None, chunk_size=65536, content_type=None,
headers=None):
"""Wrapper for :func:`put_object`"""
return self._retry(put_object, container, obj, contents,
def _default_reset(*args, **kwargs):
raise ClientException('put_object(%r, %r, ...) failure and no '
'ability to reset contents for reupload.' % (container, obj))
reset_func = _default_reset
tell = getattr(contents, 'tell', None)
seek = getattr(contents, 'seek', None)
if tell and seek:
orig_pos = tell()
reset_func = lambda *a, **k: seek(orig_pos)
return self._retry(reset_func, put_object, container, obj, contents,
content_length=content_length, etag=etag, chunk_size=chunk_size,
content_type=content_type, headers=headers)
def post_object(self, container, obj, headers):
"""Wrapper for :func:`post_object`"""
return self._retry(post_object, container, obj, headers)
return self._retry(None, post_object, container, obj, headers)
def delete_object(self, container, obj):
"""Wrapper for :func:`delete_object`"""
return self._retry(delete_object, container, obj)
return self._retry(None, delete_object, container, obj)
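
The `reset_func` argument threaded through `_retry` above exists so `put_object` can rewind a seekable body before a retried upload; without it, a retry would resend from wherever the previous attempt stopped reading. A minimal sketch of the rewind logic the new `put_object` installs:

    def make_reset_func(contents):
        tell = getattr(contents, 'tell', None)
        seek = getattr(contents, 'seek', None)
        if tell and seek:
            orig_pos = tell()                   # where the body started
            return lambda *args, **kwargs: seek(orig_pos)
        def fail(*args, **kwargs):              # unseekable body: fail loudly
            raise Exception('cannot rewind contents for reupload')
        return fail

`_retry` invokes the reset function before each new attempt, so file-like bodies are re-read from the right offset while unseekable ones raise instead of silently uploading a truncated object.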

@@ -932,7 +932,7 @@ class ContainerBroker(DatabaseBroker):
if not row:
return []
max_rowid = row['ROWID']
for _ in xrange(min(max_count, max_rowid)):
for _junk in xrange(min(max_count, max_rowid)):
row = conn.execute('''
SELECT name FROM object WHERE ROWID >= ? AND +deleted = 0
LIMIT 1
@@ -1435,7 +1435,7 @@ class AccountBroker(DatabaseBroker):
if not row:
return []
max_rowid = row['ROWID']
for _ in xrange(min(max_count, max_rowid)):
for _junk in xrange(min(max_count, max_rowid)):
row = conn.execute('''
SELECT name FROM container WHERE
ROWID >= ? AND +deleted = 0

@@ -59,8 +59,8 @@ class DevAuth(object):
if s3 or (token and token.startswith(self.reseller_prefix)):
# Note: Empty reseller_prefix will match all tokens.
# Attempt to auth my token with my auth server
groups = \
self.get_groups(env, token, memcache_client=cache_from_env(env))
groups = self.get_groups(env, token,
memcache_client=cache_from_env(env))
if groups:
env['REMOTE_USER'] = groups
user = groups and groups.split(',', 1)[0] or ''
@@ -154,10 +154,12 @@ class DevAuth(object):
timeout=expiration)
if env.get('HTTP_AUTHORIZATION'):
account, user, sign = env['HTTP_AUTHORIZATION'].split(' ')[-1].split(':')
account, user, sign = \
env['HTTP_AUTHORIZATION'].split(' ')[-1].split(':')
cfaccount = resp.getheader('x-auth-account-suffix')
path = env['PATH_INFO']
env['PATH_INFO'] = path.replace("%s:%s" % (account, user), cfaccount, 1)
env['PATH_INFO'] = \
path.replace("%s:%s" % (account, user), cfaccount, 1)
return groups

@@ -17,6 +17,7 @@ from webob import Request
from webob.exc import HTTPBadRequest
import dns.resolver
from dns.exception import DNSException
from dns.resolver import NXDOMAIN, NoAnswer
from swift.common.utils import cache_from_env, get_logger
@@ -34,7 +35,7 @@ def lookup_cname(domain): # pragma: no cover
result = answer.items[0].to_text()
result = result.rstrip('.')
return ttl, result
except DNSException:
except (DNSException, NXDOMAIN, NoAnswer):
return 0, None

@@ -27,6 +27,24 @@ class DomainRemapMiddleware(object):
account.storageurl/path_root/container/object gets translated to
account.storageurl/path_root/account/container/object
Browsers can convert a host header to lowercase, so check that reseller
prefix on the account is the correct case. This is done by comparing the
items in the reseller_prefixes config option to the found prefix. If they
match except for case, the item from reseller_prefixes will be used
instead of the found reseller prefix. The reseller_prefixes list is
exclusive. If defined, any request with an account prefix not in that list
will be ignored by this middleware. reseller_prefixes defaults to 'AUTH'.
Note that this middleware requires that container names and account names
(except as described above) must be DNS-compatible. This means that the
account name created in the system and the containers created by users
cannot exceed 63 characters or have UTF-8 characters. These are
restrictions over and above what swift requires and are not explicitly
checked. Simply put, this middleware will do a best-effort attempt to
derive account and container names from elements in the domain name and
put those derived values into the URL path (leaving the Host header
unchanged).
"""
def __init__(self, app, conf):
@@ -35,6 +53,11 @@ class DomainRemapMiddleware(object):
if self.storage_domain and self.storage_domain[0] != '.':
self.storage_domain = '.' + self.storage_domain
self.path_root = conf.get('path_root', 'v1').strip('/')
prefixes = conf.get('reseller_prefixes', 'AUTH')
self.reseller_prefixes = [x.strip() for x in prefixes.split(',')
if x.strip()]
self.reseller_prefixes_lower = [x.lower()
for x in self.reseller_prefixes]
def __call__(self, env, start_response):
if not self.storage_domain:
@@ -58,6 +81,16 @@ class DomainRemapMiddleware(object):
return resp(env, start_response)
if '_' not in account and '-' in account:
account = account.replace('-', '_', 1)
account_reseller_prefix = account.split('_', 1)[0].lower()
if account_reseller_prefix not in self.reseller_prefixes_lower:
# account prefix is not in config list. bail.
return self.app(env, start_response)
prefix_index = self.reseller_prefixes_lower.index(
account_reseller_prefix)
real_prefix = self.reseller_prefixes[prefix_index]
if not account.startswith(real_prefix):
account_suffix = account[len(real_prefix):]
account = real_prefix + account_suffix
path = env['PATH_INFO'].strip('/')
new_path_parts = ['', self.path_root, account]
if container:
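
To make the new prefix handling concrete, here is a hypothetical remap with the defaults (`reseller_prefixes = AUTH`, `path_root = v1`, `storage_domain = example.com`):

    Host: container.auth-acct.example.com, request: GET /obj
      -> 'auth-acct' becomes 'auth_acct' (first '-' converted to '_')
      -> the lowercase prefix 'auth' matches 'AUTH' in the configured list,
         so the account is corrected to 'AUTH_acct'
      -> the request is rewritten to GET /v1/AUTH_acct/container/obj

Only PATH_INFO is rewritten; the Host header is left unchanged, as the docstring notes.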

@@ -20,7 +20,7 @@ from swift.common.utils import split_path, cache_from_env, get_logger
from swift.proxy.server import get_container_memcache_key
class MaxSleepTimeHit(Exception):
class MaxSleepTimeHitError(Exception):
pass
@@ -32,6 +32,8 @@ class RateLimitMiddleware(object):
configurable.
"""
BLACK_LIST_SLEEP = 1
def __init__(self, app, conf, logger=None):
self.app = app
if logger:
@@ -39,17 +41,16 @@
else:
self.logger = get_logger(conf)
self.account_ratelimit = float(conf.get('account_ratelimit', 0))
self.max_sleep_time_seconds = float(conf.get('max_sleep_time_seconds',
60))
self.log_sleep_time_seconds = float(conf.get('log_sleep_time_seconds',
0))
self.max_sleep_time_seconds = \
float(conf.get('max_sleep_time_seconds', 60))
self.log_sleep_time_seconds = \
float(conf.get('log_sleep_time_seconds', 0))
self.clock_accuracy = int(conf.get('clock_accuracy', 1000))
self.rate_buffer_seconds = int(conf.get('rate_buffer_seconds', 5))
self.ratelimit_whitelist = [acc.strip() for acc in
conf.get('account_whitelist', '').split(',')
if acc.strip()]
conf.get('account_whitelist', '').split(',') if acc.strip()]
self.ratelimit_blacklist = [acc.strip() for acc in
conf.get('account_blacklist', '').split(',')
if acc.strip()]
conf.get('account_blacklist', '').split(',') if acc.strip()]
self.memcache_client = None
conf_limits = []
for conf_key in conf.keys():
@@ -92,8 +93,7 @@ class RateLimitMiddleware(object):
return None
def get_ratelimitable_key_tuples(self, req_method, account_name,
container_name=None,
obj_name=None):
container_name=None, obj_name=None):
"""
Returns a list of key (used in memcache), ratelimit tuples. Keys
should be checked in order.
@@ -105,19 +105,20 @@
"""
keys = []
if self.account_ratelimit and account_name and (
not (container_name or obj_name) or
(container_name and not obj_name and req_method == 'PUT')):
not (container_name or obj_name) or
(container_name and not obj_name and
req_method in ('PUT', 'DELETE'))):
keys.append(("ratelimit/%s" % account_name,
self.account_ratelimit))
if account_name and container_name and (
(not obj_name and req_method in ('GET', 'HEAD')) or
(obj_name and req_method in ('PUT', 'DELETE'))):
(not obj_name and req_method in ('GET', 'HEAD')) or
(obj_name and req_method in ('PUT', 'DELETE'))):
container_size = None
memcache_key = get_container_memcache_key(account_name,
container_name)
container_info = self.memcache_client.get(memcache_key)
if type(container_info) == dict:
if isinstance(container_info, dict):
container_size = container_info.get('container_size', 0)
container_rate = self.get_container_maxrate(container_size)
if container_rate:
@@ -129,31 +130,32 @@
def _get_sleep_time(self, key, max_rate):
'''
Returns the amount of time (a float in seconds) that the app
should sleep. Throws a MaxSleepTimeHit exception if maximum
sleep time is exceeded.
should sleep.
:param key: a memcache key
:param max_rate: maximum rate allowed in requests per second
:raises: MaxSleepTimeHitError if max sleep time is exceeded.
'''
now_m = int(round(time.time() * self.clock_accuracy))
time_per_request_m = int(round(self.clock_accuracy / max_rate))
running_time_m = self.memcache_client.incr(key,
delta=time_per_request_m)
need_to_sleep_m = 0
request_time_limit = now_m + (time_per_request_m * max_rate)
if running_time_m < now_m:
if (now_m - running_time_m >
self.rate_buffer_seconds * self.clock_accuracy):
next_avail_time = int(now_m + time_per_request_m)
self.memcache_client.set(key, str(next_avail_time),
serialize=False)
elif running_time_m - now_m - time_per_request_m > 0:
need_to_sleep_m = running_time_m - now_m - time_per_request_m
else:
need_to_sleep_m = \
max(running_time_m - now_m - time_per_request_m, 0)
max_sleep_m = self.max_sleep_time_seconds * self.clock_accuracy
if max_sleep_m - need_to_sleep_m <= self.clock_accuracy * 0.01:
# treat as no-op decrement time
self.memcache_client.decr(key, delta=time_per_request_m)
raise MaxSleepTimeHit("Max Sleep Time Exceeded: %s" %
need_to_sleep_m)
raise MaxSleepTimeHitError("Max Sleep Time Exceeded: %s" %
need_to_sleep_m)
return float(need_to_sleep_m) / self.clock_accuracy
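
All the `_m` quantities above are in ticks of 1/clock_accuracy seconds. A worked example, assuming clock_accuracy = 1000 and max_rate = 10 requests per second:

    time_per_request_m = 1000 / 10 = 100   # one request per 100 ms
    # memcache incr(key, delta=100) returns the new running total; suppose
    # it comes back 250 ticks ahead of now_m:
    need_to_sleep_m = 250 - 100 = 150      # sleep 0.150 seconds
    # if instead the counter had fallen more than rate_buffer_seconds
    # (5 s = 5000 ticks) behind now_m, the key is reset to now_m +
    # time_per_request_m, so an idle account cannot bank a huge burst

If the computed sleep comes within a hundredth of a second of max_sleep_time_seconds, the increment is undone and MaxSleepTimeHitError is raised, which becomes the 498 response below.
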
@@ -168,26 +170,25 @@
'''
if account_name in self.ratelimit_blacklist:
self.logger.error(_('Returning 497 because of blacklisting'))
eventlet.sleep(self.BLACK_LIST_SLEEP)
return Response(status='497 Blacklisted',
body='Your account has been blacklisted', request=req)
if account_name in self.ratelimit_whitelist:
return None
for key, max_rate in self.get_ratelimitable_key_tuples(
req.method,
account_name,
container_name=container_name,
obj_name=obj_name):
req.method, account_name, container_name=container_name,
obj_name=obj_name):
try:
need_to_sleep = self._get_sleep_time(key, max_rate)
if self.log_sleep_time_seconds and \
need_to_sleep > self.log_sleep_time_seconds:
self.logger.info(_("Ratelimit sleep log: %(sleep)s for "
self.logger.warning(_("Ratelimit sleep log: %(sleep)s for "
"%(account)s/%(container)s/%(object)s"),
{'sleep': need_to_sleep, 'account': account_name,
'container': container_name, 'object': obj_name})
if need_to_sleep > 0:
eventlet.sleep(need_to_sleep)
except MaxSleepTimeHit, e:
except MaxSleepTimeHitError, e:
self.logger.error(_('Returning 498 because of ops rate '
'limiting (Max Sleep) %s') % str(e))
error_resp = Response(status='498 Rate Limited',

@@ -23,6 +23,9 @@ from traceback import format_exc
from urllib import quote, unquote
from urlparse import urlparse
from uuid import uuid4
from hashlib import md5, sha1
import hmac
import base64
from eventlet.timeout import Timeout
from eventlet import TimeoutError
@@ -124,8 +127,9 @@ class Swauth(object):
env['HTTP_X_CF_TRANS_ID'] = 'tx' + str(uuid4())
if env.get('PATH_INFO', '').startswith(self.auth_prefix):
return self.handle(env, start_response)
s3 = env.get('HTTP_AUTHORIZATION')
token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
if token and token.startswith(self.reseller_prefix):
if s3 or (token and token.startswith(self.reseller_prefix)):
# Note: Empty reseller_prefix will match all tokens.
groups = self.get_groups(env, token)
if groups:
@@ -133,7 +137,8 @@ class Swauth(object):
user = groups and groups.split(',', 1)[0] or ''
# We know the proxy logs the token, so we augment it just a bit
# to also log the authenticated user.
env['HTTP_X_AUTH_TOKEN'] = '%s,%s' % (user, token)
env['HTTP_X_AUTH_TOKEN'] = \
'%s,%s' % (user, 's3' if s3 else token)
env['swift.authorize'] = self.authorize
env['swift.clean_acl'] = clean_acl
else:
@@ -193,6 +198,43 @@ class Swauth(object):
expires, groups = cached_auth_data
if expires < time():
groups = None
if env.get('HTTP_AUTHORIZATION'):
account = env['HTTP_AUTHORIZATION'].split(' ')[1]
account, user, sign = account.split(':')
path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user))
resp = self.make_request(env, 'GET', path).get_response(self.app)
if resp.status_int // 100 != 2:
return None
if 'x-object-meta-account-id' in resp.headers:
account_id = resp.headers['x-object-meta-account-id']
else:
path = quote('/v1/%s/%s' % (self.auth_account, account))
resp2 = self.make_request(env, 'HEAD',
path).get_response(self.app)
if resp2.status_int // 100 != 2:
return None
account_id = resp2.headers['x-container-meta-account-id']
path = env['PATH_INFO']
env['PATH_INFO'] = path.replace("%s:%s" % (account, user),
account_id, 1)
detail = json.loads(resp.body)
password = detail['auth'].split(':')[-1]
msg = base64.urlsafe_b64decode(unquote(token))
s = base64.encodestring(hmac.new(detail['auth'].split(':')[-1],
msg, sha1).digest()).strip()
if s != sign:
return None
groups = [g['name'] for g in detail['groups']]
if '.admin' in groups:
groups.remove('.admin')
groups.append(account_id)
groups = ','.join(groups)
return groups
if not groups:
path = quote('/v1/%s/.token_%s/%s' %
(self.auth_account, token[-1], token))
@@ -300,8 +342,8 @@ class Swauth(object):
req.start_time = time()
handler = None
try:
version, account, user, _ = split_path(req.path_info, minsegs=1,
maxsegs=4, rest_with_last=True)
version, account, user, _junk = split_path(req.path_info,
minsegs=1, maxsegs=4, rest_with_last=True)
except ValueError:
return HTTPNotFound(request=req)
if version in ('v1', 'v1.0', 'auth'):
@@ -840,6 +882,15 @@ class Swauth(object):
return HTTPForbidden(request=req)
elif not self.is_account_admin(req, account):
return HTTPForbidden(request=req)
path = quote('/v1/%s/%s' % (self.auth_account, account))
resp = self.make_request(req.environ, 'HEAD',
path).get_response(self.app)
if resp.status_int // 100 != 2:
raise Exception('Could not retrieve account id value: %s %s' %
(path, resp.status))
headers = {'X-Object-Meta-Account-Id':
resp.headers['x-container-meta-account-id']}
# Create the object in the main auth account (this object represents
# the user)
path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user))
@@ -848,9 +899,10 @@ class Swauth(object):
groups.append('.admin')
if reseller_admin:
groups.append('.reseller_admin')
resp = self.make_request(req.environ, 'PUT', path, json.dumps({'auth':
'plaintext:%s' % key,
'groups': [{'name': g} for g in groups]})).get_response(self.app)
resp = self.make_request(req.environ, 'PUT', path,
json.dumps({'auth': 'plaintext:%s' % key,
'groups': [{'name': g} for g in groups]}),
headers=headers).get_response(self.app)
if resp.status_int == 404:
return HTTPNotFound(request=req)
if resp.status_int // 100 != 2:

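On the wire, the client computes the same signature the middleware checks above: a base64-encoded HMAC-SHA1 over the canonical string-to-sign, keyed with the user's password. A minimal sketch of the client side, ignoring the canonicalized x-amz-* headers for brevity (sign_request and its arguments are illustrative, not part of the patch)::

    import base64
    import hmac
    from hashlib import sha1

    def sign_request(password, method, content_md5, content_type, date, path):
        # Build the string-to-sign the same way the middleware does, then
        # HMAC-SHA1 it with the password and base64-encode the digest.
        string_to_sign = '\n'.join(
            [method, content_md5, content_type, date, path])
        return base64.encodestring(
            hmac.new(password, string_to_sign, sha1).digest()).strip()

    signature = sign_request('testing', 'GET', '', '',
                             'Thu, 27 Jan 2011 00:00:00 GMT', '/c/o')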
@ -32,8 +32,8 @@ To add this middleware to your configuration, add the swift3 middleware
in front of the auth middleware, and before any other middleware that
looks at swift requests (like rate limiting).
To set up your client, the access key will be the account string that
should look like AUTH_d305e9dbedbc47df8b25ab46f3152f81, and the
To set up your client, the access key will be the concatenation of the
account and user strings that should look like test:tester, and the
secret access key is the account password. The host should also point
to the swift storage hostname. It will also have to use the old-style
calling format, not the hostname-based container format.
@ -42,7 +42,7 @@ An example client using the python boto library might look like the
following for an SAIO setup::
connection = boto.s3.connection.S3Connection(
aws_access_key_id='AUTH_d305e9dbedbc47df8b25ab46f3152f81',
aws_access_key_id='test:tester',
aws_secret_access_key='testing',
port=8080,
host='127.0.0.1',
@ -139,11 +139,9 @@ class ServiceController(Controller):
return get_err_response('InvalidURI')
containers = loads(''.join(list(body_iter)))
resp = Response(content_type='text/xml')
resp.status = 200
# we don't keep the creation time of a bucket (s3cmd doesn't
# work without that) so we use something bogus.
resp.body = '<?xml version="1.0" encoding="UTF-8"?>' \
body = '<?xml version="1.0" encoding="UTF-8"?>' \
'<ListAllMyBucketsResult ' \
'xmlns="http://doc.s3.amazonaws.com/2006-03-01">' \
'<Buckets>%s</Buckets>' \
@ -151,6 +149,7 @@ class ServiceController(Controller):
% ("".join(['<Bucket><Name>%s</Name><CreationDate>' \
'2009-02-03T16:45:09.000Z</CreationDate></Bucket>' %
xml_escape(i['name']) for i in containers]))
resp = Response(status=200, content_type='text/xml', body=body)
return resp
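For reference, the assembled listing for a single container named "photos" comes out roughly as follows (reconstructed from the template above; the closing tag sits in context lines elided from this hunk)::

    <?xml version="1.0" encoding="UTF-8"?>
    <ListAllMyBucketsResult xmlns="http://doc.s3.amazonaws.com/2006-03-01">
      <Buckets>
        <Bucket><Name>photos</Name>
        <CreationDate>2009-02-03T16:45:09.000Z</CreationDate></Bucket>
      </Buckets>
    </ListAllMyBucketsResult>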
@ -400,8 +399,9 @@ class Swift3Middleware(object):
h += header.lower() + ":" + str(req.headers[header]) + "\n"
h += req.path
try:
account, user, _ = req.headers['Authorization'].split(' ')[-1].split(':')
except:
account, user, _junk = \
req.headers['Authorization'].split(' ')[-1].split(':')
except Exception:
return None, None
token = base64.urlsafe_b64encode(h)
return '%s:%s' % (account, user), token

@ -239,7 +239,7 @@ class RingBuilder(object):
(sum(d['parts'] for d in self.devs if d is not None),
self.parts * self.replicas))
if stats:
dev_usage = array('I', (0 for _ in xrange(len(self.devs))))
dev_usage = array('I', (0 for _junk in xrange(len(self.devs))))
for part in xrange(self.parts):
zones = {}
for replica in xrange(self.replicas):
@ -342,8 +342,9 @@ class RingBuilder(object):
'%08x.%04x' % (dev['parts_wanted'], randint(0, 0xffff))
available_devs = sorted((d for d in self.devs if d is not None),
key=lambda x: x['sort_key'])
self._replica2part2dev = [array('H') for _ in xrange(self.replicas)]
for _ in xrange(self.parts):
self._replica2part2dev = \
[array('H') for _junk in xrange(self.replicas)]
for _junk in xrange(self.parts):
other_zones = array('H')
for replica in xrange(self.replicas):
index = len(available_devs) - 1
@ -365,7 +366,7 @@ class RingBuilder(object):
index = mid + 1
available_devs.insert(index, dev)
other_zones.append(dev['zone'])
self._last_part_moves = array('B', (0 for _ in xrange(self.parts)))
self._last_part_moves = array('B', (0 for _junk in xrange(self.parts)))
self._last_part_moves_epoch = int(time())
for dev in self.devs:
del dev['sort_key']

@ -382,7 +382,7 @@ class NamedFormatter(logging.Formatter):
return msg
def get_logger(conf, name=None, log_to_console=False):
def get_logger(conf, name=None, log_to_console=False, log_route=None):
"""
Get the current system logger using config settings.
@ -396,33 +396,41 @@ def get_logger(conf, name=None, log_to_console=False):
:param name: Name of the logger
:param log_to_console: Add handler which writes to console on stderr
:param log_route: Route for the logging, not emitted to the log, just used
to separate logging configurations
"""
root_logger = logging.getLogger()
if hasattr(get_logger, 'handler') and get_logger.handler:
root_logger.removeHandler(get_logger.handler)
get_logger.handler.close()
get_logger.handler = None
if not conf:
conf = {}
if not hasattr(get_logger, 'root_logger_configured'):
get_logger.root_logger_configured = True
get_logger(conf, name, log_to_console, log_route='root')
if name is None:
name = conf.get('log_name', 'swift')
if not log_route:
log_route = name
if log_route == 'root':
logger = logging.getLogger()
else:
logger = logging.getLogger(log_route)
if not hasattr(get_logger, 'handlers'):
get_logger.handlers = {}
facility = getattr(SysLogHandler, conf.get('log_facility', 'LOG_LOCAL0'),
SysLogHandler.LOG_LOCAL0)
if facility in get_logger.handlers:
logger.removeHandler(get_logger.handlers[facility])
get_logger.handlers[facility].close()
del get_logger.handlers[facility]
if log_to_console:
# check if a previous call to get_logger already added a console logger
if hasattr(get_logger, 'console') and get_logger.console:
root_logger.removeHandler(get_logger.console)
logger.removeHandler(get_logger.console)
get_logger.console = logging.StreamHandler(sys.__stderr__)
root_logger.addHandler(get_logger.console)
if conf is None:
root_logger.setLevel(logging.INFO)
adapted_logger = LogAdapter(root_logger)
return adapted_logger
if name is None:
name = conf.get('log_name', 'swift')
get_logger.handler = SysLogHandler(address='/dev/log',
facility=getattr(SysLogHandler,
conf.get('log_facility', 'LOG_LOCAL0'),
SysLogHandler.LOG_LOCAL0))
root_logger.addHandler(get_logger.handler)
root_logger.setLevel(
logger.addHandler(get_logger.console)
get_logger.handlers[facility] = \
SysLogHandler(address='/dev/log', facility=facility)
logger.addHandler(get_logger.handlers[facility])
logger.setLevel(
getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO))
adapted_logger = LogAdapter(root_logger)
adapted_logger = LogAdapter(logger)
formatter = NamedFormatter(name, adapted_logger)
get_logger.handler.setFormatter(formatter)
get_logger.handlers[facility].setFormatter(formatter)
if hasattr(get_logger, 'console'):
get_logger.console.setFormatter(formatter)
return adapted_logger
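With the rework above, callers can keep distinct logging configurations per route while the handlers are tracked per syslog facility. A minimal usage sketch, assuming only the conf keys referenced in this hunk (the conf values here are illustrative)::

    conf = {'log_name': 'proxy-server',
            'log_facility': 'LOG_LOCAL1',
            'log_level': 'DEBUG'}
    # returns a LogAdapter wrapping the 'proxy-access' logger
    logger = get_logger(conf, log_route='proxy-access')
    logger.info('one SysLogHandler per facility, replaced on reconfigure')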
@ -820,7 +828,7 @@ def audit_location_generator(devices, datadir, mount_check=True, logger=None):
yield path, device, partition
def ratelimit_sleep(running_time, max_rate, incr_by=1):
def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5):
'''
Will eventlet.sleep() for the appropriate time so that the max_rate
is never exceeded. If max_rate is 0, will not ratelimit. The
@ -834,13 +842,17 @@ def ratelimit_sleep(running_time, max_rate, incr_by=1):
:param incr_by: How much to increment the counter. Useful if you want
to ratelimit 1024 bytes/sec and have differing sizes
of requests. Must be >= 0.
:param rate_buffer: Number of seconds the rate counter can drop and be
allowed to catch up (at a faster-than-listed rate).
A larger number will result in larger spikes in rate
but better average accuracy.
'''
if not max_rate or incr_by <= 0:
return running_time
clock_accuracy = 1000.0
now = time.time() * clock_accuracy
time_per_request = clock_accuracy * (float(incr_by) / max_rate)
if running_time < now:
if now - running_time > rate_buffer * clock_accuracy:
running_time = now
elif running_time - now > time_per_request:
eventlet.sleep((running_time - now) / clock_accuracy)

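The new rate_buffer knob only matters once the counter has fallen behind; the day-to-day calling pattern is unchanged. A minimal sketch, assuming swift.common.utils is importable::

    import time
    from swift.common.utils import ratelimit_sleep

    running_time = 0
    start = time.time()
    for i in xrange(20):
        # sleeps as needed so the loop averages 10 iterations per second;
        # running_time carries the limiter's state between calls
        running_time = ratelimit_sleep(running_time, max_rate=10)
    print 'elapsed: %.2fs' % (time.time() - start)  # roughly 2 seconds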
@ -19,6 +19,7 @@ import signal
import sys
import time
from random import random, shuffle
from tempfile import mkstemp
from eventlet import spawn, patcher, Timeout, TimeoutError
@ -51,6 +52,10 @@ class ContainerUpdater(Daemon):
self.no_changes = 0
self.successes = 0
self.failures = 0
self.account_suppressions = {}
self.account_suppression_time = \
float(conf.get('account_suppression_time', 60))
self.new_account_suppressions = None
def get_account_ring(self):
"""Get the account ring. Load it if it hasn't been yet."""
@ -80,6 +85,19 @@ class ContainerUpdater(Daemon):
shuffle(paths)
return paths
def _load_suppressions(self, filename):
try:
with open(filename, 'r') as tmpfile:
for line in tmpfile:
account, until = line.split()
until = float(until)
self.account_suppressions[account] = until
except Exception:
self.logger.exception(
_('ERROR loading suppressions from %s: ') % filename)
finally:
os.unlink(filename)
def run_forever(self): # pragma: no cover
"""
Run the updater continuously.
@ -88,21 +106,33 @@ class ContainerUpdater(Daemon):
while True:
self.logger.info(_('Begin container update sweep'))
begin = time.time()
pids = []
now = time.time()
expired_suppressions = \
[a for a, u in self.account_suppressions.iteritems() if u < now]
for account in expired_suppressions:
del self.account_suppressions[account]
pid2filename = {}
# read from account ring to ensure it's fresh
self.get_account_ring().get_nodes('')
for path in self.get_paths():
while len(pids) >= self.concurrency:
pids.remove(os.wait()[0])
while len(pid2filename) >= self.concurrency:
pid = os.wait()[0]
try:
self._load_suppressions(pid2filename[pid])
finally:
del pid2filename[pid]
fd, tmpfilename = mkstemp()
os.close(fd)
pid = os.fork()
if pid:
pids.append(pid)
pid2filename[pid] = tmpfilename
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
patcher.monkey_patch(all=False, socket=True)
self.no_changes = 0
self.successes = 0
self.failures = 0
self.new_account_suppressions = open(tmpfilename, 'w')
forkbegin = time.time()
self.container_sweep(path)
elapsed = time.time() - forkbegin
@ -114,8 +144,12 @@ class ContainerUpdater(Daemon):
'success': self.successes, 'fail': self.failures,
'no_change': self.no_changes})
sys.exit()
while pids:
pids.remove(os.wait()[0])
while pid2filename:
pid = os.wait()[0]
try:
self._load_suppressions(pid2filename[pid])
finally:
del pid2filename[pid]
elapsed = time.time() - begin
self.logger.info(_('Container update sweep completed: %.02fs'),
elapsed)
@ -165,6 +199,8 @@ class ContainerUpdater(Daemon):
# definitely doesn't have up to date statistics.
if float(info['put_timestamp']) <= 0:
return
if self.account_suppressions.get(info['account'], 0) > time.time():
return
if info['put_timestamp'] > info['reported_put_timestamp'] or \
info['delete_timestamp'] > info['reported_delete_timestamp'] \
or info['object_count'] != info['reported_object_count'] or \
@ -195,6 +231,11 @@ class ContainerUpdater(Daemon):
self.logger.debug(
_('Update report failed for %(container)s %(dbfile)s'),
{'container': container, 'dbfile': dbfile})
self.account_suppressions[info['account']] = until = \
time.time() + self.account_suppression_time
if self.new_account_suppressions:
print >>self.new_account_suppressions, \
info['account'], until
else:
self.no_changes += 1

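The parent/child handoff above rides on a one-line-per-account temp file. A minimal sketch of the format _load_suppressions() expects, with an illustrative path and account name::

    import time

    with open('/tmp/suppressions.example', 'w') as f:
        # one "account until-timestamp" pair per line, whitespace-delimited
        print >>f, 'AUTH_test', time.time() + 60

    with open('/tmp/suppressions.example') as f:
        suppressions = dict((account, float(until))
                            for account, until in
                            (line.split() for line in f))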
@ -38,6 +38,7 @@ class ObjectAuditor(Daemon):
self.max_files_per_second = float(conf.get('files_per_second', 20))
self.max_bytes_per_second = float(conf.get('bytes_per_second',
10000000))
self.log_time = int(conf.get('log_time', 3600))
self.files_running_time = 0
self.bytes_running_time = 0
self.bytes_processed = 0
@ -46,7 +47,6 @@ class ObjectAuditor(Daemon):
self.passes = 0
self.quarantines = 0
self.errors = 0
self.log_time = 3600 # once an hour
def run_forever(self):
"""Run the object audit until stopped."""

@ -407,7 +407,8 @@ class ObjectReplicator(Daemon):
conn.getresponse().read()
self.suffix_sync += len(suffixes)
except (Exception, Timeout):
self.logger.exception(_("Error syncing with node: %s") % node)
self.logger.exception(_("Error syncing with node: %s") %
node)
self.suffix_count += len(local_hash)
except (Exception, Timeout):
self.logger.exception(_("Error syncing partition"))

@ -51,6 +51,7 @@ ASYNCDIR = 'async_pending'
PICKLE_PROTOCOL = 2
METADATA_KEY = 'user.swift.metadata'
MAX_OBJECT_NAME_LENGTH = 1024
KEEP_CACHE_SIZE = (5 * 1024 * 1024)
def read_metadata(fd):
@ -113,6 +114,7 @@ class DiskFile(object):
self.meta_file = None
self.data_file = None
self.fp = None
self.keep_cache = False
if not os.path.exists(self.datadir):
return
files = sorted(os.listdir(self.datadir), reverse=True)
@ -150,12 +152,12 @@ class DiskFile(object):
if chunk:
read += len(chunk)
if read - dropped_cache > (1024 * 1024):
drop_buffer_cache(self.fp.fileno(), dropped_cache,
self.drop_cache(self.fp.fileno(), dropped_cache,
read - dropped_cache)
dropped_cache = read
yield chunk
else:
drop_buffer_cache(self.fp.fileno(), dropped_cache,
self.drop_cache(self.fp.fileno(), dropped_cache,
read - dropped_cache)
break
finally:
@ -226,7 +228,7 @@ class DiskFile(object):
timestamp = normalize_timestamp(metadata['X-Timestamp'])
write_metadata(fd, metadata)
if 'Content-Length' in metadata:
drop_buffer_cache(fd, 0, int(metadata['Content-Length']))
self.drop_cache(fd, 0, int(metadata['Content-Length']))
tpool.execute(os.fsync, fd)
invalidate_hash(os.path.dirname(self.datadir))
renamer(tmppath, os.path.join(self.datadir, timestamp + extension))
@ -248,6 +250,11 @@ class DiskFile(object):
if err.errno != errno.ENOENT:
raise
def drop_cache(self, fd, offset, length):
"""Method for no-oping buffer cache drop method."""
if not self.keep_cache:
drop_buffer_cache(fd, offset, length)
class ObjectController(object):
"""Implements the WSGI application for the Swift Object Server."""
@ -482,6 +489,10 @@ class ObjectController(object):
response.etag = file.metadata['ETag']
response.last_modified = float(file.metadata['X-Timestamp'])
response.content_length = int(file.metadata['Content-Length'])
if response.content_length < KEEP_CACHE_SIZE and \
'X-Auth-Token' not in request.headers and \
'X-Storage-Token' not in request.headers:
file.keep_cache = True
if 'Content-Encoding' in file.metadata:
response.content_encoding = file.metadata['Content-Encoding']
return request.get_response(response)
@ -566,7 +577,7 @@ class ObjectController(object):
if suffix:
recalculate_hashes(path, suffix.split('-'))
return Response()
_, hashes = get_hashes(path, do_listdir=False)
_junk, hashes = get_hashes(path, do_listdir=False)
return Response(body=pickle.dumps(hashes))
def __call__(self, env, start_response):

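The keep_cache flag above gates calls to drop_buffer_cache(), which Swift backs with posix_fadvise(2) so that already-served pages can be evicted. A rough sketch of such a helper, assuming a Linux libc (the real helper lives in swift.common.utils)::

    import ctypes
    import ctypes.util

    POSIX_FADV_DONTNEED = 4
    _libc = ctypes.CDLL(ctypes.util.find_library('c'))

    def drop_buffer_cache(fd, offset, length):
        # advise the kernel these pages won't be read again, so the
        # page cache is free to evict them
        _libc.posix_fadvise(fd, ctypes.c_uint64(offset),
                            ctypes.c_uint64(length), POSIX_FADV_DONTNEED)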
@ -29,6 +29,7 @@ from urllib import unquote, quote
import uuid
import functools
from hashlib import md5
from random import shuffle
from eventlet import sleep, TimeoutError
from eventlet.timeout import Timeout
@ -707,6 +708,7 @@ class ObjectController(Controller):
return aresp
partition, nodes = self.app.object_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
shuffle(nodes)
resp = self.GETorHEAD_base(req, _('Object'), partition,
self.iter_nodes(partition, nodes, self.app.object_ring),
req.path_info, self.app.object_ring.replica_count)

@ -40,7 +40,7 @@ class AccessLogProcessor(object):
'''given a raw access log line, return a dict of the good parts'''
d = {}
try:
(_,
(unused,
server,
client_ip,
lb_ip,
@ -57,7 +57,8 @@ class AccessLogProcessor(object):
etag,
trans_id,
headers,
processing_time) = (unquote(x) for x in raw_log[16:].split(' '))
processing_time) = (unquote(x) for x in
raw_log[16:].split(' ')[:18])
except ValueError:
self.logger.debug(_('Bad line data: %s') % repr(raw_log))
return {}

@ -55,7 +55,8 @@ class AccountStat(Daemon):
self.logger.info(_("Gathering account stats"))
start = time.time()
self.find_and_process()
self.logger.info(_("Gathering account stats complete (%0.2f minutes)") %
self.logger.info(
_("Gathering account stats complete (%0.2f minutes)") %
((time.time() - start) / 60))
def find_and_process(self):
@ -70,8 +71,8 @@ class AccountStat(Daemon):
# Account Name, Container Count, Object Count, Bytes Used
for device in os.listdir(self.devices):
if self.mount_check and not check_mount(self.devices, device):
self.logger.error(_("Device %s is not mounted, skipping.") %
device)
self.logger.error(
_("Device %s is not mounted, skipping.") % device)
continue
accounts = os.path.join(self.devices,
device,
@ -87,11 +88,11 @@ class AccountStat(Daemon):
broker = AccountBroker(db_path)
if not broker.is_deleted():
(account_name,
_, _, _,
_junk, _junk, _junk,
container_count,
object_count,
bytes_used,
_, _) = broker.get_info()
_junk, _junk) = broker.get_info()
line_data = '"%s",%d,%d,%d\n' % (
account_name, container_count,
object_count, bytes_used)

@ -280,7 +280,8 @@ class LogProcessorDaemon(Daemon):
logs_to_process = self.log_processor.get_data_list(lookback_start,
lookback_end,
already_processed_files)
self.logger.info(_('loaded %d files to process') % len(logs_to_process))
self.logger.info(_('loaded %d files to process') %
len(logs_to_process))
if not logs_to_process:
self.logger.info(_("Log processing done (%0.2f minutes)") %
((time.time() - start) / 60))
@ -365,7 +366,7 @@ def multiprocess_collate(processor_args, logs_to_process, worker_count):
results = []
in_queue = multiprocessing.Queue()
out_queue = multiprocessing.Queue()
for _ in range(worker_count):
for _junk in range(worker_count):
p = multiprocessing.Process(target=collate_worker,
args=(processor_args,
in_queue,
@ -374,7 +375,7 @@ def multiprocess_collate(processor_args, logs_to_process, worker_count):
results.append(p)
for x in logs_to_process:
in_queue.put(x)
for _ in range(worker_count):
for _junk in range(worker_count):
in_queue.put(None)
count = 0
while True:

@ -26,7 +26,7 @@ class StatsLogProcessor(object):
data_object_name):
'''generate hourly groupings of data from one stats log file'''
account_totals = {}
year, month, day, hour, _ = data_object_name.split('/')
year, month, day, hour, _junk = data_object_name.split('/')
for line in obj_stream:
if not line:
continue

@ -37,6 +37,36 @@ def tmpfile(content):
finally:
os.unlink(file_name)
xattr_data = {}
def _get_inode(fd):
if not isinstance(fd, int):
try:
fd = fd.fileno()
except AttributeError:
return os.stat(fd).st_ino
return os.fstat(fd).st_ino
def _setxattr(fd, k, v):
inode = _get_inode(fd)
data = xattr_data.get(inode, {})
data[k] = v
xattr_data[inode] = data
def _getxattr(fd, k):
inode = _get_inode(fd)
data = xattr_data.get(inode, {}).get(k)
if not data:
raise IOError
return data
import xattr
xattr.setxattr = _setxattr
xattr.getxattr = _getxattr
class MockTrue(object):
"""

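Because the replacement functions key off the inode, the mock behaves consistently whether it is handed a raw fd or a file object. A quick sketch, assuming test.unit has already been imported so the monkeypatch is in place (the path and value are illustrative)::

    import os
    import xattr  # setxattr/getxattr already replaced by the mock above

    fd = os.open('/tmp/xattr.example', os.O_CREAT | os.O_RDWR)
    xattr.setxattr(fd, 'user.swift.metadata', 'pickled-metadata')
    assert xattr.getxattr(fd, 'user.swift.metadata') == 'pickled-metadata'
    os.close(fd)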
@ -119,7 +119,7 @@ class TestAuthServer(unittest.TestCase):
headers={'X-Storage-User': 'tester',
'X-Storage-Pass': 'testing'}))
token = res.headers['x-storage-token']
ttl, _, _, _ = self.controller.validate_token(token)
ttl, _junk, _junk, _junk = self.controller.validate_token(token)
self.assert_(ttl > 0, repr(ttl))
def test_validate_token_expired(self):
@ -134,7 +134,7 @@ class TestAuthServer(unittest.TestCase):
headers={'X-Storage-User': 'tester',
'X-Storage-Pass': 'testing'}))
token = res.headers['x-storage-token']
ttl, _, _, _ = self.controller.validate_token(token)
ttl, _junk, _junk, _junk = self.controller.validate_token(token)
self.assert_(ttl > 0, repr(ttl))
auth_server.time = lambda: 1 + self.controller.token_life
self.assertEquals(self.controller.validate_token(token), False)
@ -318,7 +318,7 @@ class TestAuthServer(unittest.TestCase):
headers={'X-Storage-User': 'tester',
'X-Storage-Pass': 'testing'}))
token = res.headers['x-storage-token']
ttl, _, _, _ = self.controller.validate_token(token)
ttl, _junk, _junk, _junk = self.controller.validate_token(token)
self.assert_(ttl > 0, repr(ttl))
def test_auth_SOSO_good_Mosso_headers(self):
@ -330,7 +330,7 @@ class TestAuthServer(unittest.TestCase):
headers={'X-Auth-User': 'test:tester',
'X-Auth-Key': 'testing'}))
token = res.headers['x-storage-token']
ttl, _, _, _ = self.controller.validate_token(token)
ttl, _junk, _junk, _junk = self.controller.validate_token(token)
self.assert_(ttl > 0, repr(ttl))
def test_auth_SOSO_bad_Mosso_headers(self):
@ -438,7 +438,7 @@ class TestAuthServer(unittest.TestCase):
headers={'X-Auth-User': 'test:tester',
'X-Auth-Key': 'testing'}))
token = res.headers['x-storage-token']
ttl, _, _, _ = self.controller.validate_token(token)
ttl, _junk, _junk, _junk = self.controller.validate_token(token)
self.assert_(ttl > 0, repr(ttl))
def test_auth_Mosso_good_SOSO_header_names(self):
@ -450,7 +450,7 @@ class TestAuthServer(unittest.TestCase):
headers={'X-Storage-User': 'test:tester',
'X-Storage-Pass': 'testing'}))
token = res.headers['x-storage-token']
ttl, _, _, _ = self.controller.validate_token(token)
ttl, _junk, _junk, _junk = self.controller.validate_token(token)
self.assert_(ttl > 0, repr(ttl))
def test_basic_logging(self):
@ -712,7 +712,7 @@ class TestAuthServer(unittest.TestCase):
res = self.controller.handle_auth(Request.blank('/v1.0',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'act:usr', 'X-Auth-Key': 'pas'}))
_, _, _, stgact = \
_junk, _junk, _junk, stgact = \
self.controller.validate_token(res.headers['x-auth-token'])
self.assertEquals(stgact, '')
@ -723,7 +723,7 @@ class TestAuthServer(unittest.TestCase):
res = self.controller.handle_auth(Request.blank('/v1.0',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'act:usr', 'X-Auth-Key': 'pas'}))
_, _, _, vstgact = \
_junk, _junk, _junk, vstgact = \
self.controller.validate_token(res.headers['x-auth-token'])
self.assertEquals(stgact, vstgact)
@ -734,7 +734,7 @@ class TestAuthServer(unittest.TestCase):
res = self.controller.handle_auth(Request.blank('/v1.0',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Auth-User': 'act:usr', 'X-Auth-Key': 'pas'}))
_, _, _, stgact = \
_junk, _junk, _junk, stgact = \
self.controller.validate_token(res.headers['x-auth-token'])
self.assertEquals(stgact, '.reseller_admin')

@ -95,7 +95,7 @@ class Logger(object):
self.error_value = (msg, args, kwargs)
def exception(self, msg, *args, **kwargs):
_, exc, _ = sys.exc_info()
_junk, exc, _junk = sys.exc_info()
self.exception_value = (msg,
'%s %s' % (exc.__class__.__name__, str(exc)), args, kwargs)

@ -47,49 +47,49 @@ class TestDomainRemap(unittest.TestCase):
def test_domain_remap_account(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'a.example.com'})
headers={'Host': 'AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, '/v1/a')
self.assertEquals(resp, '/v1/AUTH_a')
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'a-uuid.example.com'})
headers={'Host': 'AUTH-uuid.example.com'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, '/v1/a_uuid')
self.assertEquals(resp, '/v1/AUTH_uuid')
def test_domain_remap_account_container(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.a.example.com'})
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, '/v1/a/c')
self.assertEquals(resp, '/v1/AUTH_a/c')
def test_domain_remap_extra_subdomains(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'x.y.c.a.example.com'})
headers={'Host': 'x.y.c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, ['Bad domain in host header'])
def test_domain_remap_account_with_path_root(self):
req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'a.example.com'})
headers={'Host': 'AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, '/v1/a')
self.assertEquals(resp, '/v1/AUTH_a')
def test_domain_remap_account_container_with_path_root(self):
req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.a.example.com'})
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, '/v1/a/c')
self.assertEquals(resp, '/v1/AUTH_a/c')
def test_domain_remap_account_container_with_path(self):
req = Request.blank('/obj', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.a.example.com'})
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, '/v1/a/c/obj')
self.assertEquals(resp, '/v1/AUTH_a/c/obj')
def test_domain_remap_account_container_with_path_root_and_path(self):
req = Request.blank('/v1/obj', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.a.example.com'})
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, '/v1/a/c/obj')
self.assertEquals(resp, '/v1/AUTH_a/c/obj')
def test_domain_remap_account_matching_ending_not_domain(self):
req = Request.blank('/dontchange', environ={'REQUEST_METHOD': 'GET'},
@ -101,7 +101,23 @@ class TestDomainRemap(unittest.TestCase):
self.app = domain_remap.DomainRemapMiddleware(FakeApp(),
{'storage_domain': ''})
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.a.example.com'})
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, '/test')
def test_domain_remap_configured_with_prefixes(self):
conf = {'reseller_prefixes': 'PREFIX'}
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.prefix_uuid.example.com'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, '/v1/PREFIX_uuid/c/test')
def test_domain_remap_configured_with_bad_prefixes(self):
conf = {'reseller_prefixes': 'UNKNOWN'}
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.prefix_uuid.example.com'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, '/test')

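Mirroring the new tests, a deployment that only wants AUTH-prefixed accounts honored in hostnames might wire the middleware up as below; FakeApp is the test stub from this file, and in a real pipeline the next WSGI app is passed instead::

    from swift.common.middleware import domain_remap

    conf = {'storage_domain': 'example.com',
            'reseller_prefixes': 'AUTH'}
    app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)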
@ -95,13 +95,13 @@ class FakeApp(object):
class FakeLogger(object):
# a thread-safe logger
def error(self, msg):
def error(self, *args, **kwargs):
pass
def info(self, msg):
def info(self, *args, **kwargs):
pass
def warning(self, msg):
def warning(self, *args, **kwargs):
pass
@ -224,6 +224,7 @@ class TestRateLimit(unittest.TestCase):
'account_whitelist': 'a',
'account_blacklist': 'b'}
self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp())
self.test_ratelimit.BLACK_LIST_SLEEP = 0
ratelimit.http_connect = mock_http_connect(204)
req = Request.blank('/v/b/c')
req.environ['swift.cache'] = FakeMemcache()
@ -260,6 +261,7 @@ class TestRateLimit(unittest.TestCase):
# making clock less accurate for nosetests running slow
self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp())
ratelimit.http_connect = mock_http_connect(204)
self.test_ratelimit.log_sleep_time_seconds = .00001
req = Request.blank('/v/a')
req.environ['swift.cache'] = FakeMemcache()
begin = time.time()
@ -402,7 +404,5 @@ class TestRateLimit(unittest.TestCase):
self._run(make_app_call, num_calls, current_rate)
if __name__ == '__main__':
unittest.main()

@ -2561,6 +2561,7 @@ class TestAuth(unittest.TestCase):
def test_put_user_regular_success(self):
self.test_auth.app = FakeApp(iter([
('200 Ok', {'X-Container-Meta-Account-Id': 'AUTH_cfa'}, ''),
# PUT of user object
('201 Created', {}, '')]))
resp = Request.blank('/auth/v2/act/usr',
@ -2570,13 +2571,14 @@ class TestAuth(unittest.TestCase):
'X-Auth-User-Key': 'key'}
).get_response(self.test_auth)
self.assertEquals(resp.status_int, 201)
self.assertEquals(self.test_auth.app.calls, 1)
self.assertEquals(self.test_auth.app.calls, 2)
self.assertEquals(json.loads(self.test_auth.app.request.body),
{"groups": [{"name": "act:usr"}, {"name": "act"}],
"auth": "plaintext:key"})
def test_put_user_account_admin_success(self):
self.test_auth.app = FakeApp(iter([
('200 Ok', {'X-Container-Meta-Account-Id': 'AUTH_cfa'}, ''),
# PUT of user object
('201 Created', {}, '')]))
resp = Request.blank('/auth/v2/act/usr',
@ -2587,7 +2589,7 @@ class TestAuth(unittest.TestCase):
'X-Auth-User-Admin': 'true'}
).get_response(self.test_auth)
self.assertEquals(resp.status_int, 201)
self.assertEquals(self.test_auth.app.calls, 1)
self.assertEquals(self.test_auth.app.calls, 2)
self.assertEquals(json.loads(self.test_auth.app.request.body),
{"groups": [{"name": "act:usr"}, {"name": "act"},
{"name": ".admin"}],
@ -2595,6 +2597,7 @@ class TestAuth(unittest.TestCase):
def test_put_user_reseller_admin_success(self):
self.test_auth.app = FakeApp(iter([
('200 Ok', {'X-Container-Meta-Account-Id': 'AUTH_cfa'}, ''),
# PUT of user object
('201 Created', {}, '')]))
resp = Request.blank('/auth/v2/act/usr',
@ -2605,7 +2608,7 @@ class TestAuth(unittest.TestCase):
'X-Auth-User-Reseller-Admin': 'true'}
).get_response(self.test_auth)
self.assertEquals(resp.status_int, 201)
self.assertEquals(self.test_auth.app.calls, 1)
self.assertEquals(self.test_auth.app.calls, 2)
self.assertEquals(json.loads(self.test_auth.app.request.body),
{"groups": [{"name": "act:usr"}, {"name": "act"},
{"name": ".admin"}, {"name": ".reseller_admin"}],
@ -2613,6 +2616,7 @@ class TestAuth(unittest.TestCase):
def test_put_user_fail_not_found(self):
self.test_auth.app = FakeApp(iter([
('200 Ok', {'X-Container-Meta-Account-Id': 'AUTH_cfa'}, ''),
# PUT of user object
('404 Not Found', {}, '')]))
resp = Request.blank('/auth/v2/act/usr',
@ -2622,7 +2626,7 @@ class TestAuth(unittest.TestCase):
'X-Auth-User-Key': 'key'}
).get_response(self.test_auth)
self.assertEquals(resp.status_int, 404)
self.assertEquals(self.test_auth.app.calls, 1)
self.assertEquals(self.test_auth.app.calls, 2)
def test_put_user_fail(self):
self.test_auth.app = FakeApp(iter([

@ -14,7 +14,10 @@
# limitations under the License.
# TODO: More tests
import socket
import unittest
from StringIO import StringIO
from urlparse import urlparse
# TODO: mock http connection class with more control over headers
from test.unit.proxy.test_server import fake_http_connect
@ -32,10 +35,10 @@ class TestHttpHelpers(unittest.TestCase):
def test_http_connection(self):
url = 'http://www.test.com'
_, conn = c.http_connection(url)
_junk, conn = c.http_connection(url)
self.assertTrue(isinstance(conn, c.HTTPConnection))
url = 'https://www.test.com'
_, conn = c.http_connection(url)
_junk, conn = c.http_connection(url)
self.assertTrue(isinstance(conn, c.HTTPSConnection))
url = 'ftp://www.test.com'
self.assertRaises(c.ClientException, c.http_connection, url)
@ -377,5 +380,97 @@ class TestConnection(MockHttpTest):
self.assertEquals(conn.url, 'http://www.new.com')
self.assertEquals(conn.token, 'new')
def test_reset_stream(self):
class LocalContents(object):
def __init__(self, tell_value=0):
self.already_read = False
self.seeks = []
self.tell_value = tell_value
def tell(self):
return self.tell_value
def seek(self, position):
self.seeks.append(position)
self.already_read = False
def read(self, size=-1):
if self.already_read:
return ''
else:
self.already_read = True
return 'abcdef'
class LocalConnection(object):
def putrequest(self, *args, **kwargs):
return
def putheader(self, *args, **kwargs):
return
def endheaders(self, *args, **kwargs):
return
def send(self, *args, **kwargs):
raise socket.error('oops')
def request(self, *args, **kwargs):
return
def getresponse(self, *args, **kwargs):
self.status = 200
return self
def getheader(self, *args, **kwargs):
return ''
def read(self, *args, **kwargs):
return ''
def local_http_connection(url):
parsed = urlparse(url)
return parsed, LocalConnection()
orig_conn = c.http_connection
try:
c.http_connection = local_http_connection
conn = c.Connection('http://www.example.com', 'asdf', 'asdf',
retries=1, starting_backoff=.0001)
contents = LocalContents()
exc = None
try:
conn.put_object('c', 'o', contents)
except socket.error, err:
exc = err
self.assertEquals(contents.seeks, [0])
self.assertEquals(str(exc), 'oops')
contents = LocalContents(tell_value=123)
exc = None
try:
conn.put_object('c', 'o', contents)
except socket.error, err:
exc = err
self.assertEquals(contents.seeks, [123])
self.assertEquals(str(exc), 'oops')
contents = LocalContents()
contents.tell = None
exc = None
try:
conn.put_object('c', 'o', contents)
except c.ClientException, err:
exc = err
self.assertEquals(contents.seeks, [])
self.assertEquals(str(exc), "put_object('c', 'o', ...) failure "
"and no ability to reset contents for reupload.")
finally:
c.http_connection = orig_conn
if __name__ == '__main__':
unittest.main()

@ -456,15 +456,6 @@ log_name = yarr'''
# make sure its accurate to 10th of a second
self.assertTrue(abs(25 - (time.time() - start) * 100) < 10)
def test_ratelimit_sleep_with_sleep(self):
running_time = 0
start = time.time()
for i in range(25):
running_time = utils.ratelimit_sleep(running_time, 50)
time.sleep(1.0 / 75)
# make sure its accurate to 10th of a second
self.assertTrue(abs(50 - (time.time() - start) * 100) < 10)
def test_ratelimit_sleep_with_incr(self):
running_time = 0
start = time.time()
@ -477,6 +468,17 @@ log_name = yarr'''
total += i
self.assertTrue(abs(50 - (time.time() - start) * 100) < 10)
def test_ratelimit_sleep_with_sleep(self):
running_time = 0
start = time.time()
sleeps = [0] * 7 + [.2] * 3 + [0] * 30
for i in sleeps:
running_time = utils.ratelimit_sleep(running_time, 40,
rate_buffer=1)
time.sleep(i)
# make sure it's accurate to a 10th of a second
self.assertTrue(abs(100 - (time.time() - start) * 100) < 10)
if __name__ == '__main__':
unittest.main()

@ -19,6 +19,7 @@ import unittest
from shutil import rmtree
from StringIO import StringIO
from time import time
from tempfile import mkdtemp
from eventlet import spawn, TimeoutError, listen
from eventlet.timeout import Timeout
@ -33,17 +34,8 @@ class TestContainerController(unittest.TestCase):
""" Test swift.container_server.ContainerController """
def setUp(self):
""" Set up for testing swift.object_server.ObjectController """
self.path_to_test_xfs = os.environ.get('PATH_TO_TEST_XFS')
if not self.path_to_test_xfs or \
not os.path.exists(self.path_to_test_xfs):
print >>sys.stderr, 'WARNING: PATH_TO_TEST_XFS not set or not ' \
'pointing to a valid directory.\n' \
'Please set PATH_TO_TEST_XFS to a directory on an XFS file ' \
'system for testing.'
self.testdir = '/tmp/SWIFTUNITTEST'
else:
self.testdir = os.path.join(self.path_to_test_xfs,
'tmp_test_object_server_ObjectController')
self.testdir = os.path.join(mkdtemp(),
'tmp_test_object_server_ObjectController')
mkdirs(self.testdir)
rmtree(self.testdir)
mkdirs(os.path.join(self.testdir, 'sda1'))
@ -53,7 +45,7 @@ class TestContainerController(unittest.TestCase):
def tearDown(self):
""" Tear down for testing swift.object_server.ObjectController """
rmtree(self.testdir, ignore_errors=1)
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
def test_acl_container(self):
# Ensure no acl by default

@ -19,6 +19,7 @@ import sys
import unittest
from gzip import GzipFile
from shutil import rmtree
from tempfile import mkdtemp
from eventlet import spawn, TimeoutError, listen
from eventlet.timeout import Timeout
@ -35,17 +36,7 @@ class TestContainerUpdater(unittest.TestCase):
def setUp(self):
utils.HASH_PATH_SUFFIX = 'endcap'
self.path_to_test_xfs = os.environ.get('PATH_TO_TEST_XFS')
if not self.path_to_test_xfs or \
not os.path.exists(self.path_to_test_xfs):
print >>sys.stderr, 'WARNING: PATH_TO_TEST_XFS not set or not ' \
'pointing to a valid directory.\n' \
'Please set PATH_TO_TEST_XFS to a directory on an XFS file ' \
'system for testing.'
self.testdir = '/tmp/SWIFTUNITTEST'
else:
self.testdir = os.path.join(self.path_to_test_xfs,
'tmp_test_container_updater')
self.testdir = os.path.join(mkdtemp(), 'tmp_test_container_updater')
rmtree(self.testdir, ignore_errors=1)
os.mkdir(self.testdir)
pickle.dump(RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
@ -60,7 +51,7 @@ class TestContainerUpdater(unittest.TestCase):
os.mkdir(self.sda1)
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
def test_creation(self):
cu = container_updater.ContainerUpdater({
@ -87,6 +78,7 @@ class TestContainerUpdater(unittest.TestCase):
'interval': '1',
'concurrency': '1',
'node_timeout': '15',
'account_suppression_time': 0
})
cu.run_once()
containers_dir = os.path.join(self.sda1, container_server.DATADIR)
@ -142,7 +134,7 @@ class TestContainerUpdater(unittest.TestCase):
bindsock = listen(('127.0.0.1', 0))
def spawn_accepts():
events = []
for _ in xrange(2):
for _junk in xrange(2):
sock, addr = bindsock.accept()
events.append(spawn(accept, sock, addr, 201))
return events
@ -195,7 +187,7 @@ class TestContainerUpdater(unittest.TestCase):
bindsock = listen(('127.0.0.1', 0))
def spawn_accepts():
events = []
for _ in xrange(2):
for _junk in xrange(2):
with Timeout(3):
sock, addr = bindsock.accept()
events.append(spawn(accept, sock, addr))

@ -14,13 +14,16 @@
# limitations under the License.
# TODO: Tests
from test import unit as _setup_mocks
import unittest
import tempfile
import os
import time
from shutil import rmtree
from hashlib import md5
from tempfile import mkdtemp
from swift.obj import auditor
from swift.obj import server as object_server
from swift.obj.server import DiskFile, write_metadata
from swift.common.utils import hash_path, mkdirs, normalize_timestamp, renamer
from swift.obj.replicator import invalidate_hash
@ -30,18 +33,8 @@ from swift.common.exceptions import AuditException
class TestAuditor(unittest.TestCase):
def setUp(self):
self.path_to_test_xfs = os.environ.get('PATH_TO_TEST_XFS')
if not self.path_to_test_xfs or \
not os.path.exists(self.path_to_test_xfs):
print >> sys.stderr, 'WARNING: PATH_TO_TEST_XFS not set or not ' \
'pointing to a valid directory.\n' \
'Please set PATH_TO_TEST_XFS to a directory on an XFS file ' \
'system for testing.'
self.testdir = '/tmp/SWIFTUNITTEST'
else:
self.testdir = os.path.join(self.path_to_test_xfs,
'tmp_test_object_auditor')
self.testdir = \
os.path.join(mkdtemp(), 'tmp_test_object_auditor')
self.devices = os.path.join(self.testdir, 'node')
rmtree(self.testdir, ignore_errors=1)
os.mkdir(self.testdir)
@ -63,7 +56,7 @@ class TestAuditor(unittest.TestCase):
mount_check='false')
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
def test_object_audit_extra_data(self):
self.auditor = auditor.ObjectAuditor(self.conf)
@ -130,25 +123,21 @@ class TestAuditor(unittest.TestCase):
self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_object_audit_no_meta(self):
self.auditor = auditor.ObjectAuditor(self.conf)
cur_part = '0'
disk_file = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o')
data = '0' * 1024
etag = md5()
timestamp = str(normalize_timestamp(time.time()))
path = os.path.join(disk_file.datadir, timestamp + '.data')
mkdirs(disk_file.datadir)
fp = open(path, 'w')
fp.write('0' * 1024)
fp.close()
invalidate_hash(os.path.dirname(disk_file.datadir))
self.auditor = auditor.ObjectAuditor(self.conf)
pre_quarantines = self.auditor.quarantines
with disk_file.mkstemp() as (fd, tmppath):
os.write(fd, data)
etag.update(data)
etag = etag.hexdigest()
timestamp = str(normalize_timestamp(time.time()))
os.fsync(fd)
invalidate_hash(os.path.dirname(disk_file.datadir))
renamer(tmppath, os.path.join(disk_file.datadir,
timestamp + '.data'))
self.auditor.object_audit(
os.path.join(disk_file.datadir, timestamp + '.data'),
'sda', cur_part)
self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
self.auditor.object_audit(
os.path.join(disk_file.datadir, timestamp + '.data'),
'sda', cur_part)
self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_object_audit_bad_args(self):
self.auditor = auditor.ObjectAuditor(self.conf)

@ -23,10 +23,12 @@ from nose import SkipTest
from shutil import rmtree
from StringIO import StringIO
from time import gmtime, sleep, strftime, time
from tempfile import mkdtemp
from eventlet import sleep, spawn, wsgi, listen
from webob import Request
from xattr import getxattr, setxattr
from test.unit import _getxattr as getxattr
from test.unit import _setxattr as setxattr
from test.unit import connect_tcp, readuntil2crlfs
from swift.obj import server as object_server
@ -39,17 +41,8 @@ class TestObjectController(unittest.TestCase):
def setUp(self):
""" Set up for testing swift.object_server.ObjectController """
self.path_to_test_xfs = os.environ.get('PATH_TO_TEST_XFS')
if not self.path_to_test_xfs or \
not os.path.exists(self.path_to_test_xfs):
print >> sys.stderr, 'WARNING: PATH_TO_TEST_XFS not set or not ' \
'pointing to a valid directory.\n' \
'Please set PATH_TO_TEST_XFS to a directory on an XFS file ' \
'system for testing.'
self.testdir = '/tmp/SWIFTUNITTEST'
else:
self.testdir = os.path.join(self.path_to_test_xfs,
'tmp_test_object_server_ObjectController')
self.testdir = \
os.path.join(mkdtemp(), 'tmp_test_object_server_ObjectController')
mkdirs(self.testdir)
rmtree(self.testdir)
mkdirs(os.path.join(self.testdir, 'sda1'))
@ -60,12 +53,10 @@ class TestObjectController(unittest.TestCase):
def tearDown(self):
""" Tear down for testing swift.object_server.ObjectController """
rmtree(self.testdir)
rmtree(os.path.dirname(self.testdir))
def test_POST_update_meta(self):
""" Test swift.object_server.ObjectController.POST """
if not self.path_to_test_xfs:
raise SkipTest
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
@ -93,8 +84,6 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.headers['Content-Type'], 'application/x-test')
def test_POST_not_exist(self):
if not self.path_to_test_xfs:
raise SkipTest
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/fail',
environ={'REQUEST_METHOD': 'POST'},
@ -116,8 +105,6 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 400)
def test_POST_container_connection(self):
if not self.path_to_test_xfs:
raise SkipTest
def mock_http_connect(response, with_exc=False):
@ -222,8 +209,6 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 411)
def test_PUT_common(self):
if not self.path_to_test_xfs:
raise SkipTest
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
@ -247,8 +232,6 @@ class TestObjectController(unittest.TestCase):
'name': '/a/c/o'})
def test_PUT_overwrite(self):
if not self.path_to_test_xfs:
raise SkipTest
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '6',
@ -281,8 +264,6 @@ class TestObjectController(unittest.TestCase):
'Content-Encoding': 'gzip'})
def test_PUT_no_etag(self):
if not self.path_to_test_xfs:
raise SkipTest
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'text/plain'})
@ -300,8 +281,6 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 422)
def test_PUT_user_metadata(self):
if not self.path_to_test_xfs:
raise SkipTest
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
@ -329,8 +308,6 @@ class TestObjectController(unittest.TestCase):
'X-Object-Meta-Two': 'Two'})
def test_PUT_container_connection(self):
if not self.path_to_test_xfs:
raise SkipTest
def mock_http_connect(response, with_exc=False):
@ -399,8 +376,6 @@ class TestObjectController(unittest.TestCase):
def test_HEAD(self):
""" Test swift.object_server.ObjectController.HEAD """
if not self.path_to_test_xfs:
raise SkipTest
req = Request.blank('/sda1/p/a/c')
resp = self.object_controller.HEAD(req)
self.assertEquals(resp.status_int, 400)
@ -466,8 +441,6 @@ class TestObjectController(unittest.TestCase):
def test_GET(self):
""" Test swift.object_server.ObjectController.GET """
if not self.path_to_test_xfs:
raise SkipTest
req = Request.blank('/sda1/p/a/c')
resp = self.object_controller.GET(req)
self.assertEquals(resp.status_int, 400)
@ -555,8 +528,6 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 404)
def test_GET_if_match(self):
if not self.path_to_test_xfs:
raise SkipTest
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': normalize_timestamp(time()),
@ -610,8 +581,6 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 412)
def test_GET_if_none_match(self):
if not self.path_to_test_xfs:
raise SkipTest
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': normalize_timestamp(time()),
@ -661,8 +630,6 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.etag, etag)
def test_GET_if_modified_since(self):
if not self.path_to_test_xfs:
raise SkipTest
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
@ -698,8 +665,6 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 304)
def test_GET_if_unmodified_since(self):
if not self.path_to_test_xfs:
raise SkipTest
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
@ -737,8 +702,6 @@ class TestObjectController(unittest.TestCase):
def test_DELETE(self):
""" Test swift.object_server.ObjectController.DELETE """
if not self.path_to_test_xfs:
raise SkipTest
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'})
resp = self.object_controller.DELETE(req)
@ -865,8 +828,6 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(outbuf.getvalue()[:4], '405 ')
def test_chunked_put(self):
if not self.path_to_test_xfs:
raise SkipTest
listener = listen(('localhost', 0))
port = listener.getsockname()[1]
killer = spawn(wsgi.server, listener, self.object_controller,
@ -891,8 +852,6 @@ class TestObjectController(unittest.TestCase):
killer.kill()
def test_max_object_name_length(self):
if not self.path_to_test_xfs:
raise SkipTest
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/' + ('1' * 1024),
environ={'REQUEST_METHOD': 'PUT'},
@ -912,8 +871,6 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 400)
def test_disk_file_app_iter_corners(self):
if not self.path_to_test_xfs:
raise SkipTest
df = object_server.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o')
mkdirs(df.datadir)
f = open(os.path.join(df.datadir,
@ -946,8 +903,6 @@ class TestObjectController(unittest.TestCase):
self.assert_(os.path.exists(tmpdir))
def test_max_upload_time(self):
if not self.path_to_test_xfs:
raise SkipTest
class SlowBody():
@ -996,8 +951,6 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 499)
def test_bad_sinces(self):
if not self.path_to_test_xfs:
raise SkipTest
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4', 'Content-Type': 'text/plain'},
@ -1022,8 +975,6 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 412)
def test_content_encoding(self):
if not self.path_to_test_xfs:
raise SkipTest
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4', 'Content-Type': 'text/plain',
@ -1042,8 +993,6 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.headers['content-encoding'], 'gzip')
def test_manifest_header(self):
if not self.path_to_test_xfs:
raise SkipTest
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,

File diff suppressed because it is too large

@ -21,8 +21,49 @@ from swift.stats import access_processor
class TestAccessProcessor(unittest.TestCase):
def test_placeholder(self):
pass
def test_log_line_parser_field_count(self):
p = access_processor.AccessLogProcessor({})
# too few fields
log_line = [str(x) for x in range(17)]
log_line[1] = 'proxy-server'
log_line[4] = '1/Jan/3/4/5/6'
log_line[6] = '/v1/a/c/o'
log_line = 'x' * 16 + ' '.join(log_line)
res = p.log_line_parser(log_line)
expected = {}
self.assertEquals(res, expected)
# right amount of fields
log_line = [str(x) for x in range(18)]
log_line[1] = 'proxy-server'
log_line[4] = '1/Jan/3/4/5/6'
log_line[6] = '/v1/a/c/o'
log_line = 'x' * 16 + ' '.join(log_line)
res = p.log_line_parser(log_line)
expected = {'code': 8, 'processing_time': '17', 'auth_token': '11',
'month': '01', 'second': '6', 'year': '3', 'tz': '+0000',
'http_version': '7', 'object_name': 'o', 'etag': '14',
'method': '5', 'trans_id': '15', 'client_ip': '2',
'bytes_out': 13, 'container_name': 'c', 'day': '1',
'minute': '5', 'account': 'a', 'hour': '4',
'referrer': '9', 'request': '/v1/a/c/o',
'user_agent': '10', 'bytes_in': 12, 'lb_ip': '3'}
self.assertEquals(res, expected)
# too many fields
log_line = [str(x) for x in range(19)]
log_line[1] = 'proxy-server'
log_line[4] = '1/Jan/3/4/5/6'
log_line[6] = '/v1/a/c/o'
log_line = 'x' * 16 + ' '.join(log_line)
res = p.log_line_parser(log_line)
expected = {'code': 8, 'processing_time': '17', 'auth_token': '11',
'month': '01', 'second': '6', 'year': '3', 'tz': '+0000',
'http_version': '7', 'object_name': 'o', 'etag': '14',
'method': '5', 'trans_id': '15', 'client_ip': '2',
'bytes_out': 13, 'container_name': 'c', 'day': '1',
'minute': '5', 'account': 'a', 'hour': '4',
'referrer': '9', 'request': '/v1/a/c/o',
'user_agent': '10', 'bytes_in': 12, 'lb_ip': '3'}
self.assertEquals(res, expected)
if __name__ == '__main__':