diff --git a/AUTHORS b/AUTHORS index e7bc59b72e..bf834db788 100644 --- a/AUTHORS +++ b/AUTHORS @@ -24,9 +24,13 @@ Paul Jimenez Brian K. Jones Ed Leafe Stephen Milton +Russ Nelson +Colin Nicholson Andrew Clay Shafer Monty Taylor Caleb Tennis +FUJITA Tomonori Kapil Thangavelu Conrad Weidenkeller +Chris Wedgwood Cory Wright diff --git a/babel.cfg b/babel.cfg new file mode 100644 index 0000000000..15cd6cb76b --- /dev/null +++ b/babel.cfg @@ -0,0 +1,2 @@ +[python: **.py] + diff --git a/bin/st b/bin/st index cab398910e..58285423bd 100755 --- a/bin/st +++ b/bin/st @@ -38,13 +38,13 @@ from urlparse import urlparse, urlunparse try: from eventlet import sleep -except: +except Exception: from time import sleep try: from swift.common.bufferedhttp \ import BufferedHTTPConnection as HTTPConnection -except: +except Exception: from httplib import HTTPConnection @@ -80,7 +80,7 @@ except ImportError: res = [] consts = {'true': True, 'false': False, 'null': None} string = '(' + comments.sub('', string) + ')' - for type, val, _, _, _ in \ + for type, val, _junk, _junk, _junk in \ generate_tokens(StringIO(string).readline): if (type == OP and val not in '[]{}:,()-') or \ (type == NAME and val not in consts): @@ -91,7 +91,7 @@ except ImportError: else: res.append(val) return eval(''.join(res), {}, consts) - except: + except Exception: raise AttributeError() @@ -914,7 +914,7 @@ def st_delete(parser, args, print_queue, error_queue): segment_queue.put((scontainer, delobj['name'])) if not segment_queue.empty(): segment_threads = [QueueFunctionThread(segment_queue, - _delete_segment, create_connection()) for _ in + _delete_segment, create_connection()) for _junk in xrange(10)] for thread in segment_threads: thread.start() @@ -972,11 +972,11 @@ def st_delete(parser, args, print_queue, error_queue): create_connection = lambda: Connection(options.auth, options.user, options.key, preauthurl=url, preauthtoken=token, snet=options.snet) object_threads = [QueueFunctionThread(object_queue, _delete_object, - create_connection()) for _ in xrange(10)] + create_connection()) for _junk in xrange(10)] for thread in object_threads: thread.start() container_threads = [QueueFunctionThread(container_queue, - _delete_container, create_connection()) for _ in xrange(10)] + _delete_container, create_connection()) for _junk in xrange(10)] for thread in container_threads: thread.start() if not args: @@ -1142,11 +1142,11 @@ def st_download(options, args, print_queue, error_queue): create_connection = lambda: Connection(options.auth, options.user, options.key, preauthurl=url, preauthtoken=token, snet=options.snet) object_threads = [QueueFunctionThread(object_queue, _download_object, - create_connection()) for _ in xrange(10)] + create_connection()) for _junk in xrange(10)] for thread in object_threads: thread.start() container_threads = [QueueFunctionThread(container_queue, - _download_container, create_connection()) for _ in xrange(10)] + _download_container, create_connection()) for _junk in xrange(10)] for thread in container_threads: thread.start() if not args: @@ -1525,7 +1525,8 @@ def st_upload(options, args, print_queue, error_queue): full_size = getsize(path) segment_queue = Queue(10000) segment_threads = [QueueFunctionThread(segment_queue, - _segment_job, create_connection()) for _ in xrange(10)] + _segment_job, create_connection()) for _junk in + xrange(10)] for thread in segment_threads: thread.start() segment = 0 @@ -1569,7 +1570,7 @@ def st_upload(options, args, print_queue, error_queue): 'container': scontainer, 'obj': 
delobj['name']}) if not segment_queue.empty(): segment_threads = [QueueFunctionThread(segment_queue, - _segment_job, create_connection()) for _ in + _segment_job, create_connection()) for _junk in xrange(10)] for thread in segment_threads: thread.start() @@ -1603,7 +1604,7 @@ def st_upload(options, args, print_queue, error_queue): create_connection = lambda: Connection(options.auth, options.user, options.key, preauthurl=url, preauthtoken=token, snet=options.snet) object_threads = [QueueFunctionThread(object_queue, _object_job, - create_connection()) for _ in xrange(10)] + create_connection()) for _junk in xrange(10)] for thread in object_threads: thread.start() conn = create_connection() @@ -1615,7 +1616,7 @@ def st_upload(options, args, print_queue, error_queue): conn.put_container(args[0]) if options.segment_size is not None: conn.put_container(args[0] + '_segments') - except: + except Exception: pass try: for arg in args[1:]: @@ -1722,7 +1723,7 @@ Example: error_thread.abort = True while error_thread.isAlive(): error_thread.join(0.01) - except: + except Exception: for thread in threading_enumerate(): thread.abort = True raise diff --git a/bin/swift-account-audit b/bin/swift-account-audit index 9fd1b13e80..9a19198853 100755 --- a/bin/swift-account-audit +++ b/bin/swift-account-audit @@ -73,7 +73,7 @@ class Auditor(object): def audit_object(self, account, container, name): path = '/%s/%s/%s' % (account, container, name) - part, nodes = self.object_ring.get_nodes(account, container, name) + part, nodes = self.object_ring.get_nodes(account, container.encode('utf-8'), name.encode('utf-8')) container_listing = self.audit_container(account, container) consistent = True if name not in container_listing: @@ -109,7 +109,7 @@ class Auditor(object): etags.append(resp.getheader('ETag')) else: conn = http_connect(node['ip'], node['port'], - node['device'], part, 'HEAD', path, {}) + node['device'], part, 'HEAD', path.encode('utf-8'), {}) resp = conn.getresponse() if resp.status // 100 != 2: self.object_not_found += 1 @@ -144,14 +144,14 @@ class Auditor(object): if (account, name) in self.list_cache: return self.list_cache[(account, name)] self.in_progress[(account, name)] = Event() - print 'Auditing container "%s"...' % name + print 'Auditing container "%s"' % name path = '/%s/%s' % (account, name) account_listing = self.audit_account(account) consistent = True if name not in account_listing: consistent = False print " Container %s not in account listing!" % path - part, nodes = self.container_ring.get_nodes(account, name) + part, nodes = self.container_ring.get_nodes(account, name.encode('utf-8')) rec_d = {} responses = {} for node in nodes: @@ -161,8 +161,8 @@ class Auditor(object): node_id = node['id'] try: conn = http_connect(node['ip'], node['port'], node['device'], - part, 'GET', path, {}, - 'format=json&marker=%s' % quote(marker)) + part, 'GET', path.encode('utf-8'), {}, + 'format=json&marker=%s' % quote(marker.encode('utf-8'))) resp = conn.getresponse() if resp.status // 100 != 2: self.container_not_found += 1 @@ -189,7 +189,7 @@ class Auditor(object): self.container_obj_mismatch += 1 consistent = False print " Different versions of %s/%s in container dbs." 
% \ - name, obj['name'] + (name, obj['name']) if obj['last_modified'] > rec_d[obj_name]['last_modified']: rec_d[obj_name] = obj obj_counts = [int(header['x-container-object-count']) @@ -220,7 +220,7 @@ class Auditor(object): if account in self.list_cache: return self.list_cache[account] self.in_progress[account] = Event() - print "Auditing account %s..." % account + print 'Auditing account "%s"' % account consistent = True path = '/%s' % account part, nodes = self.account_ring.get_nodes(account) @@ -233,19 +233,18 @@ class Auditor(object): try: conn = http_connect(node['ip'], node['port'], node['device'], part, 'GET', path, {}, - 'format=json&marker=%s' % quote(marker)) + 'format=json&marker=%s' % quote(marker.encode('utf-8'))) resp = conn.getresponse() if resp.status // 100 != 2: self.account_not_found += 1 consistent = False - print " Bad status GETting account %(ip)s:%(device)s" \ - % node + print " Bad status GETting account '%s' from %s:%s" % (account, node['ip'], node['device']) break results = simplejson.loads(resp.read()) except Exception: self.account_exceptions += 1 consistent = False - print " Exception GETting account %(ip)s:%(device)s" % node + print " Exception GETting account '%s' on %s:%s" % (account, node['ip'], node['device']) break if node_id not in responses: responses[node_id] = [dict(resp.getheaders()), []] @@ -258,15 +257,17 @@ if len(set(cont_counts)) != 1: self.account_container_mismatch += 1 consistent = False - print " Account databases don't agree on number of containers." - print " Max: %s, Min: %s" % (max(cont_counts), min(cont_counts)) + print " Account databases for '%s' don't agree on number of containers." % account + if cont_counts: + print " Max: %s, Min: %s" % (max(cont_counts), min(cont_counts)) obj_counts = [int(header['x-account-object-count']) for header in headers] if len(set(obj_counts)) != 1: self.account_object_mismatch += 1 consistent = False - print " Account databases don't agree on number of objects." - print " Max: %s, Min: %s" % (max(obj_counts), min(obj_counts)) + print " Account databases for '%s' don't agree on number of objects." % account + if obj_counts: + print " Max: %s, Min: %s" % (max(obj_counts), min(obj_counts)) containers = set() for resp in responses.values(): containers.update(container['name'] for container in resp[1]) diff --git a/bin/swift-auth-to-swauth b/bin/swift-auth-to-swauth index a84c6cd1a5..93cb4fe199 100755 --- a/bin/swift-auth-to-swauth +++ b/bin/swift-auth-to-swauth @@ -25,8 +25,9 @@ if __name__ == '__main__': gettext.install('swift', unicode=1) if len(argv) != 4 or argv[1] != '-K': exit('Syntax: %s -K <super_admin_key> <path to auth.db>' % argv[0]) - _, _, super_admin_key, auth_db = argv - call(['swauth-prep', '-K', super_admin_key]) + _junk, _junk, super_admin_key, auth_db = argv + # This version will not attempt to prep swauth + # call(['swauth-prep', '-K', super_admin_key]) conn = sqlite3.connect(auth_db) for account, cfaccount, user, password, admin, reseller_admin in \ conn.execute('SELECT account, cfaccount, user, password, admin, ' @@ -39,7 +40,8 @@ if __name__ == '__main__': cmd.append('-r') cmd.extend([account, user, password]) print ' '.join(cmd) - call(cmd) + # For this version, the script will only print out the commands + # call(cmd) print '----------------------------------------------------------------' print ' Assuming the above worked perfectly, you should copy and paste ' print ' those lines into your ~/bin/recreateaccounts script.'
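Two mechanical cleanups recur throughout this patch: throwaway variables renamed from `_` to `_junk`, and bare `except:` clauses narrowed to `except Exception:`. Both tie into the i18n support the patch introduces (babel.cfg, locale/swift.pot): once `gettext.install('swift', unicode=1)` runs, as in swift-auth-to-swauth above, the builtin `_` is the translation function, so binding `_` as a scratch variable shadows it. A minimal sketch of both pitfalls (the loop and the raise below are illustrative, not taken from the patch)::

    import gettext

    # Swift installs the translation function as the builtin `_` at startup;
    # with no compiled catalog it falls back to returning the msgid unchanged.
    gettext.install('swift', unicode=1)
    print _('Daemon started.')     # translation lookup

    # Reusing `_` as a throwaway name, as the old code did, would rebind it:
    for _junk in xrange(3):        # `for _ in xrange(3)` would shadow `_`
        pass
    print _('Daemon started.')     # still the translation function

    # A bare `except:` also catches KeyboardInterrupt and SystemExit, which
    # derive from BaseException rather than Exception, so it can swallow
    # shutdown signals; `except Exception:` lets them propagate.
    try:
        raise KeyboardInterrupt()
    except Exception:
        print 'never reached'
    except BaseException:
        print 'KeyboardInterrupt skips the "except Exception:" clause'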
diff --git a/bin/swift-bench b/bin/swift-bench index 447d82724d..3c167ee06f 100755 --- a/bin/swift-bench +++ b/bin/swift-bench @@ -105,7 +105,7 @@ if __name__ == '__main__': else: conf = CONF_DEFAULTS parser.set_defaults(**conf) - options, _ = parser.parse_args() + options, _junk = parser.parse_args() if options.concurrency is not '': options.put_concurrency = options.concurrency options.get_concurrency = options.concurrency diff --git a/bin/swift-drive-audit b/bin/swift-drive-audit index 95143e8b56..e92d1e3c12 100755 --- a/bin/swift-drive-audit +++ b/bin/swift-drive-audit @@ -89,7 +89,7 @@ if __name__ == '__main__': c = ConfigParser() try: conf_path = sys.argv[1] - except: + except Exception: print "Usage: %s CONF_FILE" % sys.argv[0].split('/')[-1] sys.exit(1) if not c.read(conf_path): diff --git a/bin/swift-init b/bin/swift-init index 1a6b272345..cdbde28d4d 100755 --- a/bin/swift-init +++ b/bin/swift-init @@ -32,7 +32,7 @@ GRACEFUL_SHUTDOWN_SERVERS = ['account-server', 'container-server', MAX_DESCRIPTORS = 32768 MAX_MEMORY = (1024 * 1024 * 1024) * 2 # 2 GB -_, server, command = sys.argv +_junk, server, command = sys.argv if server == 'all': servers = ALL_SERVERS else: @@ -155,7 +155,7 @@ def do_stop(server, graceful=False): except OSError: pass for pid_file, pid in pfiles: - for _ in xrange(150): # 15 seconds + for _junk in xrange(150): # 15 seconds if not os.path.exists('/proc/%s' % pid): break time.sleep(0.1) diff --git a/bin/swift-object-info b/bin/swift-object-info index e7befddf8c..278a7de0f2 100755 --- a/bin/swift-object-info +++ b/bin/swift-object-info @@ -29,7 +29,7 @@ if __name__ == '__main__': sys.exit(1) try: ring = Ring('/etc/swift/object.ring.gz') - except: + except Exception: ring = None datafile = sys.argv[1] fp = open(datafile, 'rb') diff --git a/bin/swift-stats-populate b/bin/swift-stats-populate index ba531ddc87..b1f4f0a568 100755 --- a/bin/swift-stats-populate +++ b/bin/swift-stats-populate @@ -38,7 +38,7 @@ def put_container(connpool, container, report): retries_done += conn.attempts - 1 if report: report(True) - except: + except Exception: if report: report(False) raise @@ -53,7 +53,7 @@ def put_object(connpool, container, obj, report): retries_done += conn.attempts - 1 if report: report(True) - except: + except Exception: if report: report(False) raise @@ -127,7 +127,7 @@ if __name__ == '__main__': next_report += 2 while need_to_queue >= 1: container = 'stats_container_dispersion_%s' % uuid4() - part, _ = container_ring.get_nodes(account, container) + part, _junk = container_ring.get_nodes(account, container) if part in parts_left: coropool.spawn(put_container, connpool, container, report) sleep() @@ -152,7 +152,7 @@ if __name__ == '__main__': next_report += 2 while need_to_queue >= 1: obj = 'stats_object_dispersion_%s' % uuid4() - part, _ = object_ring.get_nodes(account, container, obj) + part, _junk = object_ring.get_nodes(account, container, obj) if part in parts_left: coropool.spawn(put_object, connpool, container, obj, report) sleep() diff --git a/bin/swift-stats-report b/bin/swift-stats-report index f2504280ba..4c47b404de 100755 --- a/bin/swift-stats-report +++ b/bin/swift-stats-report @@ -107,7 +107,7 @@ def audit(coropool, connpool, account, container_ring, object_ring, options): found = False error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node) try: - attempts, _ = direct_client.retry( + attempts, _junk = direct_client.retry( direct_client.direct_head_object, node, part, account, container, obj, error_log=error_log, retries=options.retries) @@ 
-160,7 +160,7 @@ def audit(coropool, connpool, account, container_ring, object_ring, options): print 'Containers Missing' print '-' * 78 for container in sorted(containers_missing_replicas.keys()): - part, _ = container_ring.get_nodes(account, container) + part, _junk = container_ring.get_nodes(account, container) for node in containers_missing_replicas[container]: print 'http://%s:%s/%s/%s/%s/%s' % (node['ip'], node['port'], node['device'], part, account, container) @@ -170,8 +170,8 @@ def audit(coropool, connpool, account, container_ring, object_ring, options): print 'Objects Missing' print '-' * 78 for opath in sorted(objects_missing_replicas.keys()): - _, container, obj = opath.split('/', 2) - part, _ = object_ring.get_nodes(account, container, obj) + _junk, container, obj = opath.split('/', 2) + part, _junk = object_ring.get_nodes(account, container, obj) for node in objects_missing_replicas[opath]: print 'http://%s:%s/%s/%s/%s/%s/%s' % (node['ip'], node['port'], node['device'], part, account, container, @@ -200,7 +200,7 @@ def container_dispersion_report(coropool, connpool, account, container_ring, for node in nodes: error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node) try: - attempts, _ = direct_client.retry( + attempts, _junk = direct_client.retry( direct_client.direct_head_container, node, part, account, container, error_log=error_log, retries=options.retries) @@ -284,7 +284,7 @@ def object_dispersion_report(coropool, connpool, account, object_ring, options): for node in nodes: error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node) try: - attempts, _ = direct_client.retry( + attempts, _junk = direct_client.retry( direct_client.direct_head_object, node, part, account, container, obj, error_log=error_log, retries=options.retries) diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 7af25ad631..9a78c56960 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -134,9 +134,80 @@ can be found in the :doc:`Ring Overview `. General Server Configuration ---------------------------- -Swift uses paste.deploy to manage server configurations. Default configuration -options are set in the `[DEFAULT]` section, and any options specified there -can be overridden in any of the other sections. +Swift uses paste.deploy (http://pythonpaste.org/deploy/) to manage server +configurations. Default configuration options are set in the `[DEFAULT]` +section, and any options specified there can be overridden in any of the other +sections BUT ONLY BY USING THE SYNTAX ``set option_name = value``. This is the +unfortunate way paste.deploy works and I'll try to explain it in full. + +First, here's an example paste.deploy configuration file:: + + [DEFAULT] + name1 = globalvalue + name2 = globalvalue + name3 = globalvalue + set name4 = globalvalue + + [pipeline:main] + pipeline = myapp + + [app:myapp] + use = egg:mypkg#myapp + name2 = localvalue + set name3 = localvalue + set name5 = localvalue + name6 = localvalue + +The resulting configuration that myapp receives is:: + + global {'__file__': '/etc/mypkg/wsgi.conf', 'here': '/etc/mypkg', + 'name1': 'globalvalue', + 'name2': 'globalvalue', + 'name3': 'localvalue', + 'name4': 'globalvalue', + 'name5': 'localvalue', + 'set name4': 'globalvalue'} + local {'name6': 'localvalue'} + +So, `name1` got the global value which is fine since it's only in the `DEFAULT` +section anyway. 
+ +`name2` got the global value from `DEFAULT` even though it's seemingly +overridden in the `app:myapp` subsection. This is just the unfortunate way +paste.deploy works (at least at the time of this writing). + +`name3` got the local value from the `app:myapp` subsection because it's using +the special paste.deploy syntax of ``set option_name = value``. So, if you want +a default value for most app/filters but want to override it in one +subsection, this is how you do it. + +`name4` got the global value from `DEFAULT` since it's only in that section +anyway. But, since we used the ``set`` syntax in the `DEFAULT` section even +though we shouldn't, notice we also got a ``set name4`` variable. Weird, but +probably not harmful. + +`name5` got the local value from the `app:myapp` subsection since it's only +there anyway, but notice that it is in the global configuration and not the +local configuration. This is because we used the ``set`` syntax to set the +value. Again, weird, but not harmful since Swift just treats the two sets of +configuration values as one set anyway. + +`name6` got the local value from the `app:myapp` subsection since it's only there, +and since we didn't use the ``set`` syntax, it's only in the local +configuration and not the global one. Though, as indicated above, there is no +special distinction with Swift. + +That's quite an explanation for something that should be so much simpler, but +it might be important to know how paste.deploy interprets configuration files. +The main rule to remember when working with Swift configuration files is: + +.. note:: + + Use the ``set option_name = value`` syntax in subsections if the option is + also set in the ``[DEFAULT]`` section. Don't get in the habit of always + using the ``set`` syntax or you'll probably mess up your non-paste.deploy + configuration files. + --------------------------- Object Server Configuration @@ -170,10 +241,10 @@ Option Default Description use paste.deploy entry point for the object server. For most cases, this should be `egg:swift#object`. -log_name object-server Label used when logging -log_facility LOG_LOCAL0 Syslog log facility -log_level INFO Logging level -log_requests True Whether or not to log each request +set log_name object-server Label used when logging +set log_facility LOG_LOCAL0 Syslog log facility +set log_level INFO Logging level +set log_requests True Whether or not to log each request user swift User to run as node_timeout 3 Request timeout to external services conn_timeout 0.5 Connection timeout to external services @@ -229,6 +300,7 @@ Option Default Description log_name object-auditor Label used when logging log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level +log_time 3600 Frequency of status logs in seconds. files_per_second 20 Maximum files audited per second. Should be tuned according to individual system specs. 0 is unlimited. @@ -270,9 +342,9 @@ Option Default Description use paste.deploy entry point for the container server. For most cases, this should be `egg:swift#container`.
-log_name container-server Label used when logging -log_facility LOG_LOCAL0 Syslog log facility -log_level INFO Logging level +set log_name container-server Label used when logging +set log_facility LOG_LOCAL0 Syslog log facility +set log_level INFO Logging level node_timeout 3 Request timeout to external services conn_timeout 0.5 Connection timeout to external services ================== ================ ======================================== @@ -299,19 +371,25 @@ reclaim_age 604800 Time elapsed in seconds before a [container-updater] -================== ================= ======================================= -Option Default Description ------------------- ----------------- --------------------------------------- -log_name container-updater Label used when logging -log_facility LOG_LOCAL0 Syslog log facility -log_level INFO Logging level -interval 300 Minimum time for a pass to take -concurrency 4 Number of updater workers to spawn -node_timeout 3 Request timeout to external services -conn_timeout 0.5 Connection timeout to external services -slowdown 0.01 Time in seconds to wait between - containers -================== ================= ======================================= +======================== ================= ================================== +Option Default Description +------------------------ ----------------- ---------------------------------- +log_name container-updater Label used when logging +log_facility LOG_LOCAL0 Syslog log facility +log_level INFO Logging level +interval 300 Minimum time for a pass to take +concurrency 4 Number of updater workers to spawn +node_timeout 3 Request timeout to external + services +conn_timeout 0.5 Connection timeout to external + services +slowdown 0.01 Time in seconds to wait between + containers +account_suppression_time 60 Seconds to suppress updating an + account that has generated an + error (timeout, not yet found, + etc.) +======================== ================= ================================== [container-auditor] @@ -357,9 +435,9 @@ Option Default Description use Entry point for paste.deploy for the account server. For most cases, this should be `egg:swift#account`. -log_name account-server Label used when logging -log_facility LOG_LOCAL0 Syslog log facility -log_level INFO Logging level +set log_name account-server Label used when logging +set log_facility LOG_LOCAL0 Syslog log facility +set log_level INFO Logging level ================== ============== ========================================== [account-replicator] @@ -438,10 +516,10 @@ use Entry point for paste.deploy for the proxy server. For most cases, this should be `egg:swift#proxy`. -log_name proxy-server Label used when logging -log_facility LOG_LOCAL0 Syslog log facility -log_level INFO Log level -log_headers True If True, log headers in each +set log_name proxy-server Label used when logging +set log_facility LOG_LOCAL0 Syslog log facility +set log_level INFO Log level +set log_headers True If True, log headers in each request recheck_account_existence 60 Cache timeout in seconds to send memcached for account @@ -499,10 +577,10 @@ use Entry point for auth. 
To use the swauth set to: `egg:swift#swauth` -log_name auth-server Label used when logging -log_facility LOG_LOCAL0 Syslog log facility -log_level INFO Log level -log_headers True If True, log headers in +set log_name auth-server Label used when logging +set log_facility LOG_LOCAL0 Syslog log facility +set log_level INFO Log level +set log_headers True If True, log headers in each request reseller_prefix AUTH The naming scope for the auth service. Swift @@ -515,7 +593,7 @@ auth_prefix /auth/ The HTTP request path reserves anything beginning with the letter `v`. -default_swift_cluster local:http://127.0.0.1:8080/v1 The default Swift +default_swift_cluster local#http://127.0.0.1:8080/v1 The default Swift cluster to place newly created accounts on. token_life 86400 The number of seconds a diff --git a/doc/source/development_saio.rst b/doc/source/development_saio.rst index a74e6df8c9..bd0753794e 100644 --- a/doc/source/development_saio.rst +++ b/doc/source/development_saio.rst @@ -50,7 +50,7 @@ If you are going to use a separate partition for Swift data, be sure to add anot `/dev/sdb1 /mnt/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0` #. `mkdir /mnt/sdb1` #. `mount /mnt/sdb1` - #. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 /mnt/sdb1/test` + #. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4` #. `chown : /mnt/sdb1/*` #. `mkdir /srv` #. `for x in {1..4}; do ln -s /mnt/sdb1/$x /srv/$x; done` @@ -77,7 +77,7 @@ If you want to use a loopback device instead of another partition, follow these `/srv/swift-disk /mnt/sdb1 xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0` #. `mkdir /mnt/sdb1` #. `mount /mnt/sdb1` - #. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 /mnt/sdb1/test` + #. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4` #. `chown : /mnt/sdb1/*` #. `mkdir /srv` #. `for x in {1..4}; do ln -s /mnt/sdb1/$x /srv/$x; done` @@ -204,7 +204,6 @@ Do these commands as you on guest: #. `cd ~/swift/trunk; sudo python setup.py develop` #. Edit `~/.bashrc` and add to the end:: - export PATH_TO_TEST_XFS=/mnt/sdb1/test export SWIFT_TEST_CONFIG_FILE=/etc/swift/func_test.conf export PATH=${PATH}:~/bin @@ -536,7 +535,7 @@ Setting up scripts for running Swift sudo umount /mnt/sdb1 sudo mkfs.xfs -f -i size=1024 /dev/sdb1 sudo mount /mnt/sdb1 - sudo mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 /mnt/sdb1/test + sudo mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 sudo chown : /mnt/sdb1/* mkdir -p /srv/1/node/sdb1 /srv/2/node/sdb2 /srv/3/node/sdb3 /srv/4/node/sdb4 sudo rm -f /var/log/debug /var/log/messages /var/log/rsyncd.log /var/log/syslog diff --git a/doc/source/howto_installmultinode.rst b/doc/source/howto_installmultinode.rst index 2a84357bb9..bec4f9510b 100644 --- a/doc/source/howto_installmultinode.rst +++ b/doc/source/howto_installmultinode.rst @@ -138,7 +138,7 @@ Configure the Proxy node # Only needed for Swauth [filter:swauth] use = egg:swift#swauth - default_swift_cluster = https://:8080/v1 + default_swift_cluster = local#https://:8080/v1 # Highly recommended to change this key to something else! super_admin_key = swauthkey @@ -437,7 +437,7 @@ See :ref:`config-proxy` for the initial setup, and then follow these additional # For Swauth, in /etc/swift/proxy-server.conf [filter:swauth] use = egg:swift#swauth - default_swift_cluster = local:http:///v1 + default_swift_cluster = local#http:///v1 # Highly recommended to change this key to something else! 
super_admin_key = swauthkey diff --git a/doc/source/ratelimit.rst b/doc/source/ratelimit.rst index 80db870773..4924b71153 100644 --- a/doc/source/ratelimit.rst +++ b/doc/source/ratelimit.rst @@ -30,6 +30,11 @@ max_sleep_time_seconds 60 App will immediately return a 498 response log_sleep_time_seconds 0 To allow visibility into rate limiting set this value > 0 and all sleeps greater than the number will be logged. +rate_buffer_seconds 5 Number of seconds the rate counter can + drop and be allowed to catch up (at a + faster than listed rate). A larger number + will result in larger spikes in rate but + better average accuracy. account_ratelimit 0 If set, will limit all requests to /account_name and PUTs to /account_name/container_name. Number is in diff --git a/etc/account-server.conf-sample b/etc/account-server.conf-sample index e48650f4a5..1fac948619 100644 --- a/etc/account-server.conf-sample +++ b/etc/account-server.conf-sample @@ -7,18 +7,27 @@ # swift_dir = /etc/swift # devices = /srv/node # mount_check = true +# You can specify default log routing here if you want: +# log_name = swift +# log_facility = LOG_LOCAL0 +# log_level = INFO [pipeline:main] pipeline = account-server [app:account-server] use = egg:swift#account -# log_name = account-server -# log_facility = LOG_LOCAL0 -# log_level = INFO +# You can override the default log routing for this app here: +# set log_name = account-server +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_requests = True [account-replicator] +# You can override the default log routing for this app here (don't use set!): # log_name = account-replicator +# log_facility = LOG_LOCAL0 +# log_level = INFO # vm_test_mode = no # log_facility = LOG_LOCAL0 # log_level = INFO @@ -36,7 +45,10 @@ use = egg:swift#account # reclaim_age = 86400 [account-stats] +# You can override the default log routing for this app here (don't use set!): # log_name = account-stats +# log_facility = LOG_LOCAL0 +# log_level = INFO # cf_account = AUTH_7abbc116-8a07-4b63-819d-02715d3e0f31 # container_name = account_stats # proxy_server_conf = /etc/swift/proxy-server.conf @@ -44,14 +56,20 @@ use = egg:swift#account # log_level = INFO [account-auditor] +# You can override the default log routing for this app here (don't use set!): # log_name = account-auditor +# log_facility = LOG_LOCAL0 +# log_level = INFO # Will audit, at most, 1 account per device per interval # interval = 1800 # log_facility = LOG_LOCAL0 # log_level = INFO [account-reaper] +# You can override the default log routing for this app here (don't use set!): # log_name = account-reaper +# log_facility = LOG_LOCAL0 +# log_level = INFO # concurrency = 25 # interval = 3600 # node_timeout = 10 diff --git a/etc/auth-server.conf-sample b/etc/auth-server.conf-sample index 27b6cf3e14..711f48d564 100644 --- a/etc/auth-server.conf-sample +++ b/etc/auth-server.conf-sample @@ -7,6 +7,10 @@ # swift_dir = /etc/swift # cert_file = Default is no cert; format is path like /etc/swift/auth.crt # key_file = Default is no key; format is path like /etc/swift/auth.key +# You can specify default log routing here if you want: +# log_name = swift +# log_facility = LOG_LOCAL0 +# log_level = INFO [pipeline:main] pipeline = auth-server @@ -15,11 +19,12 @@ pipeline = auth-server use = egg:swift#auth # Highly recommended to change this. 
super_admin_key = devauth -# log_name = auth-server -# log_facility = LOG_LOCAL0 -# log_level = INFO +# You can override the default log routing for this app here: +# set log_name = auth-server +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = False # reseller_prefix = AUTH # default_cluster_url = http://127.0.0.1:8080/v1 # token_life = 86400 -# log_headers = False # node_timeout = 10 diff --git a/etc/container-server.conf-sample b/etc/container-server.conf-sample index fb250708fe..fb3a47835e 100644 --- a/etc/container-server.conf-sample +++ b/etc/container-server.conf-sample @@ -7,20 +7,29 @@ # swift_dir = /etc/swift # devices = /srv/node # mount_check = true +# You can specify default log routing here if you want: +# log_name = swift +# log_facility = LOG_LOCAL0 +# log_level = INFO [pipeline:main] pipeline = container-server [app:container-server] use = egg:swift#container -# log_name = container-server -# log_facility = LOG_LOCAL0 -# log_level = INFO +# You can override the default log routing for this app here: +# set log_name = container-server +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_requests = True # node_timeout = 3 # conn_timeout = 0.5 [container-replicator] +# You can override the default log routing for this app here (don't use set!): # log_name = container-replicator +# log_facility = LOG_LOCAL0 +# log_level = INFO # vm_test_mode = no # per_diff = 1000 # concurrency = 8 @@ -31,15 +40,23 @@ use = egg:swift#container # reclaim_age = 604800 [container-updater] +# You can override the default log routing for this app here (don't use set!): # log_name = container-updater +# log_facility = LOG_LOCAL0 +# log_level = INFO # interval = 300 # concurrency = 4 # node_timeout = 3 # conn_timeout = 0.5 # slowdown will sleep that amount between containers # slowdown = 0.01 +# Seconds to suppress updating an account that has generated an error +# account_suppression_time = 60 [container-auditor] +# You can override the default log routing for this app here (don't use set!): # log_name = container-auditor +# log_facility = LOG_LOCAL0 +# log_level = INFO # Will audit, at most, 1 container per device per interval # interval = 1800 diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample index cc80c18c07..f3f7f07346 100644 --- a/etc/object-server.conf-sample +++ b/etc/object-server.conf-sample @@ -7,16 +7,21 @@ # swift_dir = /etc/swift # devices = /srv/node # mount_check = true +# You can specify default log routing here if you want: +# log_name = swift +# log_facility = LOG_LOCAL0 +# log_level = INFO [pipeline:main] pipeline = object-server [app:object-server] use = egg:swift#object -# log_name = object-server -# log_facility = LOG_LOCAL0 -# log_level = INFO -# log_requests = True +# You can override the default log routing for this app here: +# set log_name = object-server +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_requests = True # node_timeout = 3 # conn_timeout = 0.5 # network_chunk_size = 65536 @@ -27,25 +32,31 @@ use = egg:swift#object # mb_per_sync = 512 [object-replicator] +# You can override the default log routing for this app here (don't use set!): # log_name = object-replicator +# log_facility = LOG_LOCAL0 +# log_level = INFO # vm_test_mode = no # daemonize = on # run_pause = 30 # concurrency = 1 # stats_interval = 300 # max duration of a partition rsync -# rsync_timeout = 600 +# rsync_timeout = 900 # passed to rsync for io op timeout -# rsync_io_timeout = 10 +# rsync_io_timeout = 30 # 
max duration of an http request # http_timeout = 60 # attempts to kill all workers if nothing replicates for lockup_timeout seconds -# lockup_timeout = 900 +# lockup_timeout = 1800 # The replicator also performs reclamation # reclaim_age = 604800 [object-updater] +# You can override the default log routing for this app here (don't use set!): # log_name = object-updater +# log_facility = LOG_LOCAL0 +# log_level = INFO # interval = 300 # concurrency = 1 # node_timeout = 10 @@ -54,6 +65,10 @@ use = egg:swift#object # slowdown = 0.01 [object-auditor] +# You can override the default log routing for this app here (don't use set!): # log_name = object-auditor +# log_facility = LOG_LOCAL0 +# log_level = INFO # files_per_second = 20 # bytes_per_second = 10000000 +# log_time = 3600 diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index 2d85f19508..fad511ca30 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -7,6 +7,10 @@ # user = swift # cert_file = /etc/swift/proxy.crt # key_file = /etc/swift/proxy.key +# You can specify default log routing here if you want: +# log_name = swift +# log_facility = LOG_LOCAL0 +# log_level = INFO [pipeline:main] # For DevAuth: @@ -16,10 +20,11 @@ pipeline = catch_errors healthcheck cache ratelimit auth proxy-server [app:proxy-server] use = egg:swift#proxy -# log_name = proxy-server -# log_facility = LOG_LOCAL0 -# log_level = INFO -# log_headers = False +# You can override the default log routing for this app here: +# set log_name = proxy-server +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = False # recheck_account_existence = 60 # recheck_container_existence = 60 # object_chunk_size = 8192 @@ -39,6 +44,11 @@ use = egg:swift#proxy # Only needed for DevAuth [filter:auth] use = egg:swift#auth +# You can override the default log routing for this filter here: +# set log_name = auth-server +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = False # The reseller prefix will verify a token begins with this prefix before even # attempting to validate it with the external authentication server. Also, with # authorization, only Swift storage accounts with this prefix will be @@ -54,10 +64,11 @@ use = egg:swift#auth # Only needed for Swauth [filter:swauth] use = egg:swift#swauth -# log_name = auth-server -# log_facility = LOG_LOCAL0 -# log_level = INFO -# log_headers = False +# You can override the default log routing for this filter here: +# set log_name = auth-server +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = False # The reseller prefix will verify a token begins with this prefix before even # attempting to validate it. Also, with authorization, only Swift storage # accounts with this prefix will be authorized by this middleware. Useful if @@ -66,15 +77,15 @@ use = egg:swift#swauth # The auth prefix will cause requests beginning with this prefix to be routed # to the auth subsystem, for granting tokens, creating accounts, users, etc. # auth_prefix = /auth/ -# Cluster strings are of the format name:url where name is a short name for the +# Cluster strings are of the format name#url where name is a short name for the # Swift cluster and url is the url to the proxy server(s) for the cluster. 
# default_swift_cluster = local#http://127.0.0.1:8080/v1 -# You may also use the format name::url::url where the first url is the one +# You may also use the format name#url#url where the first url is the one # given to users to access their account (public url) and the second is the one # used by swauth itself to create and delete accounts (private url). This is # useful when a load balancer url should be used by users, but swauth itself is # behind the load balancer. Example: -# default_swift_cluster = local##https://public.com:8080/v1##http://private.com:8080/v1 +# default_swift_cluster = local#https://public.com:8080/v1#http://private.com:8080/v1 # token_life = 86400 # node_timeout = 10 # Highly recommended to change this. @@ -82,15 +93,30 @@ super_admin_key = swauthkey [filter:healthcheck] use = egg:swift#healthcheck +# You can override the default log routing for this filter here: +# set log_name = healthcheck +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = False [filter:cache] use = egg:swift#memcache +# You can override the default log routing for this filter here: +# set log_name = cache +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = False # Default for memcache_servers is below, but you can specify multiple servers # with the format: 10.1.2.3:11211,10.1.2.4:11211 # memcache_servers = 127.0.0.1:11211 [filter:ratelimit] use = egg:swift#ratelimit +# You can override the default log routing for this filter here: +# set log_name = ratelimit +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = False # clock_accuracy should represent how accurate the proxy servers' system clocks # are with each other. 1000 means that all the proxies' clock are accurate to # each other within 1 millisecond. No ratelimit should be higher than the @@ -99,6 +125,8 @@ use = egg:swift#ratelimit # max_sleep_time_seconds = 60 # log_sleep_time_seconds of 0 means disabled # log_sleep_time_seconds = 0 +# allows for slow rates (e.g. running up to 5 sec's behind) to catch up. +# rate_buffer_seconds = 5 # account_ratelimit of 0 means disabled # account_ratelimit = 0 @@ -116,14 +144,30 @@ use = egg:swift#ratelimit [filter:domain_remap] use = egg:swift#domain_remap +# You can override the default log routing for this filter here: +# set log_name = domain_remap +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = False # storage_domain = example.com # path_root = v1 +# reseller_prefixes = AUTH [filter:catch_errors] use = egg:swift#catch_errors +# You can override the default log routing for this filter here: +# set log_name = catch_errors +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = False [filter:cname_lookup] # Note: this middleware requires python-dnspython use = egg:swift#cname_lookup +# You can override the default log routing for this filter here: +# set log_name = cname_lookup +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = False # storage_domain = example.com # lookup_depth = 1 diff --git a/locale/swift.pot b/locale/swift.pot new file mode 100644 index 0000000000..7f905f2940 --- /dev/null +++ b/locale/swift.pot @@ -0,0 +1,1030 @@ +# Translations template for swift. +# Copyright (C) 2011 ORGANIZATION +# This file is distributed under the same license as the swift project. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011.
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: swift 1.2.0\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2011-01-26 23:59+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" +"Language-Team: LANGUAGE <LL@li.org>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.4\n" + +#: swift/account/auditor.py:52 swift/account/auditor.py:75 +#, python-format +msgid "" +"Since %(time)s: Account audits: %(passed)s passed audit, %(failed)s " +"failed audit" +msgstr "" + +#: swift/account/auditor.py:100 swift/container/auditor.py:103 +#, python-format +msgid "Audit passed for %s" +msgstr "" + +#: swift/account/auditor.py:103 +#, python-format +msgid "ERROR Could not get account info %s" +msgstr "" + +#: swift/account/reaper.py:80 swift/container/updater.py:64 +#, python-format +msgid "Loading account ring from %s" +msgstr "" + +#: swift/account/reaper.py:88 swift/obj/updater.py:57 +#, python-format +msgid "Loading container ring from %s" +msgstr "" + +#: swift/account/reaper.py:96 +#, python-format +msgid "Loading object ring from %s" +msgstr "" + +#: swift/account/reaper.py:106 +msgid "Daemon started." +msgstr "" + +#: swift/account/reaper.py:122 +#, python-format +msgid "Begin devices pass: %s" +msgstr "" + +#: swift/account/reaper.py:128 swift/common/utils.py:805 +#: swift/obj/updater.py:74 swift/obj/updater.py:113 +#, python-format +msgid "Skipping %s as it is not mounted" +msgstr "" + +#: swift/account/reaper.py:132 +#, python-format +msgid "Devices pass completed: %.02fs" +msgstr "" + +#: swift/account/reaper.py:215 +#, python-format +msgid "Beginning pass on account %s" +msgstr "" + +#: swift/account/reaper.py:238 +#, python-format +msgid "Exception with containers for account %s" +msgstr "" + +#: swift/account/reaper.py:243 +#, python-format +msgid "Exception with account %s" +msgstr "" + +#: swift/account/reaper.py:244 +#, python-format +msgid "Incomplete pass on account %s" +msgstr "" + +#: swift/account/reaper.py:246 +#, python-format +msgid ", %s containers deleted" +msgstr "" + +#: swift/account/reaper.py:248 +#, python-format +msgid ", %s objects deleted" +msgstr "" + +#: swift/account/reaper.py:250 +#, python-format +msgid ", %s containers remaining" +msgstr "" + +#: swift/account/reaper.py:253 +#, python-format +msgid ", %s objects remaining" +msgstr "" + +#: swift/account/reaper.py:255 +#, python-format +msgid ", %s containers possibly remaining" +msgstr "" + +#: swift/account/reaper.py:258 +#, python-format +msgid ", %s objects possibly remaining" +msgstr "" + +#: swift/account/reaper.py:261 +msgid ", return codes: " +msgstr "" + +#: swift/account/reaper.py:265 +#, python-format +msgid ", elapsed: %.02fs" +msgstr "" + +#: swift/account/reaper.py:320 swift/account/reaper.py:355 +#: swift/account/reaper.py:406 swift/container/updater.py:277 +#, python-format +msgid "Exception with %(ip)s:%(port)s/%(device)s" +msgstr "" + +#: swift/account/reaper.py:333 +#, python-format +msgid "Exception with objects for container %(container)s for account %(account)s" +msgstr "" + +#: swift/account/server.py:309 swift/container/server.py:397 +#: swift/obj/server.py:597 +#, python-format +msgid "ERROR __call__ error with %(method)s %(path)s " +msgstr "" + +#: swift/auth/server.py:96 swift/common/middleware/swauth.py:94 +msgid "No super_admin_key set in conf file! Exiting."
+msgstr "" + +#: swift/auth/server.py:152 +#, python-format +msgid "" +"\n" +"THERE ARE ACCOUNTS IN YOUR auth.db THAT DO NOT BEGIN WITH YOUR NEW " +"RESELLER\n" +"PREFIX OF \"%(reseller)s\".\n" +"YOU HAVE A FEW OPTIONS:\n" +" 1. RUN \"swift-auth-update-reseller-prefixes %(db_file)s " +"%(reseller)s\",\n" +" \"swift-init auth-server restart\", AND\n" +" \"swift-auth-recreate-accounts -K ...\" TO CREATE FRESH ACCOUNTS.\n" +" OR\n" +" 2. REMOVE %(db_file)s, RUN \"swift-init auth-server restart\", AND " +"RUN\n" +" \"swift-auth-add-user ...\" TO CREATE BRAND NEW ACCOUNTS THAT WAY." +"\n" +" OR\n" +" 3. ADD \"reseller_prefix = %(previous)s\" (WITHOUT THE QUOTES) TO " +"YOUR\n" +" proxy-server.conf IN THE [filter:auth] SECTION AND TO YOUR\n" +" auth-server.conf IN THE [app:auth-server] SECTION AND RUN\n" +" \"swift-init proxy-server restart\" AND \"swift-init auth-server " +"restart\"\n" +" TO REVERT BACK TO YOUR PREVIOUS RESELLER PREFIX.\n" +"\n" +" %(note)s\n" +" " +msgstr "" + +#: swift/auth/server.py:173 +msgid "" +"\n" +" SINCE YOUR PREVIOUS RESELLER PREFIX WAS AN EMPTY STRING, IT IS NOT\n" +" RECOMMENDED TO PERFORM OPTION 3 AS THAT WOULD MAKE SUPPORTING " +"MULTIPLE\n" +" RESELLERS MORE DIFFICULT.\n" +" " +msgstr "" + +#: swift/auth/server.py:178 +msgid "CRITICAL: " +msgstr "" + +#: swift/auth/server.py:213 +#, python-format +msgid "ERROR attempting to create account %(url)s: %(status)s %(reason)s" +msgstr "" + +#: swift/auth/server.py:346 +#, python-format +msgid "" +"ALREADY EXISTS create_user(%(account)s, %(user)s, _, %(admin)s, " +"%(reseller_admin)s) [%(elapsed).02f]" +msgstr "" + +#: swift/auth/server.py:364 +#, python-format +msgid "" +"FAILED create_user(%(account)s, %(user)s, _, %(admin)s, " +"%(reseller_admin)s) [%(elapsed).02f]" +msgstr "" + +#: swift/auth/server.py:381 +#, python-format +msgid "" +"SUCCESS create_user(%(account)s, %(user)s, _, %(admin)s, " +"%(reseller_admin)s) = %(url)s [%(elapsed).02f]" +msgstr "" + +#: swift/auth/server.py:656 +msgid "ERROR Unhandled exception in ReST request" +msgstr "" + +#: swift/common/bench.py:85 +#, python-format +msgid "%(complete)s %(title)s [%(fail)s failures], %(rate).01f/s" +msgstr "" + +#: swift/common/bench.py:97 +msgid "CannotSendRequest. Skipping..." 
+msgstr "" + +#: swift/common/bufferedhttp.py:96 +#, python-format +msgid "HTTP PERF: %(time).5f seconds to %(method)s %(host)s:%(port)s %(path)s)" +msgstr "" + +#: swift/common/db.py:299 +msgid "Broker error trying to rollback locked connection" +msgstr "" + +#: swift/common/db.py:754 swift/common/db.py:1221 +#, python-format +msgid "Invalid pending entry %(file)s: %(entry)s" +msgstr "" + +#: swift/common/db_replicator.py:84 +#, python-format +msgid "ERROR reading HTTP response from %s" +msgstr "" + +#: swift/common/db_replicator.py:123 +#, python-format +msgid "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" +msgstr "" + +#: swift/common/db_replicator.py:129 +#, python-format +msgid "Removed %(remove)d dbs" +msgstr "" + +#: swift/common/db_replicator.py:130 +#, python-format +msgid "%(success)s successes, %(failure)s failures" +msgstr "" + +#: swift/common/db_replicator.py:155 +#, python-format +msgid "ERROR rsync failed with %(code)s: %(args)s" +msgstr "" + +#: swift/common/db_replicator.py:205 +#, python-format +msgid "Syncing chunks with %s" +msgstr "" + +#: swift/common/db_replicator.py:213 +#, python-format +msgid "ERROR Bad response %(status)s from %(host)s" +msgstr "" + +#: swift/common/db_replicator.py:278 +#, python-format +msgid "ERROR Unable to connect to remote server: %s" +msgstr "" + +#: swift/common/db_replicator.py:316 +#, python-format +msgid "Replicating db %s" +msgstr "" + +#: swift/common/db_replicator.py:325 swift/common/db_replicator.py:479 +#, python-format +msgid "Quarantining DB %s" +msgstr "" + +#: swift/common/db_replicator.py:328 +#, python-format +msgid "ERROR reading db %s" +msgstr "" + +#: swift/common/db_replicator.py:361 +#, python-format +msgid "ERROR Remote drive not mounted %s" +msgstr "" + +#: swift/common/db_replicator.py:363 +#, python-format +msgid "ERROR syncing %(file)s with node %(node)s" +msgstr "" + +#: swift/common/db_replicator.py:405 +msgid "ERROR Failed to get my own IPs?" +msgstr "" + +#: swift/common/db_replicator.py:412 +#, python-format +msgid "Skipping %(device)s as it is not mounted" +msgstr "" + +#: swift/common/db_replicator.py:420 +msgid "Beginning replication run" +msgstr "" + +#: swift/common/db_replicator.py:425 +msgid "Replication run OVER" +msgstr "" + +#: swift/common/db_replicator.py:436 +msgid "ERROR trying to replicate" +msgstr "" + +#: swift/common/memcached.py:69 +#, python-format +msgid "Timeout %(action)s to memcached: %(server)s" +msgstr "" + +#: swift/common/memcached.py:72 +#, python-format +msgid "Error %(action)s to memcached: %(server)s" +msgstr "" + +#: swift/common/memcached.py:81 +#, python-format +msgid "Error limiting server %s" +msgstr "" + +#: swift/common/utils.py:88 +#, python-format +msgid "Unable to locate %s in libc. Leaving as a no-op." 
+msgstr "" + +#: swift/common/utils.py:255 +msgid "STDOUT: Connection reset by peer" +msgstr "" + +#: swift/common/utils.py:257 swift/common/utils.py:260 +#, python-format +msgid "STDOUT: %s" +msgstr "" + +#: swift/common/utils.py:324 +msgid "Connection refused" +msgstr "" + +#: swift/common/utils.py:326 +msgid "Host unreachable" +msgstr "" + +#: swift/common/utils.py:328 +msgid "Connection timeout" +msgstr "" + +#: swift/common/utils.py:464 +msgid "UNCAUGHT EXCEPTION" +msgstr "" + +#: swift/common/utils.py:511 +msgid "Error: missing config file argument" +msgstr "" + +#: swift/common/utils.py:516 +#, python-format +msgid "Error: unable to locate %s" +msgstr "" + +#: swift/common/utils.py:743 +#, python-format +msgid "Unable to read config file %s" +msgstr "" + +#: swift/common/utils.py:749 +#, python-format +msgid "Unable to find %s config section in %s" +msgstr "" + +#: swift/common/middleware/catch_errors.py:39 +#, python-format +msgid "Error: %s" +msgstr "" + +#: swift/common/middleware/cname_lookup.py:91 +#, python-format +msgid "Mapped %(given_domain)s to %(found_domain)s" +msgstr "" + +#: swift/common/middleware/cname_lookup.py:102 +#, python-format +msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" +msgstr "" + +#: swift/common/middleware/ratelimit.py:172 +msgid "Returning 497 because of blacklisting" +msgstr "" + +#: swift/common/middleware/ratelimit.py:185 +#, python-format +msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" +msgstr "" + +#: swift/common/middleware/ratelimit.py:192 +#, python-format +msgid "Returning 498 because of ops rate limiting (Max Sleep) %s" +msgstr "" + +#: swift/common/middleware/ratelimit.py:212 +msgid "Warning: Cannot ratelimit without a memcached client" +msgstr "" + +#: swift/common/middleware/swauth.py:635 +#, python-format +msgid "" +"ERROR: Exception while trying to communicate with " +"%(scheme)s://%(host)s:%(port)s/%(path)s" +msgstr "" + +#: swift/container/auditor.py:54 swift/container/auditor.py:78 +#, python-format +msgid "" +"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " +"audit" +msgstr "" + +#: swift/container/auditor.py:68 +msgid "Begin container audit \"once\" mode" +msgstr "" + +#: swift/container/auditor.py:88 +#, python-format +msgid "Container audit \"once\" mode completed: %.02fs" +msgstr "" + +#: swift/container/auditor.py:106 +#, python-format +msgid "ERROR Could not get container info %s" +msgstr "" + +#: swift/container/server.py:114 +#, python-format +msgid "" +"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " +"later): Response %(status)s %(reason)s" +msgstr "" + +#: swift/container/server.py:122 +#, python-format +msgid "" +"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " +"later)" +msgstr "" + +#: swift/container/updater.py:78 swift/obj/replicator.py:492 +#, python-format +msgid "%s is not mounted" +msgstr "" + +#: swift/container/updater.py:97 +#, python-format +msgid "ERROR with loading suppressions from %s: " +msgstr "" + +#: swift/container/updater.py:107 +msgid "Begin container update sweep" +msgstr "" + +#: swift/container/updater.py:140 +#, python-format +msgid "" +"Container update sweep of %(path)s completed: %(elapsed).02fs, " +"%(success)s successes, %(fail)s failures, %(no_change)s with no changes" +msgstr "" + +#: swift/container/updater.py:154 +#, python-format +msgid "Container update sweep completed: %.02fs" +msgstr "" + +#: swift/container/updater.py:164 +msgid "Begin container 
update single threaded sweep" +msgstr "" + +#: swift/container/updater.py:172 +#, python-format +msgid "" +"Container update single threaded sweep completed: %(elapsed).02fs, " +"%(success)s successes, %(fail)s failures, %(no_change)s with no changes" +msgstr "" + +#: swift/container/updater.py:224 +#, python-format +msgid "Update report sent for %(container)s %(dbfile)s" +msgstr "" + +#: swift/container/updater.py:232 +#, python-format +msgid "Update report failed for %(container)s %(dbfile)s" +msgstr "" + +#: swift/container/updater.py:266 +#, python-format +msgid "" +"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " +"later): " +msgstr "" + +#: swift/obj/auditor.py:61 +#, python-format +msgid "Begin object audit \"%s\" mode" +msgstr "" + +#: swift/obj/auditor.py:73 +#, python-format +msgid "" +"Since %(start_time)s: Locally: %(passes)d passed audit, %(quars)d " +"quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: " +"%(brate).2f" +msgstr "" + +#: swift/obj/auditor.py:90 +#, python-format +msgid "" +"Object audit \"%(mode)s\" mode completed: %(elapsed).02fs. Total " +"files/sec: %(frate).2f , Total bytes/sec: %(brate).2f " +msgstr "" + +#: swift/obj/auditor.py:141 +#, python-format +msgid "ERROR Object %(obj)s failed audit and will be quarantined: %(err)s" +msgstr "" + +#: swift/obj/auditor.py:150 +#, python-format +msgid "ERROR Trying to audit %s" +msgstr "" + +#: swift/obj/replicator.py:182 +msgid "Error hashing suffix" +msgstr "" + +#: swift/obj/replicator.py:246 +#, python-format +msgid "Killing long-running rsync: %s" +msgstr "" + +#: swift/obj/replicator.py:257 +#, python-format +msgid "Bad rsync return code: %(args)s -> %(ret)d" +msgstr "" + +#: swift/obj/replicator.py:261 swift/obj/replicator.py:265 +#, python-format +msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" +msgstr "" + +#: swift/obj/replicator.py:350 +#, python-format +msgid "Removing partition: %s" +msgstr "" + +#: swift/obj/replicator.py:353 +msgid "Error syncing handoff partition" +msgstr "" + +#: swift/obj/replicator.py:383 +#, python-format +msgid "%(ip)s/%(device)s responded as unmounted" +msgstr "" + +#: swift/obj/replicator.py:388 +#, python-format +msgid "Invalid response %(resp)s from %(ip)s" +msgstr "" + +#: swift/obj/replicator.py:410 +#, python-format +msgid "Error syncing with node: %s" +msgstr "" + +#: swift/obj/replicator.py:414 +msgid "Error syncing partition" +msgstr "" + +#: swift/obj/replicator.py:424 +#, python-format +msgid "" +"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " +"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" +msgstr "" + +#: swift/obj/replicator.py:433 +#, python-format +msgid "" +"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% " +"synced" +msgstr "" + +#: swift/obj/replicator.py:439 +#, python-format +msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" +msgstr "" + +#: swift/obj/replicator.py:446 +#, python-format +msgid "Nothing replicated for %s seconds." +msgstr "" + +#: swift/obj/replicator.py:475 +msgid "Lockup detected.. killing live coros." +msgstr "" + +#: swift/obj/replicator.py:530 +msgid "Ring change detected. Aborting current replication pass." +msgstr "" + +#: swift/obj/replicator.py:540 +msgid "Exception in top-level replication loop" +msgstr "" + +#: swift/obj/replicator.py:549 +msgid "Running object replicator in script mode." 
+msgstr "" + +#: swift/obj/replicator.py:553 swift/obj/replicator.py:565 +#, python-format +msgid "Object replication complete. (%.02f minutes)" +msgstr "" + +#: swift/obj/replicator.py:560 +msgid "Starting object replication pass." +msgstr "" + +#: swift/obj/replicator.py:566 +#, python-format +msgid "Replication sleeping for %s seconds." +msgstr "" + +#: swift/obj/server.py:313 +#, python-format +msgid "" +"ERROR Container update failed (saving for async update later): %(status)d" +" response from %(ip)s:%(port)s/%(dev)s" +msgstr "" + +#: swift/obj/server.py:319 +#, python-format +msgid "" +"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for " +"async update later)" +msgstr "" + +#: swift/obj/updater.py:65 +msgid "Begin object update sweep" +msgstr "" + +#: swift/obj/updater.py:89 +#, python-format +msgid "" +"Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s" +" successes, %(fail)s failures" +msgstr "" + +#: swift/obj/updater.py:98 +#, python-format +msgid "Object update sweep completed: %.02fs" +msgstr "" + +#: swift/obj/updater.py:105 +msgid "Begin object update single threaded sweep" +msgstr "" + +#: swift/obj/updater.py:117 +#, python-format +msgid "" +"Object update single threaded sweep completed: %(elapsed).02fs, " +"%(success)s successes, %(fail)s failures" +msgstr "" + +#: swift/obj/updater.py:157 +#, python-format +msgid "ERROR Pickle problem, quarantining %s" +msgstr "" + +#: swift/obj/updater.py:177 +#, python-format +msgid "Update sent for %(obj)s %(path)s" +msgstr "" + +#: swift/obj/updater.py:182 +#, python-format +msgid "Update failed for %(obj)s %(path)s" +msgstr "" + +#: swift/obj/updater.py:206 +#, python-format +msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" +msgstr "" + +#: swift/proxy/server.py:165 swift/proxy/server.py:629 +#: swift/proxy/server.py:696 swift/proxy/server.py:712 +#: swift/proxy/server.py:721 swift/proxy/server.py:1004 +#: swift/proxy/server.py:1044 swift/proxy/server.py:1089 +msgid "Object" +msgstr "" + +#: swift/proxy/server.py:170 +#, python-format +msgid "Could not load object segment %(path)s: %(status)s" +msgstr "" + +#: swift/proxy/server.py:177 swift/proxy/server.py:210 +#: swift/proxy/server.py:257 +#, python-format +msgid "ERROR: While processing manifest /%(acc)s/%(cont)s/%(obj)s" +msgstr "" + +#: swift/proxy/server.py:292 +#, python-format +msgid "%(msg)s %(ip)s:%(port)s" +msgstr "" + +#: swift/proxy/server.py:304 +#, python-format +msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" +msgstr "" + +#: swift/proxy/server.py:328 +#, python-format +msgid "Node error limited %(ip)s:%(port)s (%(device)s)" +msgstr "" + +#: swift/proxy/server.py:388 swift/proxy/server.py:1451 +#: swift/proxy/server.py:1497 swift/proxy/server.py:1545 +#: swift/proxy/server.py:1590 +msgid "Account" +msgstr "" + +#: swift/proxy/server.py:389 +#, python-format +msgid "Trying to get account info for %s" +msgstr "" + +#: swift/proxy/server.py:466 swift/proxy/server.py:740 +#: swift/proxy/server.py:772 swift/proxy/server.py:1214 +#: swift/proxy/server.py:1301 swift/proxy/server.py:1356 +#: swift/proxy/server.py:1413 +msgid "Container" +msgstr "" + +#: swift/proxy/server.py:467 +#, python-format +msgid "Trying to get container info for %s" +msgstr "" + +#: swift/proxy/server.py:552 +#, python-format +msgid "%(type)s returning 503 for %(statuses)s" +msgstr "" + +#: swift/proxy/server.py:598 swift/proxy/server.py:697 +#, python-format +msgid "Trying to %(method)s %(path)s" +msgstr "" + +#: 
swift/proxy/server.py:627 +msgid "Client disconnected on read" +msgstr "" + +#: swift/proxy/server.py:630 +#, python-format +msgid "Trying to read during GET of %s" +msgstr "" + +#: swift/proxy/server.py:653 +#, python-format +msgid "ERROR %(status)d %(body)s From %(type)s Server" +msgstr "" + +#: swift/proxy/server.py:692 +#, python-format +msgid "ERROR %(status)d %(body)s From Object Server" +msgstr "" + +#: swift/proxy/server.py:776 swift/proxy/server.py:783 +#, python-format +msgid "Object manifest GET could not continue listing: %s %s" +msgstr "" + +#: swift/proxy/server.py:905 +msgid "Object POST" +msgstr "" + +#: swift/proxy/server.py:1005 +#, python-format +msgid "Expect: 100-continue on %s" +msgstr "" + +#: swift/proxy/server.py:1017 +#, python-format +msgid "Object PUT returning 503, %(conns)s/%(nodes)s required connections" +msgstr "" + +#: swift/proxy/server.py:1045 +#, python-format +msgid "Trying to write to %s" +msgstr "" + +#: swift/proxy/server.py:1049 +#, python-format +msgid "" +"Object PUT exceptions during send, %(conns)s/%(nodes)s required " +"connections" +msgstr "" + +#: swift/proxy/server.py:1058 +#, python-format +msgid "ERROR Client read timeout (%ss)" +msgstr "" + +#: swift/proxy/server.py:1063 +msgid "ERROR Exception causing client disconnect" +msgstr "" + +#: swift/proxy/server.py:1068 +msgid "Client disconnected without sending enough data" +msgstr "" + +#: swift/proxy/server.py:1083 +#, python-format +msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" +msgstr "" + +#: swift/proxy/server.py:1090 +#, python-format +msgid "Trying to get final status of PUT to %s" +msgstr "" + +#: swift/proxy/server.py:1093 +#, python-format +msgid "Object servers returned %s mismatched etags" +msgstr "" + +#: swift/proxy/server.py:1101 +msgid "Object PUT" +msgstr "" + +#: swift/proxy/server.py:1153 +msgid "Object DELETE" +msgstr "" + +#: swift/proxy/server.py:1302 swift/proxy/server.py:1498 +#, python-format +msgid "Trying to PUT to %s" +msgstr "" + +#: swift/proxy/server.py:1314 +msgid "Container PUT" +msgstr "" + +#: swift/proxy/server.py:1357 swift/proxy/server.py:1546 +#, python-format +msgid "Trying to POST %s" +msgstr "" + +#: swift/proxy/server.py:1369 +msgid "Container POST" +msgstr "" + +#: swift/proxy/server.py:1414 swift/proxy/server.py:1591 +#, python-format +msgid "Trying to DELETE %s" +msgstr "" + +#: swift/proxy/server.py:1426 +msgid "Container DELETE" +msgstr "" + +#: swift/proxy/server.py:1433 +msgid "Returning 503 because not all container nodes confirmed DELETE" +msgstr "" + +#: swift/proxy/server.py:1508 +msgid "Account PUT" +msgstr "" + +#: swift/proxy/server.py:1556 +msgid "Account POST" +msgstr "" + +#: swift/proxy/server.py:1601 +msgid "Account DELETE" +msgstr "" + +#: swift/proxy/server.py:1757 +msgid "ERROR Unhandled exception in request" +msgstr "" + +#: swift/stats/access_processor.py:63 swift/stats/stats_processor.py:40 +#, python-format +msgid "Bad line data: %s" +msgstr "" + +#: swift/stats/access_processor.py:67 +#, python-format +msgid "Bad server name: found \"%(found)s\" expected \"%(expected)s\"" +msgstr "" + +#: swift/stats/access_processor.py:75 +#, python-format +msgid "Invalid path: %(error)s from data: %(log)s" +msgstr "" + +#: swift/stats/access_processor.py:199 +#, python-format +msgid "I found a bunch of bad lines in %(name)s (%(bad)d bad, %(total)d total)" +msgstr "" + +#: swift/stats/account_stats.py:55 +msgid "Gathering account stats" +msgstr "" + +#: swift/stats/account_stats.py:59 +#, python-format +msgid 
"Gathering account stats complete (%0.2f minutes)" +msgstr "" + +#: swift/stats/account_stats.py:75 +#, python-format +msgid "Device %s is not mounted, skipping." +msgstr "" + +#: swift/stats/account_stats.py:81 +#, python-format +msgid "Path %s does not exist, skipping." +msgstr "" + +#: swift/stats/log_processor.py:62 +#, python-format +msgid "Loaded plugin \"%s\"" +msgstr "" + +#: swift/stats/log_processor.py:79 +#, python-format +msgid "Processing %(obj)s with plugin \"%(plugin)s\"" +msgstr "" + +#: swift/stats/log_processor.py:179 +#, python-format +msgid "Bad compressed data for %s" +msgstr "" + +#: swift/stats/log_processor.py:240 +msgid "Beginning log processing" +msgstr "" + +#: swift/stats/log_processor.py:278 +#, python-format +msgid "found %d processed files" +msgstr "" + +#: swift/stats/log_processor.py:283 +#, python-format +msgid "loaded %d files to process" +msgstr "" + +#: swift/stats/log_processor.py:286 swift/stats/log_processor.py:360 +#, python-format +msgid "Log processing done (%0.2f minutes)" +msgstr "" + +#: swift/stats/log_uploader.py:71 +msgid "Uploading logs" +msgstr "" + +#: swift/stats/log_uploader.py:74 +#, python-format +msgid "Uploading logs complete (%0.2f minutes)" +msgstr "" + +#: swift/stats/log_uploader.py:129 +#, python-format +msgid "Unexpected log: %s" +msgstr "" + +#: swift/stats/log_uploader.py:135 +#, python-format +msgid "Skipping log: %(file)s (< %(cutoff)d seconds old)" +msgstr "" + +#: swift/stats/log_uploader.py:142 +#, python-format +msgid "Log %s is 0 length, skipping" +msgstr "" + +#: swift/stats/log_uploader.py:144 +#, python-format +msgid "Processing log: %s" +msgstr "" + +#: swift/stats/log_uploader.py:165 +#, python-format +msgid "Uploaded log %(file)s to %(target)s" +msgstr "" + +#: swift/stats/log_uploader.py:170 +#, python-format +msgid "ERROR: Upload of log %s failed!" 
+msgstr "" + diff --git a/setup.cfg b/setup.cfg index d53addcbf3..50cfaf10fc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -7,3 +7,17 @@ source-dir = doc/source tag_build = tag_date = 0 tag_svn_revision = 0 + +[compile_catalog] +directory = locale +domain = swift + +[update_catalog] +domain = swift +output_dir = locale +input_file = locale/swift.pot + +[extract_messages] +keywords = _ l_ lazy_gettext +mapping_file = babel.cfg +output_file = locale/swift.pot diff --git a/setup.py b/setup.py index 912213c1c9..c80d62ddc8 100644 --- a/setup.py +++ b/setup.py @@ -18,6 +18,10 @@ from setuptools import setup, find_packages from setuptools.command.sdist import sdist import os import subprocess +try: + from babel.messages import frontend +except ImportError: + frontend = None from swift import __version__ as version @@ -39,6 +43,19 @@ class local_sdist(sdist): name = 'swift' + +cmdclass = {'sdist': local_sdist} + + +if frontend: + cmdclass.update({ + 'compile_catalog': frontend.compile_catalog, + 'extract_messages': frontend.extract_messages, + 'init_catalog': frontend.init_catalog, + 'update_catalog': frontend.update_catalog, + }) + + setup( name=name, version=version, @@ -49,7 +66,7 @@ setup( url='https://launchpad.net/swift', packages=find_packages(exclude=['test', 'bin']), test_suite='nose.collector', - cmdclass={'sdist': local_sdist}, + cmdclass=cmdclass, classifiers=[ 'Development Status :: 4 - Beta', 'License :: OSI Approved :: Apache Software License', diff --git a/swift/__init__.py b/swift/__init__.py index 0bd0062056..316208f929 100644 --- a/swift/__init__.py +++ b/swift/__init__.py @@ -1,5 +1,5 @@ import gettext -__version__ = '1.1.0' +__version__ = '1.2-gamma' gettext.install('swift') diff --git a/swift/account/reaper.py b/swift/account/reaper.py index d31558b9c6..dd0d4b3890 100644 --- a/swift/account/reaper.py +++ b/swift/account/reaper.py @@ -229,7 +229,7 @@ class AccountReaper(Daemon): if not containers: break try: - for (container, _, _, _) in containers: + for (container, _junk, _junk, _junk) in containers: self.container_pool.spawn(self.reap_container, account, partition, nodes, container) self.container_pool.waitall() diff --git a/swift/account/server.py b/swift/account/server.py index a1e20c1f4f..2c83f51cc6 100644 --- a/swift/account/server.py +++ b/swift/account/server.py @@ -305,7 +305,7 @@ class AccountController(object): res = getattr(self, req.method)(req) else: res = HTTPMethodNotAllowed() - except: + except Exception: self.logger.exception(_('ERROR __call__ error with %(method)s' ' %(path)s '), {'method': req.method, 'path': req.path}) res = HTTPInternalServerError(body=traceback.format_exc()) diff --git a/swift/auth/server.py b/swift/auth/server.py index 967f853291..2d5f813897 100644 --- a/swift/auth/server.py +++ b/swift/auth/server.py @@ -95,7 +95,7 @@ class AuthController(object): msg = _('No super_admin_key set in conf file! Exiting.') try: self.logger.critical(msg) - except: + except Exception: pass raise ValueError(msg) self.swift_dir = conf.get('swift_dir', '/etc/swift') @@ -148,31 +148,32 @@ class AuthController(object): previous_prefix = '' if '_' in row[0]: previous_prefix = row[0].split('_', 1)[0] - msg = _((''' + msg = (_(''' THERE ARE ACCOUNTS IN YOUR auth.db THAT DO NOT BEGIN WITH YOUR NEW RESELLER -PREFIX OF "%s". +PREFIX OF "%(reseller)s". YOU HAVE A FEW OPTIONS: - 1) RUN "swift-auth-update-reseller-prefixes %s %s", + 1. 
RUN "swift-auth-update-reseller-prefixes %(db_file)s %(reseller)s", "swift-init auth-server restart", AND "swift-auth-recreate-accounts -K ..." TO CREATE FRESH ACCOUNTS. OR - 2) REMOVE %s, RUN "swift-init auth-server restart", AND RUN + 2. REMOVE %(db_file)s, RUN "swift-init auth-server restart", AND RUN "swift-auth-add-user ..." TO CREATE BRAND NEW ACCOUNTS THAT WAY. OR - 3) ADD "reseller_prefix = %s" (WITHOUT THE QUOTES) TO YOUR + 3. ADD "reseller_prefix = %(previous)s" (WITHOUT THE QUOTES) TO YOUR proxy-server.conf IN THE [filter:auth] SECTION AND TO YOUR auth-server.conf IN THE [app:auth-server] SECTION AND RUN "swift-init proxy-server restart" AND "swift-init auth-server restart" TO REVERT BACK TO YOUR PREVIOUS RESELLER PREFIX. - %s - ''') % (self.reseller_prefix.rstrip('_'), self.db_file, - self.reseller_prefix.rstrip('_'), self.db_file, - previous_prefix, previous_prefix and ' ' or _(''' + %(note)s + ''') % {'reseller': self.reseller_prefix.rstrip('_'), + 'db_file': self.db_file, + 'previous': previous_prefix, + 'note': previous_prefix and ' ' or _(''' SINCE YOUR PREVIOUS RESELLER PREFIX WAS AN EMPTY STRING, IT IS NOT RECOMMENDED TO PERFORM OPTION 3 AS THAT WOULD MAKE SUPPORTING MULTIPLE RESELLERS MORE DIFFICULT. - ''').strip())).strip() + ''').strip()}).strip() self.logger.critical(_('CRITICAL: ') + ' '.join(msg.split())) raise Exception('\n' + msg) @@ -236,24 +237,25 @@ YOU HAVE A FEW OPTIONS: except Exception, err: try: conn.close() - except: + except Exception: pass self.conn = get_db_connection(self.db_file) raise err def validate_s3_sign(self, request, token): - cfaccount, sign = request.headers['Authorization'].split(' ')[-1].split(':') + account, user, sign = \ + request.headers['Authorization'].split(' ')[-1].split(':') msg = base64.urlsafe_b64decode(unquote(token)) rv = False with self.get_conn() as conn: row = conn.execute(''' - SELECT account, user, password FROM account - WHERE cfaccount = ?''', - (cfaccount,)).fetchone() - rv = (84000, row[0], row[1], cfaccount) - + SELECT password, cfaccount FROM account + WHERE account = ? AND user = ?''', + (account, user)).fetchone() + rv = (84000, account, user, row[1]) if rv: - s = base64.encodestring(hmac.new(row[2], msg, sha1).digest()).strip() + s = base64.encodestring(hmac.new(row[0], msg, + sha1).digest()).strip() self.logger.info("orig %s, calc %s" % (sign, s)) if sign != s: rv = False @@ -340,10 +342,14 @@ YOU HAVE A FEW OPTIONS: 'SELECT url FROM account WHERE account = ? 
AND user = ?', (account, user)).fetchone() if row: - self.logger.info( - _('ALREADY EXISTS create_user(%s, %s, _, %s, %s) [%.02f]') % - (repr(account), repr(user), repr(admin), - repr(reseller_admin), time() - begin)) + self.logger.info(_('ALREADY EXISTS create_user(%(account)s, ' + '%(user)s, _, %(admin)s, %(reseller_admin)s) ' + '[%(elapsed).02f]') % + {'account': repr(account), + 'user': repr(user), + 'admin': repr(admin), + 'reseller_admin': repr(reseller_admin), + 'elapsed': time() - begin}) return 'already exists' row = conn.execute( 'SELECT url, cfaccount FROM account WHERE account = ?', @@ -354,10 +360,14 @@ YOU HAVE A FEW OPTIONS: else: account_hash = self.add_storage_account() if not account_hash: - self.logger.info( - _('FAILED create_user(%s, %s, _, %s, %s) [%.02f]') % - (repr(account), repr(user), repr(admin), - repr(reseller_admin), time() - begin)) + self.logger.info(_('FAILED create_user(%(account)s, ' + '%(user)s, _, %(admin)s, %(reseller_admin)s) ' + '[%(elapsed).02f]') % + {'account': repr(account), + 'user': repr(user), + 'admin': repr(admin), + 'reseller_admin': repr(reseller_admin), + 'elapsed': time() - begin}) return False url = self.default_cluster_url.rstrip('/') + '/' + account_hash conn.execute('''INSERT INTO account @@ -367,10 +377,11 @@ YOU HAVE A FEW OPTIONS: (account, url, account_hash, user, password, admin and 't' or '', reseller_admin and 't' or '')) conn.commit() - self.logger.info( - _('SUCCESS create_user(%s, %s, _, %s, %s) = %s [%.02f]') % - (repr(account), repr(user), repr(admin), repr(reseller_admin), - repr(url), time() - begin)) + self.logger.info(_('SUCCESS create_user(%(account)s, %(user)s, _, ' + '%(admin)s, %(reseller_admin)s) = %(url)s [%(elapsed).02f]') % + {'account': repr(account), 'user': repr(user), + 'admin': repr(admin), 'reseller_admin': repr(reseller_admin), + 'url': repr(url), 'elapsed': time() - begin}) return url def recreate_accounts(self): @@ -435,12 +446,15 @@ YOU HAVE A FEW OPTIONS: :param request: webob.Request object """ try: - _, token = split_path(request.path, minsegs=2) + _junk, token = split_path(request.path, minsegs=2) except ValueError: return HTTPBadRequest() # Retrieves (TTL, account, user, cfaccount) if valid, False otherwise + headers = {} if 'Authorization' in request.headers: validation = self.validate_s3_sign(request, token) + if validation: + headers['X-Auth-Account-Suffix'] = validation[3] else: validation = self.validate_token(token) if not validation: @@ -450,8 +464,9 @@ YOU HAVE A FEW OPTIONS: # admin access to a cfaccount or ".reseller_admin" to access to all # accounts, including creating new ones. 
groups.append(validation[3]) - return HTTPNoContent(headers={'X-Auth-TTL': validation[0], - 'X-Auth-Groups': ','.join(groups)}) + headers['X-Auth-TTL'] = validation[0] + headers['X-Auth-Groups'] = ','.join(groups) + return HTTPNoContent(headers=headers) def handle_add_user(self, request): """ @@ -474,7 +489,8 @@ YOU HAVE A FEW OPTIONS: :param request: webob.Request object """ try: - _, account_name, user_name = split_path(request.path, minsegs=3) + _junk, account_name, user_name = \ + split_path(request.path, minsegs=3) except ValueError: return HTTPBadRequest() create_reseller_admin = \ @@ -634,7 +650,7 @@ YOU HAVE A FEW OPTIONS: else: return HTTPBadRequest(request=env)(env, start_response) response = handler(req) - except: + except Exception: self.logger.exception( _('ERROR Unhandled exception in ReST request')) return HTTPServiceUnavailable(request=req)(env, start_response) diff --git a/swift/common/bench.py b/swift/common/bench.py index 169497ef13..482c2d77aa 100644 --- a/swift/common/bench.py +++ b/swift/common/bench.py @@ -96,7 +96,7 @@ class Bench(object): self.logger.info(_("CannotSendRequest. Skipping...")) try: hc.close() - except: + except Exception: pass self.failures += 1 hc = self.conn_pool.create() diff --git a/swift/common/bufferedhttp.py b/swift/common/bufferedhttp.py index 4fb090ca92..cf74dbe8fa 100644 --- a/swift/common/bufferedhttp.py +++ b/swift/common/bufferedhttp.py @@ -118,6 +118,8 @@ def http_connect(ipaddr, port, device, partition, method, path, :param ssl: set True if SSL should be used (default: False) :returns: HTTPConnection object """ + if not port: + port = 443 if ssl else 80 if ssl: conn = HTTPSConnection('%s:%s' % (ipaddr, port)) else: @@ -150,6 +152,8 @@ def http_connect_raw(ipaddr, port, method, path, headers=None, :param ssl: set True if SSL should be used (default: False) :returns: HTTPConnection object """ + if not port: + port = 443 if ssl else 80 if ssl: conn = HTTPSConnection('%s:%s' % (ipaddr, port)) else: diff --git a/swift/common/client.py b/swift/common/client.py index e3536e894f..bf402adb76 100644 --- a/swift/common/client.py +++ b/swift/common/client.py @@ -18,22 +18,30 @@ Cloud Files client library used internally """ import socket from cStringIO import StringIO -from httplib import HTTPException, HTTPSConnection +from httplib import HTTPException from re import compile, DOTALL from tokenize import generate_tokens, STRING, NAME, OP from urllib import quote as _quote, unquote from urlparse import urlparse, urlunparse +try: + from eventlet.green.httplib import HTTPSConnection +except ImportError: + from httplib import HTTPSConnection + try: from eventlet import sleep -except: +except ImportError: from time import sleep try: from swift.common.bufferedhttp \ import BufferedHTTPConnection as HTTPConnection -except: - from httplib import HTTPConnection +except ImportError: + try: + from eventlet.green.httplib import HTTPConnection + except ImportError: + from httplib import HTTPConnection def quote(value, safe='/'): @@ -68,7 +76,7 @@ except ImportError: res = [] consts = {'true': True, 'false': False, 'null': None} string = '(' + comments.sub('', string) + ')' - for type, val, _, _, _ in \ + for type, val, _junk, _junk, _junk in \ generate_tokens(StringIO(string).readline): if (type == OP and val not in '[]{}:,()-') or \ (type == NAME and val not in consts): @@ -79,7 +87,7 @@ except ImportError: else: res.append(val) return eval(''.join(res), {}, consts) - except: + except Exception: raise AttributeError() @@ -688,7 +696,7 @@ class 
Connection(object): """Convenience class to make requests that will also retry the request""" def __init__(self, authurl, user, key, retries=5, preauthurl=None, - preauthtoken=None, snet=False): + preauthtoken=None, snet=False, starting_backoff=1): """ :param authurl: authenitcation URL :param user: user name to authenticate as @@ -708,6 +716,7 @@ class Connection(object): self.token = preauthtoken self.attempts = 0 self.snet = snet + self.starting_backoff = starting_backoff def get_auth(self): return get_auth(self.authurl, self.user, self.key, snet=self.snet) @@ -715,9 +724,9 @@ class Connection(object): def http_connection(self): return http_connection(self.url) - def _retry(self, func, *args, **kwargs): + def _retry(self, reset_func, func, *args, **kwargs): self.attempts = 0 - backoff = 1 + backoff = self.starting_backoff while self.attempts <= self.retries: self.attempts += 1 try: @@ -746,10 +755,12 @@ class Connection(object): raise sleep(backoff) backoff *= 2 + if reset_func: + reset_func(func, *args, **kwargs) def head_account(self): """Wrapper for :func:`head_account`""" - return self._retry(head_account) + return self._retry(None, head_account) def get_account(self, marker=None, limit=None, prefix=None, full_listing=False): @@ -757,16 +768,16 @@ class Connection(object): # TODO(unknown): With full_listing=True this will restart the entire # listing with each retry. Need to make a better version that just # retries where it left off. - return self._retry(get_account, marker=marker, limit=limit, + return self._retry(None, get_account, marker=marker, limit=limit, prefix=prefix, full_listing=full_listing) def post_account(self, headers): """Wrapper for :func:`post_account`""" - return self._retry(post_account, headers) + return self._retry(None, post_account, headers) def head_container(self, container): """Wrapper for :func:`head_container`""" - return self._retry(head_container, container) + return self._retry(None, head_container, container) def get_container(self, container, marker=None, limit=None, prefix=None, delimiter=None, full_listing=False): @@ -774,43 +785,55 @@ class Connection(object): # TODO(unknown): With full_listing=True this will restart the entire # listing with each retry. Need to make a better version that just # retries where it left off. 
- return self._retry(get_container, container, marker=marker, + return self._retry(None, get_container, container, marker=marker, limit=limit, prefix=prefix, delimiter=delimiter, full_listing=full_listing) def put_container(self, container, headers=None): """Wrapper for :func:`put_container`""" - return self._retry(put_container, container, headers=headers) + return self._retry(None, put_container, container, headers=headers) def post_container(self, container, headers): """Wrapper for :func:`post_container`""" - return self._retry(post_container, container, headers) + return self._retry(None, post_container, container, headers) def delete_container(self, container): """Wrapper for :func:`delete_container`""" - return self._retry(delete_container, container) + return self._retry(None, delete_container, container) def head_object(self, container, obj): """Wrapper for :func:`head_object`""" - return self._retry(head_object, container, obj) + return self._retry(None, head_object, container, obj) def get_object(self, container, obj, resp_chunk_size=None): """Wrapper for :func:`get_object`""" - return self._retry(get_object, container, obj, + return self._retry(None, get_object, container, obj, resp_chunk_size=resp_chunk_size) def put_object(self, container, obj, contents, content_length=None, etag=None, chunk_size=65536, content_type=None, headers=None): """Wrapper for :func:`put_object`""" - return self._retry(put_object, container, obj, contents, + + def _default_reset(*args, **kwargs): + raise ClientException('put_object(%r, %r, ...) failure and no ' + 'ability to reset contents for reupload.' % (container, obj)) + + reset_func = _default_reset + tell = getattr(contents, 'tell', None) + seek = getattr(contents, 'seek', None) + if tell and seek: + orig_pos = tell() + reset_func = lambda *a, **k: seek(orig_pos) + + return self._retry(reset_func, put_object, container, obj, contents, content_length=content_length, etag=etag, chunk_size=chunk_size, content_type=content_type, headers=headers) def post_object(self, container, obj, headers): """Wrapper for :func:`post_object`""" - return self._retry(post_object, container, obj, headers) + return self._retry(None, post_object, container, obj, headers) def delete_object(self, container, obj): """Wrapper for :func:`delete_object`""" - return self._retry(delete_object, container, obj) + return self._retry(None, delete_object, container, obj) diff --git a/swift/common/db.py b/swift/common/db.py index b3c80dbc8c..be96411619 100644 --- a/swift/common/db.py +++ b/swift/common/db.py @@ -269,7 +269,7 @@ class DatabaseBroker(object): yield conn conn.rollback() self.conn = conn - except: + except Exception: conn.close() raise @@ -288,13 +288,13 @@ class DatabaseBroker(object): conn.execute('BEGIN IMMEDIATE') try: yield True - except: + except Exception: pass try: conn.execute('ROLLBACK') conn.isolation_level = orig_isolation_level self.conn = conn - except: # pragma: no cover + except Exception: logging.exception( _('Broker error trying to rollback locked connection')) conn.close() @@ -749,7 +749,7 @@ class ContainerBroker(DatabaseBroker): timestamp, 'size': size, 'content_type': content_type, 'etag': etag, 'deleted': deleted}) - except: + except Exception: self.logger.exception( _('Invalid pending entry %(file)s: %(entry)s'), {'file': self.pending_file, 'entry': entry}) @@ -932,7 +932,7 @@ class ContainerBroker(DatabaseBroker): if not row: return [] max_rowid = row['ROWID'] - for _ in xrange(min(max_count, max_rowid)): + for _junk in xrange(min(max_count, 
max_rowid)): row = conn.execute(''' SELECT name FROM object WHERE ROWID >= ? AND +deleted = 0 LIMIT 1 @@ -1216,7 +1216,7 @@ class AccountBroker(DatabaseBroker): 'object_count': object_count, 'bytes_used': bytes_used, 'deleted': deleted}) - except: + except Exception: self.logger.exception( _('Invalid pending entry %(file)s: %(entry)s'), {'file': self.pending_file, 'entry': entry}) @@ -1435,7 +1435,7 @@ class AccountBroker(DatabaseBroker): if not row: return [] max_rowid = row['ROWID'] - for _ in xrange(min(max_count, max_rowid)): + for _junk in xrange(min(max_count, max_rowid)): row = conn.execute(''' SELECT name FROM container WHERE ROWID >= ? AND +deleted = 0 diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py index 4c479a0ed0..49756f1f7b 100644 --- a/swift/common/db_replicator.py +++ b/swift/common/db_replicator.py @@ -21,7 +21,7 @@ import math import time import shutil -from eventlet import GreenPool, sleep, Timeout +from eventlet import GreenPool, sleep, Timeout, TimeoutError from eventlet.green import subprocess import simplejson from webob import Response @@ -79,7 +79,7 @@ class ReplConnection(BufferedHTTPConnection): response = self.getresponse() response.data = response.read() return response - except: + except Exception: self.logger.exception( _('ERROR reading HTTP response from %s'), self.node) return None @@ -359,7 +359,7 @@ class Replicator(Daemon): except DriveNotMounted: repl_nodes.append(more_nodes.next()) self.logger.error(_('ERROR Remote drive not mounted %s'), node) - except: + except (Exception, TimeoutError): self.logger.exception(_('ERROR syncing %(file)s with node' ' %(node)s'), {'file': object_file, 'node': node}) self.stats['success' if success else 'failure'] += 1 @@ -432,7 +432,7 @@ class Replicator(Daemon): while True: try: self.run_once() - except: + except (Exception, TimeoutError): self.logger.exception(_('ERROR trying to replicate')) sleep(self.run_pause) diff --git a/swift/common/middleware/auth.py b/swift/common/middleware/auth.py index 59cf83ddba..a51788f7b7 100644 --- a/swift/common/middleware/auth.py +++ b/swift/common/middleware/auth.py @@ -59,8 +59,8 @@ class DevAuth(object): if s3 or (token and token.startswith(self.reseller_prefix)): # Note: Empty reseller_prefix will match all tokens. 
# Attempt to auth my token with my auth server - groups = \ - self.get_groups(env, token, memcache_client=cache_from_env(env)) + groups = self.get_groups(env, token, + memcache_client=cache_from_env(env)) if groups: env['REMOTE_USER'] = groups user = groups and groups.split(',', 1)[0] or '' @@ -134,8 +134,7 @@ class DevAuth(object): headers = {} if env.get('HTTP_AUTHORIZATION'): groups = None - if env.get('HTTP_AUTHORIZATION'): - headers["Authorization"] = env.get('HTTP_AUTHORIZATION') + headers["Authorization"] = env.get('HTTP_AUTHORIZATION') if not groups: with Timeout(self.timeout): @@ -153,6 +152,15 @@ class DevAuth(object): if memcache_client: memcache_client.set(key, (time(), expiration, groups), timeout=expiration) + + if env.get('HTTP_AUTHORIZATION'): + account, user, sign = \ + env['HTTP_AUTHORIZATION'].split(' ')[-1].split(':') + cfaccount = resp.getheader('x-auth-account-suffix') + path = env['PATH_INFO'] + env['PATH_INFO'] = \ + path.replace("%s:%s" % (account, user), cfaccount, 1) + return groups def authorize(self, req): diff --git a/swift/common/middleware/cname_lookup.py b/swift/common/middleware/cname_lookup.py index e48d209e54..f13155c1fe 100644 --- a/swift/common/middleware/cname_lookup.py +++ b/swift/common/middleware/cname_lookup.py @@ -17,6 +17,7 @@ from webob import Request from webob.exc import HTTPBadRequest import dns.resolver from dns.exception import DNSException +from dns.resolver import NXDOMAIN, NoAnswer from swift.common.utils import cache_from_env, get_logger @@ -34,7 +35,7 @@ def lookup_cname(domain): # pragma: no cover result = answer.items[0].to_text() result = result.rstrip('.') return ttl, result - except DNSException: + except (DNSException, NXDOMAIN, NoAnswer): return 0, None diff --git a/swift/common/middleware/domain_remap.py b/swift/common/middleware/domain_remap.py index 4812182587..a6ed943bb2 100644 --- a/swift/common/middleware/domain_remap.py +++ b/swift/common/middleware/domain_remap.py @@ -27,6 +27,24 @@ class DomainRemapMiddleware(object): account.storageurl/path_root/container/object gets translated to account.storageurl/path_root/account/container/object + + Browsers can convert a host header to lowercase, so check that the reseller + prefix on the account has the correct case. This is done by comparing the + items in the reseller_prefixes config option to the found prefix. If they + match except for case, the item from reseller_prefixes will be used + instead of the found reseller prefix. The reseller_prefixes list is + exclusive. If defined, any request with an account prefix not in that list + will be ignored by this middleware. reseller_prefixes defaults to 'AUTH'. + + Note that this middleware requires that container names and account names + (except as described above) be DNS-compatible. This means that the + account name created in the system and the containers created by users + cannot exceed 63 characters or have UTF-8 characters. These are + restrictions over and above what swift requires and are not explicitly + checked. Simply put, this middleware will make a best-effort attempt to + derive account and container names from elements in the domain name and + put those derived values into the URL path (leaving the Host header + unchanged). """ def __init__(self, app, conf): @@ -35,6 +53,11 @@ class DomainRemapMiddleware(object): if self.storage_domain and self.storage_domain[0] != '.': self.storage_domain = '.' 
+ self.storage_domain self.path_root = conf.get('path_root', 'v1').strip('/') + prefixes = conf.get('reseller_prefixes', 'AUTH') + self.reseller_prefixes = [x.strip() for x in prefixes.split(',') + if x.strip()] + self.reseller_prefixes_lower = [x.lower() + for x in self.reseller_prefixes] def __call__(self, env, start_response): if not self.storage_domain: @@ -58,6 +81,16 @@ class DomainRemapMiddleware(object): return resp(env, start_response) if '_' not in account and '-' in account: account = account.replace('-', '_', 1) + account_reseller_prefix = account.split('_', 1)[0].lower() + if account_reseller_prefix not in self.reseller_prefixes_lower: + # account prefix is not in config list. bail. + return self.app(env, start_response) + prefix_index = self.reseller_prefixes_lower.index( + account_reseller_prefix) + real_prefix = self.reseller_prefixes[prefix_index] + if not account.startswith(real_prefix): + account_suffix = account[len(real_prefix):] + account = real_prefix + account_suffix path = env['PATH_INFO'].strip('/') new_path_parts = ['', self.path_root, account] if container: diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index c0827da88b..4657b6abcd 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -20,7 +20,7 @@ from swift.common.utils import split_path, cache_from_env, get_logger from swift.proxy.server import get_container_memcache_key -class MaxSleepTimeHit(Exception): +class MaxSleepTimeHitError(Exception): pass @@ -32,6 +32,8 @@ class RateLimitMiddleware(object): configurable. """ + BLACK_LIST_SLEEP = 1 + def __init__(self, app, conf, logger=None): self.app = app if logger: @@ -39,17 +41,16 @@ class RateLimitMiddleware(object): else: self.logger = get_logger(conf) self.account_ratelimit = float(conf.get('account_ratelimit', 0)) - self.max_sleep_time_seconds = float(conf.get('max_sleep_time_seconds', - 60)) - self.log_sleep_time_seconds = float(conf.get('log_sleep_time_seconds', - 0)) + self.max_sleep_time_seconds = \ + float(conf.get('max_sleep_time_seconds', 60)) + self.log_sleep_time_seconds = \ + float(conf.get('log_sleep_time_seconds', 0)) self.clock_accuracy = int(conf.get('clock_accuracy', 1000)) + self.rate_buffer_seconds = int(conf.get('rate_buffer_seconds', 5)) self.ratelimit_whitelist = [acc.strip() for acc in - conf.get('account_whitelist', '').split(',') - if acc.strip()] + conf.get('account_whitelist', '').split(',') if acc.strip()] self.ratelimit_blacklist = [acc.strip() for acc in - conf.get('account_blacklist', '').split(',') - if acc.strip()] + conf.get('account_blacklist', '').split(',') if acc.strip()] self.memcache_client = None conf_limits = [] for conf_key in conf.keys(): @@ -92,8 +93,7 @@ class RateLimitMiddleware(object): return None def get_ratelimitable_key_tuples(self, req_method, account_name, - container_name=None, - obj_name=None): + container_name=None, obj_name=None): """ Returns a list of key (used in memcache), ratelimit tuples. Keys should be checked in order. 
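The get_ratelimitable_key_tuples() rules in the next hunk are easier to read outside the middleware. Below is a minimal standalone sketch of the key selection; the function name and example rates are illustrative only, and the container key format (ratelimit/<account>/<container>) is assumed from the account key pattern shown in the hunk, since the container append is elided here::

    def ratelimitable_keys(method, account, container=None, obj=None,
                           account_rate=20.0, container_rate=5.0):
        """Sketch of the (memcache key, max rate) selection rules."""
        keys = []
        # Account-scoped limit: bare account requests, plus container
        # PUT/DELETE (those change the account's container listing).
        if account_rate and account and (
                not (container or obj) or
                (container and not obj and method in ('PUT', 'DELETE'))):
            keys.append(('ratelimit/%s' % account, account_rate))
        # Container-scoped limit: container listings and object writes.
        if account and container and container_rate and (
                (not obj and method in ('GET', 'HEAD')) or
                (obj and method in ('PUT', 'DELETE'))):
            keys.append(('ratelimit/%s/%s' % (account, container),
                         container_rate))
        return keys

    print(ratelimitable_keys('PUT', 'AUTH_test', container='photos'))
    # [('ratelimit/AUTH_test', 20.0)]
    print(ratelimitable_keys('GET', 'AUTH_test', container='photos'))
    # [('ratelimit/AUTH_test/photos', 5.0)]

Note that both an account key and a container key can apply to a single request, which is why the middleware checks the returned keys in order rather than picking one.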
@@ -105,19 +105,20 @@ class RateLimitMiddleware(object): """ keys = [] if self.account_ratelimit and account_name and ( - not (container_name or obj_name) or - (container_name and not obj_name and req_method == 'PUT')): + not (container_name or obj_name) or + (container_name and not obj_name and + req_method in ('PUT', 'DELETE'))): keys.append(("ratelimit/%s" % account_name, self.account_ratelimit)) if account_name and container_name and ( - (not obj_name and req_method in ('GET', 'HEAD')) or - (obj_name and req_method in ('PUT', 'DELETE'))): + (not obj_name and req_method in ('GET', 'HEAD')) or + (obj_name and req_method in ('PUT', 'DELETE'))): container_size = None memcache_key = get_container_memcache_key(account_name, container_name) container_info = self.memcache_client.get(memcache_key) - if type(container_info) == dict: + if isinstance(container_info, dict): container_size = container_info.get('container_size', 0) container_rate = self.get_container_maxrate(container_size) if container_rate: @@ -129,31 +130,32 @@ class RateLimitMiddleware(object): def _get_sleep_time(self, key, max_rate): ''' Returns the amount of time (a float in seconds) that the app - should sleep. Throws a MaxSleepTimeHit exception if maximum - sleep time is exceeded. + should sleep. :param key: a memcache key :param max_rate: maximum rate allowed in requests per second + :raises: MaxSleepTimeHitError if max sleep time is exceeded. ''' now_m = int(round(time.time() * self.clock_accuracy)) time_per_request_m = int(round(self.clock_accuracy / max_rate)) running_time_m = self.memcache_client.incr(key, delta=time_per_request_m) need_to_sleep_m = 0 - request_time_limit = now_m + (time_per_request_m * max_rate) - if running_time_m < now_m: + if (now_m - running_time_m > + self.rate_buffer_seconds * self.clock_accuracy): next_avail_time = int(now_m + time_per_request_m) self.memcache_client.set(key, str(next_avail_time), serialize=False) - elif running_time_m - now_m - time_per_request_m > 0: - need_to_sleep_m = running_time_m - now_m - time_per_request_m + else: + need_to_sleep_m = \ + max(running_time_m - now_m - time_per_request_m, 0) max_sleep_m = self.max_sleep_time_seconds * self.clock_accuracy if max_sleep_m - need_to_sleep_m <= self.clock_accuracy * 0.01: # treat as no-op decrement time self.memcache_client.decr(key, delta=time_per_request_m) - raise MaxSleepTimeHit("Max Sleep Time Exceeded: %s" % - need_to_sleep_m) + raise MaxSleepTimeHitError("Max Sleep Time Exceeded: %s" % + need_to_sleep_m) return float(need_to_sleep_m) / self.clock_accuracy @@ -168,26 +170,25 @@ class RateLimitMiddleware(object): ''' if account_name in self.ratelimit_blacklist: self.logger.error(_('Returning 497 because of blacklisting')) + eventlet.sleep(self.BLACK_LIST_SLEEP) return Response(status='497 Blacklisted', body='Your account has been blacklisted', request=req) if account_name in self.ratelimit_whitelist: return None for key, max_rate in self.get_ratelimitable_key_tuples( - req.method, - account_name, - container_name=container_name, - obj_name=obj_name): + req.method, account_name, container_name=container_name, + obj_name=obj_name): try: need_to_sleep = self._get_sleep_time(key, max_rate) if self.log_sleep_time_seconds and \ need_to_sleep > self.log_sleep_time_seconds: - self.logger.info(_("Ratelimit sleep log: %(sleep)s for " + self.logger.warning(_("Ratelimit sleep log: %(sleep)s for " "%(account)s/%(container)s/%(object)s"), {'sleep': need_to_sleep, 'account': account_name, 'container': container_name, 'object': obj_name}) 
if need_to_sleep > 0: eventlet.sleep(need_to_sleep) - except MaxSleepTimeHit, e: + except MaxSleepTimeHitError, e: self.logger.error(_('Returning 498 because of ops rate ' 'limiting (Max Sleep) %s') % str(e)) error_resp = Response(status='498 Rate Limited', diff --git a/swift/common/middleware/swauth.py b/swift/common/middleware/swauth.py index 568b00fb35..d5a9a61cc6 100644 --- a/swift/common/middleware/swauth.py +++ b/swift/common/middleware/swauth.py @@ -22,8 +22,12 @@ from time import gmtime, strftime, time from traceback import format_exc from urllib import quote, unquote from uuid import uuid4 +from hashlib import md5, sha1 +import hmac +import base64 from eventlet.timeout import Timeout +from eventlet import TimeoutError from webob import Response, Request from webob.exc import HTTPAccepted, HTTPBadRequest, HTTPConflict, \ HTTPCreated, HTTPForbidden, HTTPNoContent, HTTPNotFound, \ @@ -62,22 +66,21 @@ class Swauth(object): self.default_swift_cluster = conf.get('default_swift_cluster', 'local#http://127.0.0.1:8080/v1') # This setting is a little messy because of the options it has to - # provide. The basic format is cluster_name:url, such as the default - # value of local#http://127.0.0.1:8080/v1. But, often the url given to - # the user needs to be different than the url used by Swauth to - # create/delete accounts. So there's a more complex format of - # cluster_name::url::url, such as - # local##https://public.com:8080/v1##http://private.com:8080/v1. - # The double colon is what sets the two apart. - if '##' in self.default_swift_cluster: - self.dsc_name, self.dsc_url, self.dsc_url2 = \ - self.default_swift_cluster.split('##', 2) - self.dsc_url = self.dsc_url.rstrip('/') - self.dsc_url2 = self.dsc_url2.rstrip('/') + # provide. The basic format is cluster_name#url, such as the default + # value of local#http://127.0.0.1:8080/v1. + # If the URL given to the user needs to differ from the url used by + # Swauth to create/delete accounts, there's a more complex format: + # cluster_name#url#url, such as + # local#https://public.com:8080/v1#http://private.com:8080/v1. + cluster_parts = self.default_swift_cluster.split('#', 2) + self.dsc_name = cluster_parts[0] + if len(cluster_parts) == 3: + self.dsc_url = cluster_parts[1].rstrip('/') + self.dsc_url2 = cluster_parts[2].rstrip('/') + elif len(cluster_parts) == 2: + self.dsc_url = self.dsc_url2 = cluster_parts[1].rstrip('/') else: - self.dsc_name, self.dsc_url = \ - self.default_swift_cluster.split('#', 1) - self.dsc_url = self.dsc_url2 = self.dsc_url.rstrip('/') + raise Exception('Invalid cluster format') self.dsc_parsed = urlparse(self.dsc_url) if self.dsc_parsed.scheme not in ('http', 'https'): raise Exception('Cannot handle protocol scheme %s for url %s' % @@ -123,8 +126,9 @@ class Swauth(object): env['HTTP_X_CF_TRANS_ID'] = 'tx' + str(uuid4()) if env.get('PATH_INFO', '').startswith(self.auth_prefix): return self.handle(env, start_response) + s3 = env.get('HTTP_AUTHORIZATION') token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN')) - if token and token.startswith(self.reseller_prefix): + if s3 or (token and token.startswith(self.reseller_prefix)): # Note: Empty reseller_prefix will match all tokens. groups = self.get_groups(env, token) if groups: @@ -132,7 +136,8 @@ class Swauth(object): user = groups and groups.split(',', 1)[0] or '' # We know the proxy logs the token, so we augment it just a bit # to also log the authenticated user. 
- env['HTTP_X_AUTH_TOKEN'] = '%s,%s' % (user, token) + env['HTTP_X_AUTH_TOKEN'] = \ + '%s,%s' % (user, 's3' if s3 else token) env['swift.authorize'] = self.authorize env['swift.clean_acl'] = clean_acl else: @@ -192,6 +197,43 @@ class Swauth(object): expires, groups = cached_auth_data if expires < time(): groups = None + + if env.get('HTTP_AUTHORIZATION'): + account = env['HTTP_AUTHORIZATION'].split(' ')[1] + account, user, sign = account.split(':') + path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user)) + resp = self.make_request(env, 'GET', path).get_response(self.app) + if resp.status_int // 100 != 2: + return None + + if 'x-object-meta-account-id' in resp.headers: + account_id = resp.headers['x-object-meta-account-id'] + else: + path = quote('/v1/%s/%s' % (self.auth_account, account)) + resp2 = self.make_request(env, 'HEAD', + path).get_response(self.app) + if resp2.status_int // 100 != 2: + return None + account_id = resp2.headers['x-container-meta-account-id'] + + path = env['PATH_INFO'] + env['PATH_INFO'] = path.replace("%s:%s" % (account, user), + account_id, 1) + detail = json.loads(resp.body) + + password = detail['auth'].split(':')[-1] + msg = base64.urlsafe_b64decode(unquote(token)) + s = base64.encodestring(hmac.new(detail['auth'].split(':')[-1], + msg, sha1).digest()).strip() + if s != sign: + return None + groups = [g['name'] for g in detail['groups']] + if '.admin' in groups: + groups.remove('.admin') + groups.append(account_id) + groups = ','.join(groups) + return groups + if not groups: path = quote('/v1/%s/.token_%s/%s' % (self.auth_account, token[-1], token)) @@ -283,7 +325,7 @@ class Swauth(object): response = self.handle_request(req)(env, start_response) self.posthooklogger(env, req) return response - except: + except (Exception, TimeoutError): print "EXCEPTION IN handle: %s: %s" % (format_exc(), env) start_response('500 Server Error', [('Content-Type', 'text/plain')]) @@ -299,8 +341,8 @@ class Swauth(object): req.start_time = time() handler = None try: - version, account, user, _ = split_path(req.path_info, minsegs=1, - maxsegs=4, rest_with_last=True) + version, account, user, _junk = split_path(req.path_info, + minsegs=1, maxsegs=4, rest_with_last=True) except ValueError: return HTTPNotFound(request=req) if version in ('v1', 'v1.0', 'auth'): @@ -589,7 +631,7 @@ class Swauth(object): if resp.status // 100 != 2: raise Exception('Could not create account on the Swift ' 'cluster: %s %s %s' % (path, resp.status, resp.reason)) - except: + except (Exception, TimeoutError): self.logger.error(_('ERROR: Exception while trying to communicate ' 'with %(scheme)s://%(host)s:%(port)s/%(path)s'), {'scheme': self.dsc_parsed2.scheme, @@ -839,6 +881,15 @@ class Swauth(object): return HTTPForbidden(request=req) elif not self.is_account_admin(req, account): return HTTPForbidden(request=req) + + path = quote('/v1/%s/%s' % (self.auth_account, account)) + resp = self.make_request(req.environ, 'HEAD', + path).get_response(self.app) + if resp.status_int // 100 != 2: + raise Exception('Could not retrieve account id value: %s %s' % + (path, resp.status)) + headers = {'X-Object-Meta-Account-Id': + resp.headers['x-container-meta-account-id']} # Create the object in the main auth account (this object represents # the user) path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user)) @@ -847,9 +898,10 @@ class Swauth(object): groups.append('.admin') if reseller_admin: groups.append('.reseller_admin') - resp = self.make_request(req.environ, 'PUT', path, json.dumps({'auth': - 
'plaintext:%s' % key, - 'groups': [{'name': g} for g in groups]})).get_response(self.app) + resp = self.make_request(req.environ, 'PUT', path, + json.dumps({'auth': 'plaintext:%s' % key, + 'groups': [{'name': g} for g in groups]}), + headers=headers).get_response(self.app) if resp.status_int == 404: return HTTPNotFound(request=req) if resp.status_int // 100 != 2: diff --git a/swift/common/middleware/swift3.py b/swift/common/middleware/swift3.py index 85f03902ac..26626375bc 100644 --- a/swift/common/middleware/swift3.py +++ b/swift/common/middleware/swift3.py @@ -32,8 +32,8 @@ To add this middleware to your configuration, add the swift3 middleware in front of the auth middleware, and before any other middleware that look at swift requests (like rate limiting). -To set up your client, the access key will be the account string that -should look like AUTH_d305e9dbedbc47df8b25ab46f3152f81, and the +To set up your client, the access key will be the concatenation of the +account and user strings that should look like test:tester, and the secret access key is the account password. The host should also point to the swift storage hostname. It also will have to use the old style calling format, and not the hostname based container format. @@ -42,7 +42,7 @@ An example client using the python boto library might look like the following for an SAIO setup:: connection = boto.s3.Connection( - aws_access_key_id='AUTH_d305e9dbedbc47df8b25ab46f3152f81', + aws_access_key_id='test:tester', aws_secret_access_key='testing', port=8080, host='127.0.0.1', @@ -139,11 +139,9 @@ class ServiceController(Controller): return get_err_response('InvalidURI') containers = loads(''.join(list(body_iter))) - resp = Response(content_type='text/xml') - resp.status = 200 # we don't keep the creation time of a backet (s3cmd doesn't # work without that) so we use something bogus. 
- resp.body = '' \ + body = '' \ '' \ '%s' \ @@ -151,6 +149,7 @@ class ServiceController(Controller): % ("".join(['%s' \ '2009-02-03T16:45:09.000Z' % xml_escape(i['name']) for i in containers])) + resp = Response(status=200, content_type='text/xml', body=body) return resp @@ -400,11 +399,12 @@ class Swift3Middleware(object): h += header.lower() + ":" + str(req.headers[header]) + "\n" h += req.path try: - account, _ = req.headers['Authorization'].split(' ')[-1].split(':') - except: + account, user, _junk = \ + req.headers['Authorization'].split(' ')[-1].split(':') + except Exception: return None, None token = base64.urlsafe_b64encode(h) - return account, token + return '%s:%s' % (account, user), token def __call__(self, env, start_response): req = Request(env) diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py index 5b66b8a9bf..3f728e307a 100644 --- a/swift/common/ring/builder.py +++ b/swift/common/ring/builder.py @@ -239,7 +239,7 @@ class RingBuilder(object): (sum(d['parts'] for d in self.devs if d is not None), self.parts * self.replicas)) if stats: - dev_usage = array('I', (0 for _ in xrange(len(self.devs)))) + dev_usage = array('I', (0 for _junk in xrange(len(self.devs)))) for part in xrange(self.parts): zones = {} for replica in xrange(self.replicas): @@ -342,8 +342,9 @@ class RingBuilder(object): '%08x.%04x' % (dev['parts_wanted'], randint(0, 0xffff)) available_devs = sorted((d for d in self.devs if d is not None), key=lambda x: x['sort_key']) - self._replica2part2dev = [array('H') for _ in xrange(self.replicas)] - for _ in xrange(self.parts): + self._replica2part2dev = \ + [array('H') for _junk in xrange(self.replicas)] + for _junk in xrange(self.parts): other_zones = array('H') for replica in xrange(self.replicas): index = len(available_devs) - 1 @@ -365,7 +366,7 @@ class RingBuilder(object): index = mid + 1 available_devs.insert(index, dev) other_zones.append(dev['zone']) - self._last_part_moves = array('B', (0 for _ in xrange(self.parts))) + self._last_part_moves = array('B', (0 for _junk in xrange(self.parts))) self._last_part_moves_epoch = int(time()) for dev in self.devs: del dev['sort_key'] diff --git a/swift/common/utils.py b/swift/common/utils.py index 05b15e99fa..ee24e52240 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -383,7 +383,7 @@ class NamedFormatter(logging.Formatter): return msg -def get_logger(conf, name=None, log_to_console=False): +def get_logger(conf, name=None, log_to_console=False, log_route=None): """ Get the current system logger using config settings. 
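The get_logger() rework in the hunk below caches one syslog handler per facility on the function object, so repeated calls replace a handler rather than stacking duplicates on the root logger. A minimal sketch of that caching pattern, with StreamHandler standing in for SysLogHandler(address='/dev/log') so it runs without a syslog socket; get_logger_sketch is an illustrative name, not Swift's function::

    import logging
    import sys

    def get_logger_sketch(conf, log_route='swift'):
        conf = conf or {}
        logger = logging.getLogger(log_route)
        if not hasattr(get_logger_sketch, 'handlers'):
            get_logger_sketch.handlers = {}
        facility = conf.get('log_facility', 'LOG_LOCAL0')
        # Replace, don't stack: close and remove any handler previously
        # installed for this facility before adding a fresh one.
        old = get_logger_sketch.handlers.pop(facility, None)
        if old:
            logger.removeHandler(old)
            old.close()
        handler = logging.StreamHandler(sys.__stderr__)
        get_logger_sketch.handlers[facility] = handler
        logger.addHandler(handler)
        logger.setLevel(getattr(logging,
                                conf.get('log_level', 'INFO').upper(),
                                logging.INFO))
        return logger

    log = get_logger_sketch({'log_level': 'DEBUG'},
                            log_route='container-updater')
    log.debug('one handler per facility, replaced on each call')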
@@ -397,33 +397,41 @@ def get_logger(conf, name=None, log_to_console=False): :param name: Name of the logger :param log_to_console: Add handler which writes to console on stderr """ - root_logger = logging.getLogger() - if hasattr(get_logger, 'handler') and get_logger.handler: - root_logger.removeHandler(get_logger.handler) - get_logger.handler.close() - get_logger.handler = None + if not conf: + conf = {} + if not hasattr(get_logger, 'root_logger_configured'): + get_logger.root_logger_configured = True + get_logger(conf, name, log_to_console, log_route='root') + if name is None: + name = conf.get('log_name', 'swift') + if not log_route: + log_route = name + if log_route == 'root': + logger = logging.getLogger() + else: + logger = logging.getLogger(log_route) + if not hasattr(get_logger, 'handlers'): + get_logger.handlers = {} + facility = getattr(SysLogHandler, conf.get('log_facility', 'LOG_LOCAL0'), + SysLogHandler.LOG_LOCAL0) + if facility in get_logger.handlers: + logger.removeHandler(get_logger.handlers[facility]) + get_logger.handlers[facility].close() + del get_logger.handlers[facility] if log_to_console: # check if a previous call to get_logger already added a console logger if hasattr(get_logger, 'console') and get_logger.console: - root_logger.removeHandler(get_logger.console) + logger.removeHandler(get_logger.console) get_logger.console = logging.StreamHandler(sys.__stderr__) - root_logger.addHandler(get_logger.console) - if conf is None: - root_logger.setLevel(logging.INFO) - adapted_logger = LogAdapter(root_logger) - return adapted_logger - if name is None: - name = conf.get('log_name', 'swift') - get_logger.handler = SysLogHandler(address='/dev/log', - facility=getattr(SysLogHandler, - conf.get('log_facility', 'LOG_LOCAL0'), - SysLogHandler.LOG_LOCAL0)) - root_logger.addHandler(get_logger.handler) - root_logger.setLevel( + logger.addHandler(get_logger.console) + get_logger.handlers[facility] = \ + SysLogHandler(address='/dev/log', facility=facility) + logger.addHandler(get_logger.handlers[facility]) + logger.setLevel( getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO)) - adapted_logger = LogAdapter(root_logger) + adapted_logger = LogAdapter(logger) formatter = NamedFormatter(name, adapted_logger) - get_logger.handler.setFormatter(formatter) + get_logger.handlers[facility].setFormatter(formatter) if hasattr(get_logger, 'console'): get_logger.console.setFormatter(formatter) return adapted_logger @@ -821,7 +829,7 @@ def audit_location_generator(devices, datadir, mount_check=True, logger=None): yield path, device, partition -def ratelimit_sleep(running_time, max_rate, incr_by=1): +def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5): ''' Will eventlet.sleep() for the appropriate time so that the max_rate is never exceeded. If max_rate is 0, will not ratelimit. The @@ -835,13 +843,17 @@ def ratelimit_sleep(running_time, max_rate, incr_by=1): :param incr_by: How much to increment the counter. Useful if you want to ratelimit 1024 bytes/sec and have differing sizes of requests. Must be >= 0. + :param rate_buffer: Number of seconds the rate counter can drop and be + allowed to catch up (at a faster than listed rate). + A larger number will result in larger spikes in rate + but better average accuracy. 
''' if not max_rate or incr_by <= 0: return running_time clock_accuracy = 1000.0 now = time.time() * clock_accuracy time_per_request = clock_accuracy * (float(incr_by) / max_rate) - if running_time < now: + if now - running_time > rate_buffer * clock_accuracy: running_time = now elif running_time - now > time_per_request: eventlet.sleep((running_time - now) / clock_accuracy) diff --git a/swift/container/server.py b/swift/container/server.py index 1ffba8a909..c43e369cb8 100644 --- a/swift/container/server.py +++ b/swift/container/server.py @@ -24,6 +24,7 @@ from datetime import datetime import simplejson from eventlet.timeout import Timeout +from eventlet import TimeoutError from webob import Request, Response from webob.exc import HTTPAccepted, HTTPBadRequest, HTTPConflict, \ HTTPCreated, HTTPInternalServerError, HTTPNoContent, \ @@ -118,7 +119,7 @@ class ContainerController(object): 'device': account_device, 'status': account_response.status, 'reason': account_response.reason}) - except: + except (Exception, TimeoutError): self.logger.exception(_('ERROR account update failed with ' '%(ip)s:%(port)s/%(device)s (will retry later)'), {'ip': account_ip, 'port': account_port, @@ -393,7 +394,7 @@ class ContainerController(object): res = getattr(self, req.method)(req) else: res = HTTPMethodNotAllowed() - except: + except Exception: self.logger.exception(_('ERROR __call__ error with %(method)s' ' %(path)s '), {'method': req.method, 'path': req.path}) res = HTTPInternalServerError(body=traceback.format_exc()) diff --git a/swift/container/updater.py b/swift/container/updater.py index d6b1beb2b1..883dd17101 100644 --- a/swift/container/updater.py +++ b/swift/container/updater.py @@ -19,8 +19,9 @@ import signal import sys import time from random import random, shuffle +from tempfile import mkstemp -from eventlet import spawn, patcher, Timeout +from eventlet import spawn, patcher, Timeout, TimeoutError from swift.container.server import DATADIR from swift.common.bufferedhttp import http_connect @@ -51,6 +52,10 @@ class ContainerUpdater(Daemon): self.no_changes = 0 self.successes = 0 self.failures = 0 + self.account_suppressions = {} + self.account_suppression_time = \ + float(conf.get('account_suppression_time', 60)) + self.new_account_suppressions = None def get_account_ring(self): """Get the account ring. Load it if it hasn't been yet.""" @@ -80,6 +85,19 @@ class ContainerUpdater(Daemon): shuffle(paths) return paths + def _load_suppressions(self, filename): + try: + with open(filename, 'r') as tmpfile: + for line in tmpfile: + account, until = line.split() + until = float(until) + self.account_suppressions[account] = until + except Exception: + self.logger.exception( + _('ERROR with loading suppressions from %s: ') % filename) + finally: + os.unlink(filename) + def run_forever(self): # pragma: no cover """ Run the updator continuously. 
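The _load_suppressions() helper above and the run_forever() changes in the next hunk work as a pair: each forked sweep writes "account until" lines to a temp file, and the parent folds them back into its in-memory map after os.wait(). A minimal sketch of that hand-off, assuming POSIX os.fork(); the account name, 60-second window, and two-iteration loop are made-up example values::

    import os
    import time
    from tempfile import mkstemp

    def load_suppressions(filename, suppressions):
        # Parent side: read "account until" pairs written by a finished
        # child, then discard the temp file.
        try:
            with open(filename) as tmpfile:
                for line in tmpfile:
                    account, until = line.split()
                    suppressions[account] = float(until)
        finally:
            os.unlink(filename)

    suppressions = {}
    pid2filename = {}
    for sweep in range(2):  # stand-in for one fork per device path
        fd, tmpfilename = mkstemp()
        os.close(fd)
        pid = os.fork()
        if pid:
            pid2filename[pid] = tmpfilename
        else:
            # Child: record an account whose update just failed, then
            # exit without running the parent's reaping loop.
            with open(tmpfilename, 'w') as out:
                out.write('AUTH_test%d %f\n' % (sweep, time.time() + 60))
            os._exit(0)
    while pid2filename:
        pid = os.wait()[0]
        load_suppressions(pid2filename.pop(pid), suppressions)
    print(suppressions)

Passing the data through a file keyed by child pid avoids any shared-memory coordination: the parent only reads a file once the wait() has proven its writer is done.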
@@ -88,21 +106,33 @@ class ContainerUpdater(Daemon): while True: self.logger.info(_('Begin container update sweep')) begin = time.time() - pids = [] + now = time.time() + expired_suppressions = \ + [a for a, u in self.account_suppressions.iteritems() if u < now] + for account in expired_suppressions: + del self.account_suppressions[account] + pid2filename = {} # read from account ring to ensure it's fresh self.get_account_ring().get_nodes('') for path in self.get_paths(): - while len(pids) >= self.concurrency: - pids.remove(os.wait()[0]) + while len(pid2filename) >= self.concurrency: + pid = os.wait()[0] + try: + self._load_suppressions(pid2filename[pid]) + finally: + del pid2filename[pid] + fd, tmpfilename = mkstemp() + os.close(fd) pid = os.fork() if pid: - pids.append(pid) + pid2filename[pid] = tmpfilename else: signal.signal(signal.SIGTERM, signal.SIG_DFL) patcher.monkey_patch(all=False, socket=True) self.no_changes = 0 self.successes = 0 self.failures = 0 + self.new_account_suppressions = open(tmpfilename, 'w') forkbegin = time.time() self.container_sweep(path) elapsed = time.time() - forkbegin @@ -114,8 +144,12 @@ class ContainerUpdater(Daemon): 'success': self.successes, 'fail': self.failures, 'no_change': self.no_changes}) sys.exit() - while pids: - pids.remove(os.wait()[0]) + while pid2filename: + pid = os.wait()[0] + try: + self._load_suppressions(pid2filename[pid]) + finally: + del pid2filename[pid] elapsed = time.time() - begin self.logger.info(_('Container update sweep completed: %.02fs'), elapsed) @@ -165,6 +199,8 @@ class ContainerUpdater(Daemon): # definitely doesn't have up to date statistics. if float(info['put_timestamp']) <= 0: return + if self.account_suppressions.get(info['account'], 0) > time.time(): + return if info['put_timestamp'] > info['reported_put_timestamp'] or \ info['delete_timestamp'] > info['reported_delete_timestamp'] \ or info['object_count'] != info['reported_object_count'] or \ @@ -195,6 +231,11 @@ class ContainerUpdater(Daemon): self.logger.debug( _('Update report failed for %(container)s %(dbfile)s'), {'container': container, 'dbfile': dbfile}) + self.account_suppressions[info['account']] = until = \ + time.time() + self.account_suppression_time + if self.new_account_suppressions: + print >>self.new_account_suppressions, \ + info['account'], until else: self.no_changes += 1 @@ -221,7 +262,7 @@ class ContainerUpdater(Daemon): 'X-Object-Count': count, 'X-Bytes-Used': bytes, 'X-Account-Override-Deleted': 'yes'}) - except: + except (Exception, TimeoutError): self.logger.exception(_('ERROR account update failed with ' '%(ip)s:%(port)s/%(device)s (will retry later): '), node) return 500 @@ -230,7 +271,7 @@ class ContainerUpdater(Daemon): resp = conn.getresponse() resp.read() return resp.status - except: + except (Exception, TimeoutError): if self.logger.getEffectiveLevel() <= logging.DEBUG: self.logger.exception( _('Exception with %(ip)s:%(port)s/%(device)s'), node) diff --git a/swift/obj/auditor.py b/swift/obj/auditor.py index 62fc747e86..09fdd77774 100644 --- a/swift/obj/auditor.py +++ b/swift/obj/auditor.py @@ -38,6 +38,7 @@ class ObjectAuditor(Daemon): self.max_files_per_second = float(conf.get('files_per_second', 20)) self.max_bytes_per_second = float(conf.get('bytes_per_second', 10000000)) + self.log_time = int(conf.get('log_time', 3600)) self.files_running_time = 0 self.bytes_running_time = 0 self.bytes_processed = 0 @@ -46,7 +47,6 @@ class ObjectAuditor(Daemon): self.passes = 0 self.quarantines = 0 self.errors = 0 - self.log_time = 3600 # once an 
hour def run_forever(self): """Run the object audit until stopped.""" diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py index ec76fb384c..dcfcb926f9 100644 --- a/swift/obj/replicator.py +++ b/swift/obj/replicator.py @@ -223,10 +223,10 @@ class ObjectReplicator(Daemon): self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7)) self.partition_times = [] self.run_pause = int(conf.get('run_pause', 30)) - self.rsync_timeout = int(conf.get('rsync_timeout', 300)) - self.rsync_io_timeout = conf.get('rsync_io_timeout', '10') + self.rsync_timeout = int(conf.get('rsync_timeout', 900)) + self.rsync_io_timeout = conf.get('rsync_io_timeout', '30') self.http_timeout = int(conf.get('http_timeout', 60)) - self.lockup_timeout = int(conf.get('lockup_timeout', 900)) + self.lockup_timeout = int(conf.get('lockup_timeout', 1800)) def _rsync(self, args): """ @@ -252,7 +252,10 @@ class ObjectReplicator(Daemon): continue if result.startswith('cd+'): continue - self.logger.info(result) + if not ret_val: + self.logger.info(result) + else: + self.logger.error(result) if ret_val: self.logger.error(_('Bad rsync return code: %(args)s -> %(ret)d'), {'args': str(args), 'ret': ret_val}) @@ -407,7 +410,8 @@ class ObjectReplicator(Daemon): conn.getresponse().read() self.suffix_sync += len(suffixes) except (Exception, Timeout): - self.logger.exception(_("Error syncing with node: %s") % node) + self.logger.exception(_("Error syncing with node: %s") % + node) self.suffix_count += len(local_hash) except (Exception, Timeout): self.logger.exception(_("Error syncing partition")) diff --git a/swift/obj/server.py b/swift/obj/server.py index f20b40d57a..5ca37a40b6 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -33,7 +33,7 @@ from webob.exc import HTTPAccepted, HTTPBadRequest, HTTPCreated, \ HTTPNotModified, HTTPPreconditionFailed, \ HTTPRequestTimeout, HTTPUnprocessableEntity, HTTPMethodNotAllowed from xattr import getxattr, setxattr -from eventlet import sleep, Timeout, tpool +from eventlet import sleep, Timeout, TimeoutError, tpool from swift.common.utils import mkdirs, normalize_timestamp, \ storage_directory, hash_path, renamer, fallocate, \ @@ -51,6 +51,7 @@ ASYNCDIR = 'async_pending' PICKLE_PROTOCOL = 2 METADATA_KEY = 'user.swift.metadata' MAX_OBJECT_NAME_LENGTH = 1024 +KEEP_CACHE_SIZE = (5 * 1024 * 1024) def read_metadata(fd): @@ -113,6 +114,7 @@ class DiskFile(object): self.meta_file = None self.data_file = None self.fp = None + self.keep_cache = False if not os.path.exists(self.datadir): return files = sorted(os.listdir(self.datadir), reverse=True) @@ -150,12 +152,12 @@ class DiskFile(object): if chunk: read += len(chunk) if read - dropped_cache > (1024 * 1024): - drop_buffer_cache(self.fp.fileno(), dropped_cache, + self.drop_cache(self.fp.fileno(), dropped_cache, read - dropped_cache) dropped_cache = read yield chunk else: - drop_buffer_cache(self.fp.fileno(), dropped_cache, + self.drop_cache(self.fp.fileno(), dropped_cache, read - dropped_cache) break finally: @@ -226,7 +228,7 @@ class DiskFile(object): timestamp = normalize_timestamp(metadata['X-Timestamp']) write_metadata(fd, metadata) if 'Content-Length' in metadata: - drop_buffer_cache(fd, 0, int(metadata['Content-Length'])) + self.drop_cache(fd, 0, int(metadata['Content-Length'])) tpool.execute(os.fsync, fd) invalidate_hash(os.path.dirname(self.datadir)) renamer(tmppath, os.path.join(self.datadir, timestamp + extension)) @@ -248,6 +250,11 @@ class DiskFile(object): if err.errno != errno.ENOENT: raise + def drop_cache(self, fd, offset, 
offset, length): + """Method for no-oping buffer cache drop method.""" + if not self.keep_cache: + drop_buffer_cache(fd, offset, length) + class ObjectController(object): """Implements the WSGI application for the Swift Object Server.""" @@ -308,7 +315,7 @@ class ObjectController(object): 'response from %(ip)s:%(port)s/%(dev)s'), {'status': response.status, 'ip': ip, 'port': port, 'dev': contdevice}) - except: + except (Exception, TimeoutError): self.logger.exception(_('ERROR container update failed with ' '%(ip)s:%(port)s/%(dev)s (saving for async update later)'), {'ip': ip, 'port': port, 'dev': contdevice}) @@ -482,6 +489,10 @@ class ObjectController(object): response.etag = file.metadata['ETag'] response.last_modified = float(file.metadata['X-Timestamp']) response.content_length = int(file.metadata['Content-Length']) + if response.content_length < KEEP_CACHE_SIZE and \ + 'X-Auth-Token' not in request.headers and \ + 'X-Storage-Token' not in request.headers: + file.keep_cache = True if 'Content-Encoding' in file.metadata: response.content_encoding = file.metadata['Content-Encoding'] return request.get_response(response) @@ -566,7 +577,7 @@ class ObjectController(object): if suffix: recalculate_hashes(path, suffix.split('-')) return Response() - _, hashes = get_hashes(path, do_listdir=False) + _junk, hashes = get_hashes(path, do_listdir=False) return Response(body=pickle.dumps(hashes)) def __call__(self, env, start_response): @@ -582,7 +593,7 @@ class ObjectController(object): res = getattr(self, req.method)(req) else: res = HTTPMethodNotAllowed() - except: + except Exception: self.logger.exception(_('ERROR __call__ error with %(method)s' ' %(path)s '), {'method': req.method, 'path': req.path}) res = HTTPInternalServerError(body=traceback.format_exc()) diff --git a/swift/obj/updater.py b/swift/obj/updater.py index a226d4523e..2b28ff08c5 100644 --- a/swift/obj/updater.py +++ b/swift/obj/updater.py @@ -20,7 +20,7 @@ import sys import time from random import random -from eventlet import patcher, Timeout +from eventlet import patcher, Timeout, TimeoutError from swift.common.bufferedhttp import http_connect from swift.common.exceptions import ConnectionTimeout @@ -202,7 +202,7 @@ class ObjectUpdater(Daemon): resp = conn.getresponse() resp.read() return resp.status - except: + except (Exception, TimeoutError): self.logger.exception(_('ERROR with remote server ' '%(ip)s:%(port)s/%(device)s'), node) return 500 diff --git a/swift/proxy/server.py b/swift/proxy/server.py index 32c7ad9004..1eae0dfc30 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -29,8 +29,9 @@ from urllib import unquote, quote import uuid import functools from hashlib import md5 +from random import shuffle -from eventlet import sleep +from eventlet import sleep, TimeoutError from eventlet.timeout import Timeout from webob.exc import HTTPBadRequest, HTTPMethodNotAllowed, \ HTTPNotFound, HTTPPreconditionFailed, \ @@ -383,7 +384,7 @@ class Controller(object): attempts_left -= 1 if attempts_left <= 0: break - except: + except (Exception, TimeoutError): self.exception_occurred(node, _('Account'), _('Trying to get account info for %s') % path) if self.app.memcache and result_code in (200, 404): @@ -461,7 +462,7 @@ class Controller(object): attempts_left -= 1 if attempts_left <= 0: break - except: + except (Exception, TimeoutError): self.exception_occurred(node, _('Container'), _('Trying to get container info for %s') % path) if self.app.memcache and result_code in (200, 404): @@ -592,7 +593,7 @@ class Controller(object): 
query_string=req.query_string) with Timeout(self.app.node_timeout): source = conn.getresponse() - except: + except (Exception, TimeoutError): self.exception_occurred(node, server_type, _('Trying to %(method)s %(path)s') % {'method': req.method, 'path': req.path}) @@ -624,7 +625,7 @@ class Controller(object): except GeneratorExit: res.client_disconnect = True self.app.logger.info(_('Client disconnected on read')) - except: + except (Exception, TimeoutError): self.exception_occurred(node, _('Object'), _('Trying to read during GET of %s') % req.path) raise @@ -691,7 +692,7 @@ class ObjectController(Controller): _('ERROR %(status)d %(body)s From Object Server') % {'status': response.status, 'body': body[:1024]}) return response.status, response.reason, body - except: + except (Exception, TimeoutError): self.exception_occurred(node, _('Object'), _('Trying to %(method)s %(path)s') % {'method': req.method, 'path': req.path}) @@ -707,6 +708,7 @@ class ObjectController(Controller): return aresp partition, nodes = self.app.object_ring.get_nodes( self.account_name, self.container_name, self.object_name) + shuffle(nodes) resp = self.GETorHEAD_base(req, _('Object'), partition, self.iter_nodes(partition, nodes, self.app.object_ring), req.path_info, self.app.object_ring.replica_count) @@ -998,7 +1000,7 @@ class ObjectController(Controller): conn.node = node with Timeout(self.app.node_timeout): resp = conn.getexpect() - except: + except (Exception, TimeoutError): self.exception_occurred(node, _('Object'), _('Expect: 100-continue on %s') % req.path) if conn and resp: @@ -1038,7 +1040,7 @@ class ObjectController(Controller): conn.send('%x\r\n%s\r\n' % (len_chunk, chunk)) else: conn.send(chunk) - except: + except (Exception, TimeoutError): self.exception_occurred(conn.node, _('Object'), _('Trying to write to %s') % req.path) conns.remove(conn) @@ -1055,7 +1057,7 @@ class ObjectController(Controller): self.app.logger.info( _('ERROR Client read timeout (%ss)'), err.seconds) return HTTPRequestTimeout(request=req) - except: + except Exception: req.client_disconnect = True self.app.logger.exception( _('ERROR Exception causing client disconnect')) @@ -1083,7 +1085,7 @@ class ObjectController(Controller): 'body': bodies[-1][:1024], 'path': req.path}) elif 200 <= response.status < 300: etags.add(response.getheader('etag').strip('"')) - except: + except (Exception, TimeoutError): self.exception_occurred(conn.node, _('Object'), _('Trying to get final status of PUT to %s') % req.path) if len(etags) > 1: @@ -1294,7 +1296,7 @@ class ContainerController(Controller): if source.status == 507: self.error_limit(node) accounts.insert(0, account) - except: + except (Exception, TimeoutError): accounts.insert(0, account) self.exception_occurred(node, _('Container'), _('Trying to PUT to %s') % req.path) @@ -1350,7 +1352,7 @@ class ContainerController(Controller): bodies.append(body) elif source.status == 507: self.error_limit(node) - except: + except (Exception, TimeoutError): self.exception_occurred(node, _('Container'), _('Trying to POST %s') % req.path) if len(statuses) >= len(containers): @@ -1406,7 +1408,7 @@ class ContainerController(Controller): if source.status == 507: self.error_limit(node) accounts.insert(0, account) - except: + except (Exception, TimeoutError): accounts.insert(0, account) self.exception_occurred(node, _('Container'), _('Trying to DELETE %s') % req.path) @@ -1491,7 +1493,7 @@ class AccountController(Controller): else: if source.status == 507: self.error_limit(node) - except: + except (Exception, 
TimeoutError): self.exception_occurred(node, _('Account'), _('Trying to PUT to %s') % req.path) if len(statuses) >= len(accounts): @@ -1539,7 +1541,7 @@ class AccountController(Controller): bodies.append(body) elif source.status == 507: self.error_limit(node) - except: + except (Exception, TimeoutError): self.exception_occurred(node, _('Account'), _('Trying to POST %s') % req.path) if len(statuses) >= len(accounts): @@ -1584,7 +1586,7 @@ class AccountController(Controller): bodies.append(body) elif source.status == 507: self.error_limit(node) - except: + except (Exception, TimeoutError): self.exception_occurred(node, _('Account'), _('Trying to DELETE %s') % req.path) if len(statuses) >= len(accounts): @@ -1685,7 +1687,7 @@ class BaseApplication(object): response = self.handle_request(req)(env, start_response) self.posthooklogger(env, req) return response - except: + except Exception: print "EXCEPTION IN __call__: %s: %s" % \ (traceback.format_exc(), env) start_response('500 Server Error', diff --git a/swift/stats/access_processor.py b/swift/stats/access_processor.py index 08c3971a84..2aee505415 100644 --- a/swift/stats/access_processor.py +++ b/swift/stats/access_processor.py @@ -40,7 +40,7 @@ class AccessLogProcessor(object): '''given a raw access log line, return a dict of the good parts''' d = {} try: - (_, + (unused, server, client_ip, lb_ip, @@ -57,7 +57,8 @@ class AccessLogProcessor(object): etag, trans_id, headers, - processing_time) = (unquote(x) for x in raw_log[16:].split(' ')) + processing_time) = (unquote(x) for x in + raw_log[16:].split(' ')[:18]) except ValueError: self.logger.debug(_('Bad line data: %s') % repr(raw_log)) return {} diff --git a/swift/stats/account_stats.py b/swift/stats/account_stats.py index 91d31f39ad..6a9688831f 100644 --- a/swift/stats/account_stats.py +++ b/swift/stats/account_stats.py @@ -55,7 +55,8 @@ class AccountStat(Daemon): self.logger.info(_("Gathering account stats")) start = time.time() self.find_and_process() - self.logger.info(_("Gathering account stats complete (%0.2f minutes)") % + self.logger.info( + _("Gathering account stats complete (%0.2f minutes)") % ((time.time() - start) / 60)) def find_and_process(self): @@ -70,8 +71,8 @@ class AccountStat(Daemon): # Account Name, Container Count, Object Count, Bytes Used for device in os.listdir(self.devices): if self.mount_check and not check_mount(self.devices, device): - self.logger.error(_("Device %s is not mounted, skipping.") % - device) + self.logger.error( + _("Device %s is not mounted, skipping.") % device) continue accounts = os.path.join(self.devices, device, @@ -87,11 +88,11 @@ class AccountStat(Daemon): broker = AccountBroker(db_path) if not broker.is_deleted(): (account_name, - _, _, _, + _junk, _junk, _junk, container_count, object_count, bytes_used, - _, _) = broker.get_info() + _junk, _junk) = broker.get_info() line_data = '"%s",%d,%d,%d\n' % ( account_name, container_count, object_count, bytes_used) diff --git a/swift/stats/log_processor.py b/swift/stats/log_processor.py index f8938ddbc2..5dbc92afbe 100644 --- a/swift/stats/log_processor.py +++ b/swift/stats/log_processor.py @@ -273,14 +273,15 @@ class LogProcessorDaemon(Daemon): already_processed_files = cPickle.loads(buf) else: already_processed_files = set() - except: + except Exception: already_processed_files = set() self.logger.debug(_('found %d processed files') % \ len(already_processed_files)) logs_to_process = self.log_processor.get_data_list(lookback_start, lookback_end, already_processed_files) - 
self.logger.info(_('loaded %d files to process') % len(logs_to_process)) + self.logger.info(_('loaded %d files to process') % + len(logs_to_process)) if not logs_to_process: self.logger.info(_("Log processing done (%0.2f minutes)") % ((time.time() - start) / 60)) @@ -365,7 +366,7 @@ def multiprocess_collate(processor_args, logs_to_process, worker_count): results = [] in_queue = multiprocessing.Queue() out_queue = multiprocessing.Queue() - for _ in range(worker_count): + for _junk in range(worker_count): p = multiprocessing.Process(target=collate_worker, args=(processor_args, in_queue, @@ -374,7 +375,7 @@ def multiprocess_collate(processor_args, logs_to_process, worker_count): results.append(p) for x in logs_to_process: in_queue.put(x) - for _ in range(worker_count): + for _junk in range(worker_count): in_queue.put(None) count = 0 while True: diff --git a/swift/stats/stats_processor.py b/swift/stats/stats_processor.py index dc07d85199..95dba7604c 100644 --- a/swift/stats/stats_processor.py +++ b/swift/stats/stats_processor.py @@ -26,7 +26,7 @@ class StatsLogProcessor(object): data_object_name): '''generate hourly groupings of data from one stats log file''' account_totals = {} - year, month, day, hour, _ = data_object_name.split('/') + year, month, day, hour, _junk = data_object_name.split('/') for line in obj_stream: if not line: continue diff --git a/test/probe/common.py b/test/probe/common.py index c29f142713..08e8309a4b 100644 --- a/test/probe/common.py +++ b/test/probe/common.py @@ -49,7 +49,7 @@ def kill_pids(pids): for pid in pids.values(): try: kill(pid, SIGTERM) - except: + except Exception: pass diff --git a/test/probe/test_object_handoff.py b/test/probe/test_object_handoff.py index ba81e4c559..212fcc2c5e 100755 --- a/test/probe/test_object_handoff.py +++ b/test/probe/test_object_handoff.py @@ -95,7 +95,7 @@ class TestObjectHandoff(unittest.TestCase): try: direct_client.direct_get_object(onode, opart, self.account, container, obj) - except: + except Exception: exc = True if not exc: raise Exception('Previously downed object server had test object') @@ -119,7 +119,7 @@ class TestObjectHandoff(unittest.TestCase): try: direct_client.direct_get_object(another_onode, opart, self.account, container, obj) - except: + except Exception: exc = True if not exc: raise Exception('Handoff object server still had test object') @@ -134,7 +134,7 @@ class TestObjectHandoff(unittest.TestCase): try: direct_client.direct_get_object(another_onode, opart, self.account, container, obj) - except: + except Exception: exc = True if not exc: raise Exception('Handoff server claimed it had the object when ' @@ -171,7 +171,7 @@ class TestObjectHandoff(unittest.TestCase): exc = False try: client.head_object(self.url, self.token, container, obj) - except: + except Exception: exc = True if not exc: raise Exception('Regular object HEAD was still successful') @@ -209,7 +209,7 @@ class TestObjectHandoff(unittest.TestCase): try: direct_client.direct_get_object(another_onode, opart, self.account, container, obj) - except: + except Exception: exc = True if not exc: raise Exception('Handoff object server still had the object') diff --git a/test/unit/__init__.py b/test/unit/__init__.py index 1895098c2e..50b06766de 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -37,6 +37,36 @@ def tmpfile(content): finally: os.unlink(file_name) +xattr_data = {} + + +def _get_inode(fd): + if not isinstance(fd, int): + try: + fd = fd.fileno() + except AttributeError: + return os.stat(fd).st_ino + return os.fstat(fd).st_ino 
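The multiprocess_collate() hunk above preserves the daemon's fan-out pattern: a pool of multiprocessing workers drains a shared input queue, and the parent pushes one None sentinel per worker so each process knows when to exit. A minimal sketch of that pattern under a toy work function (multiprocess_fan_out and the doubling stand-in are illustrative names, not Swift code):

    import multiprocessing

    def collate_worker(in_queue, out_queue):
        # Drain the shared queue until the None sentinel arrives.
        while True:
            item = in_queue.get()
            if item is None:
                break
            out_queue.put((item, item * 2))  # stand-in for real log collation

    def multiprocess_fan_out(items, worker_count):
        in_queue = multiprocessing.Queue()
        out_queue = multiprocessing.Queue()
        workers = [multiprocessing.Process(target=collate_worker,
                                           args=(in_queue, out_queue))
                   for _junk in range(worker_count)]
        for worker in workers:
            worker.start()
        for item in items:
            in_queue.put(item)
        for _junk in range(worker_count):
            in_queue.put(None)  # one sentinel per worker, as in the diff
        results = [out_queue.get() for _junk in items]  # one result per item
        for worker in workers:
            worker.join()
        return results

Collecting results before join() matters: a child that still has buffered output can block on exit, so the parent reads the out_queue dry first.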
+ + +def _setxattr(fd, k, v): + inode = _get_inode(fd) + data = xattr_data.get(inode, {}) + data[k] = v + xattr_data[inode] = data + + +def _getxattr(fd, k): + inode = _get_inode(fd) + data = xattr_data.get(inode, {}).get(k) + if not data: + raise IOError + return data + +import xattr +xattr.setxattr = _setxattr +xattr.getxattr = _getxattr + class MockTrue(object): """ diff --git a/test/unit/auth/test_server.py b/test/unit/auth/test_server.py index bd63b44b12..4060766d65 100644 --- a/test/unit/auth/test_server.py +++ b/test/unit/auth/test_server.py @@ -119,7 +119,7 @@ class TestAuthServer(unittest.TestCase): headers={'X-Storage-User': 'tester', 'X-Storage-Pass': 'testing'})) token = res.headers['x-storage-token'] - ttl, _, _, _ = self.controller.validate_token(token) + ttl, _junk, _junk, _junk = self.controller.validate_token(token) self.assert_(ttl > 0, repr(ttl)) def test_validate_token_expired(self): @@ -134,7 +134,7 @@ class TestAuthServer(unittest.TestCase): headers={'X-Storage-User': 'tester', 'X-Storage-Pass': 'testing'})) token = res.headers['x-storage-token'] - ttl, _, _, _ = self.controller.validate_token(token) + ttl, _junk, _junk, _junk = self.controller.validate_token(token) self.assert_(ttl > 0, repr(ttl)) auth_server.time = lambda: 1 + self.controller.token_life self.assertEquals(self.controller.validate_token(token), False) @@ -318,7 +318,7 @@ class TestAuthServer(unittest.TestCase): headers={'X-Storage-User': 'tester', 'X-Storage-Pass': 'testing'})) token = res.headers['x-storage-token'] - ttl, _, _, _ = self.controller.validate_token(token) + ttl, _junk, _junk, _junk = self.controller.validate_token(token) self.assert_(ttl > 0, repr(ttl)) def test_auth_SOSO_good_Mosso_headers(self): @@ -330,7 +330,7 @@ class TestAuthServer(unittest.TestCase): headers={'X-Auth-User': 'test:tester', 'X-Auth-Key': 'testing'})) token = res.headers['x-storage-token'] - ttl, _, _, _ = self.controller.validate_token(token) + ttl, _junk, _junk, _junk = self.controller.validate_token(token) self.assert_(ttl > 0, repr(ttl)) def test_auth_SOSO_bad_Mosso_headers(self): @@ -438,7 +438,7 @@ class TestAuthServer(unittest.TestCase): headers={'X-Auth-User': 'test:tester', 'X-Auth-Key': 'testing'})) token = res.headers['x-storage-token'] - ttl, _, _, _ = self.controller.validate_token(token) + ttl, _junk, _junk, _junk = self.controller.validate_token(token) self.assert_(ttl > 0, repr(ttl)) def test_auth_Mosso_good_SOSO_header_names(self): @@ -450,7 +450,7 @@ class TestAuthServer(unittest.TestCase): headers={'X-Storage-User': 'test:tester', 'X-Storage-Pass': 'testing'})) token = res.headers['x-storage-token'] - ttl, _, _, _ = self.controller.validate_token(token) + ttl, _junk, _junk, _junk = self.controller.validate_token(token) self.assert_(ttl > 0, repr(ttl)) def test_basic_logging(self): @@ -712,7 +712,7 @@ class TestAuthServer(unittest.TestCase): res = self.controller.handle_auth(Request.blank('/v1.0', environ={'REQUEST_METHOD': 'GET'}, headers={'X-Auth-User': 'act:usr', 'X-Auth-Key': 'pas'})) - _, _, _, stgact = \ + _junk, _junk, _junk, stgact = \ self.controller.validate_token(res.headers['x-auth-token']) self.assertEquals(stgact, '') @@ -723,7 +723,7 @@ class TestAuthServer(unittest.TestCase): res = self.controller.handle_auth(Request.blank('/v1.0', environ={'REQUEST_METHOD': 'GET'}, headers={'X-Auth-User': 'act:usr', 'X-Auth-Key': 'pas'})) - _, _, _, vstgact = \ + _junk, _junk, _junk, vstgact = \ self.controller.validate_token(res.headers['x-auth-token']) self.assertEquals(stgact, vstgact) @@ -734,7 
+734,7 @@ class TestAuthServer(unittest.TestCase): res = self.controller.handle_auth(Request.blank('/v1.0', environ={'REQUEST_METHOD': 'GET'}, headers={'X-Auth-User': 'act:usr', 'X-Auth-Key': 'pas'})) - _, _, _, stgact = \ + _junk, _junk, _junk, stgact = \ self.controller.validate_token(res.headers['x-auth-token']) self.assertEquals(stgact, '.reseller_admin') diff --git a/test/unit/common/middleware/test_auth.py b/test/unit/common/middleware/test_auth.py index cabc7a9523..f6718f68bc 100644 --- a/test/unit/common/middleware/test_auth.py +++ b/test/unit/common/middleware/test_auth.py @@ -51,7 +51,7 @@ class FakeMemcache(object): def delete(self, key): try: del self.store[key] - except: + except Exception: pass return True @@ -95,7 +95,7 @@ class Logger(object): self.error_value = (msg, args, kwargs) def exception(self, msg, *args, **kwargs): - _, exc, _ = sys.exc_info() + _junk, exc, _junk = sys.exc_info() self.exception_value = (msg, '%s %s' % (exc.__class__.__name__, str(exc)), args, kwargs) diff --git a/test/unit/common/middleware/test_domain_remap.py b/test/unit/common/middleware/test_domain_remap.py index fe079cbeda..b7b000a053 100644 --- a/test/unit/common/middleware/test_domain_remap.py +++ b/test/unit/common/middleware/test_domain_remap.py @@ -47,49 +47,49 @@ class TestDomainRemap(unittest.TestCase): def test_domain_remap_account(self): req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'}, - headers={'Host': 'a.example.com'}) + headers={'Host': 'AUTH_a.example.com'}) resp = self.app(req.environ, start_response) - self.assertEquals(resp, '/v1/a') + self.assertEquals(resp, '/v1/AUTH_a') req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'}, - headers={'Host': 'a-uuid.example.com'}) + headers={'Host': 'AUTH-uuid.example.com'}) resp = self.app(req.environ, start_response) - self.assertEquals(resp, '/v1/a_uuid') + self.assertEquals(resp, '/v1/AUTH_uuid') def test_domain_remap_account_container(self): req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'}, - headers={'Host': 'c.a.example.com'}) + headers={'Host': 'c.AUTH_a.example.com'}) resp = self.app(req.environ, start_response) - self.assertEquals(resp, '/v1/a/c') + self.assertEquals(resp, '/v1/AUTH_a/c') def test_domain_remap_extra_subdomains(self): req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'}, - headers={'Host': 'x.y.c.a.example.com'}) + headers={'Host': 'x.y.c.AUTH_a.example.com'}) resp = self.app(req.environ, start_response) self.assertEquals(resp, ['Bad domain in host header']) def test_domain_remap_account_with_path_root(self): req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'}, - headers={'Host': 'a.example.com'}) + headers={'Host': 'AUTH_a.example.com'}) resp = self.app(req.environ, start_response) - self.assertEquals(resp, '/v1/a') + self.assertEquals(resp, '/v1/AUTH_a') def test_domain_remap_account_container_with_path_root(self): req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'}, - headers={'Host': 'c.a.example.com'}) + headers={'Host': 'c.AUTH_a.example.com'}) resp = self.app(req.environ, start_response) - self.assertEquals(resp, '/v1/a/c') + self.assertEquals(resp, '/v1/AUTH_a/c') def test_domain_remap_account_container_with_path(self): req = Request.blank('/obj', environ={'REQUEST_METHOD': 'GET'}, - headers={'Host': 'c.a.example.com'}) + headers={'Host': 'c.AUTH_a.example.com'}) resp = self.app(req.environ, start_response) - self.assertEquals(resp, '/v1/a/c/obj') + self.assertEquals(resp, '/v1/AUTH_a/c/obj') def 
test_domain_remap_account_container_with_path_root_and_path(self): req = Request.blank('/v1/obj', environ={'REQUEST_METHOD': 'GET'}, - headers={'Host': 'c.a.example.com'}) + headers={'Host': 'c.AUTH_a.example.com'}) resp = self.app(req.environ, start_response) - self.assertEquals(resp, '/v1/a/c/obj') + self.assertEquals(resp, '/v1/AUTH_a/c/obj') def test_domain_remap_account_matching_ending_not_domain(self): req = Request.blank('/dontchange', environ={'REQUEST_METHOD': 'GET'}, @@ -101,7 +101,23 @@ class TestDomainRemap(unittest.TestCase): self.app = domain_remap.DomainRemapMiddleware(FakeApp(), {'storage_domain': ''}) req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'}, - headers={'Host': 'c.a.example.com'}) + headers={'Host': 'c.AUTH_a.example.com'}) + resp = self.app(req.environ, start_response) + self.assertEquals(resp, '/test') + + def test_domain_remap_configured_with_prefixes(self): + conf = {'reseller_prefixes': 'PREFIX'} + self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf) + req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'}, + headers={'Host': 'c.prefix_uuid.example.com'}) + resp = self.app(req.environ, start_response) + self.assertEquals(resp, '/v1/PREFIX_uuid/c/test') + + def test_domain_remap_configured_with_bad_prefixes(self): + conf = {'reseller_prefixes': 'UNKNOWN'} + self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf) + req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'}, + headers={'Host': 'c.prefix_uuid.example.com'}) resp = self.app(req.environ, start_response) self.assertEquals(resp, '/test') diff --git a/test/unit/common/middleware/test_ratelimit.py b/test/unit/common/middleware/test_ratelimit.py index 3f993a0402..ef1abca91e 100644 --- a/test/unit/common/middleware/test_ratelimit.py +++ b/test/unit/common/middleware/test_ratelimit.py @@ -51,7 +51,7 @@ class FakeMemcache(object): def delete(self, key): try: del self.store[key] - except: + except Exception: pass return True @@ -95,13 +95,13 @@ class FakeApp(object): class FakeLogger(object): # a thread safe logger - def error(self, msg): + def error(self, *args, **kwargs): pass - def info(self, msg): + def info(self, *args, **kwargs): pass - def warning(self, msg): + def warning(self, *args, **kwargs): pass @@ -224,6 +224,7 @@ class TestRateLimit(unittest.TestCase): 'account_whitelist': 'a', 'account_blacklist': 'b'} self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) + self.test_ratelimit.BLACK_LIST_SLEEP = 0 ratelimit.http_connect = mock_http_connect(204) req = Request.blank('/v/b/c') req.environ['swift.cache'] = FakeMemcache() @@ -260,6 +261,7 @@ class TestRateLimit(unittest.TestCase): # making clock less accurate for nosetests running slow self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp()) ratelimit.http_connect = mock_http_connect(204) + self.test_ratelimit.log_sleep_time_seconds = .00001 req = Request.blank('/v/a') req.environ['swift.cache'] = FakeMemcache() begin = time.time() @@ -402,7 +404,5 @@ class TestRateLimit(unittest.TestCase): self._run(make_app_call, num_calls, current_rate) - - if __name__ == '__main__': unittest.main() diff --git a/test/unit/common/middleware/test_swauth.py b/test/unit/common/middleware/test_swauth.py index a6edab9c2c..ce3681ac06 100644 --- a/test/unit/common/middleware/test_swauth.py +++ b/test/unit/common/middleware/test_swauth.py @@ -49,7 +49,7 @@ class FakeMemcache(object): def delete(self, key): try: del self.store[key] - except: + except Exception: pass return True @@ -165,7 +165,7 @@ class 
TestAuth(unittest.TestCase): self.assertEquals(ath.dsc_url2, 'https://host/path') ath = auth.filter_factory({'super_admin_key': 'supertest', 'default_swift_cluster': - 'local##https://host/path/##http://host2/path2/'})(app) + 'local#https://host/path/#http://host2/path2/'})(app) self.assertEquals(ath.dsc_url, 'https://host/path') self.assertEquals(ath.dsc_url2, 'http://host2/path2') @@ -2561,6 +2561,7 @@ class TestAuth(unittest.TestCase): def test_put_user_regular_success(self): self.test_auth.app = FakeApp(iter([ + ('200 Ok', {'X-Container-Meta-Account-Id': 'AUTH_cfa'}, ''), # PUT of user object ('201 Created', {}, '')])) resp = Request.blank('/auth/v2/act/usr', @@ -2570,13 +2571,14 @@ class TestAuth(unittest.TestCase): 'X-Auth-User-Key': 'key'} ).get_response(self.test_auth) self.assertEquals(resp.status_int, 201) - self.assertEquals(self.test_auth.app.calls, 1) + self.assertEquals(self.test_auth.app.calls, 2) self.assertEquals(json.loads(self.test_auth.app.request.body), {"groups": [{"name": "act:usr"}, {"name": "act"}], "auth": "plaintext:key"}) def test_put_user_account_admin_success(self): self.test_auth.app = FakeApp(iter([ + ('200 Ok', {'X-Container-Meta-Account-Id': 'AUTH_cfa'}, ''), # PUT of user object ('201 Created', {}, '')])) resp = Request.blank('/auth/v2/act/usr', @@ -2587,7 +2589,7 @@ class TestAuth(unittest.TestCase): 'X-Auth-User-Admin': 'true'} ).get_response(self.test_auth) self.assertEquals(resp.status_int, 201) - self.assertEquals(self.test_auth.app.calls, 1) + self.assertEquals(self.test_auth.app.calls, 2) self.assertEquals(json.loads(self.test_auth.app.request.body), {"groups": [{"name": "act:usr"}, {"name": "act"}, {"name": ".admin"}], @@ -2595,6 +2597,7 @@ class TestAuth(unittest.TestCase): def test_put_user_reseller_admin_success(self): self.test_auth.app = FakeApp(iter([ + ('200 Ok', {'X-Container-Meta-Account-Id': 'AUTH_cfa'}, ''), # PUT of user object ('201 Created', {}, '')])) resp = Request.blank('/auth/v2/act/usr', @@ -2605,7 +2608,7 @@ class TestAuth(unittest.TestCase): 'X-Auth-User-Reseller-Admin': 'true'} ).get_response(self.test_auth) self.assertEquals(resp.status_int, 201) - self.assertEquals(self.test_auth.app.calls, 1) + self.assertEquals(self.test_auth.app.calls, 2) self.assertEquals(json.loads(self.test_auth.app.request.body), {"groups": [{"name": "act:usr"}, {"name": "act"}, {"name": ".admin"}, {"name": ".reseller_admin"}], @@ -2613,6 +2616,7 @@ class TestAuth(unittest.TestCase): def test_put_user_fail_not_found(self): self.test_auth.app = FakeApp(iter([ + ('200 Ok', {'X-Container-Meta-Account-Id': 'AUTH_cfa'}, ''), # PUT of user object ('404 Not Found', {}, '')])) resp = Request.blank('/auth/v2/act/usr', @@ -2622,7 +2626,7 @@ class TestAuth(unittest.TestCase): 'X-Auth-User-Key': 'key'} ).get_response(self.test_auth) self.assertEquals(resp.status_int, 404) - self.assertEquals(self.test_auth.app.calls, 1) + self.assertEquals(self.test_auth.app.calls, 2) def test_put_user_fail(self): self.test_auth.app = FakeApp(iter([ diff --git a/test/unit/common/middleware/test_swift3.py b/test/unit/common/middleware/test_swift3.py index c7c974c965..f84a0ffe8a 100644 --- a/test/unit/common/middleware/test_swift3.py +++ b/test/unit/common/middleware/test_swift3.py @@ -209,7 +209,7 @@ class TestSwift3(unittest.TestCase): def test_bad_path(self): req = Request.blank('/bucket/object/bad', environ={'REQUEST_METHOD': 'GET'}, - headers={'Authorization': 'AUTH_something:hoge'}) + headers={'Authorization': 'AWS test:tester:hmac'}) resp = self.app(req.environ, 
start_response) dom = xml.dom.minidom.parseString("".join(resp)) self.assertEquals(dom.firstChild.nodeName, 'Error') @@ -219,7 +219,7 @@ class TestSwift3(unittest.TestCase): def test_bad_method(self): req = Request.blank('/', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Authorization': 'AUTH_something:hoge'}) + headers={'Authorization': 'AWS test:tester:hmac'}) resp = self.app(req.environ, start_response) dom = xml.dom.minidom.parseString("".join(resp)) self.assertEquals(dom.firstChild.nodeName, 'Error') @@ -230,7 +230,7 @@ class TestSwift3(unittest.TestCase): local_app = swift3.filter_factory({})(cl(status)) req = Request.blank(path, environ={'REQUEST_METHOD': method}, - headers={'Authorization': 'AUTH_who:password'}) + headers={'Authorization': 'AWS test:tester:hmac'}) resp = local_app(req.environ, start_response) dom = xml.dom.minidom.parseString("".join(resp)) self.assertEquals(dom.firstChild.nodeName, 'Error') @@ -246,7 +246,7 @@ class TestSwift3(unittest.TestCase): local_app = swift3.filter_factory({})(FakeAppService()) req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'}, - headers={'Authorization': 'AUTH_who:password'}) + headers={'Authorization': 'AWS test:tester:hmac'}) resp = local_app(req.environ, local_app.app.do_start_response) self.assertEquals(local_app.app.response_args[0].split()[0], '200') @@ -279,7 +279,7 @@ class TestSwift3(unittest.TestCase): bucket_name = 'junk' req = Request.blank('/%s' % bucket_name, environ={'REQUEST_METHOD': 'GET'}, - headers={'Authorization': 'AUTH_who:password'}) + headers={'Authorization': 'AWS test:tester:hmac'}) resp = local_app(req.environ, local_app.app.do_start_response) self.assertEquals(local_app.app.response_args[0].split()[0], '200') @@ -307,7 +307,7 @@ class TestSwift3(unittest.TestCase): req = Request.blank('/%s' % bucket_name, environ={'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'max-keys=3'}, - headers={'Authorization': 'AUTH_who:password'}) + headers={'Authorization': 'AWS test:tester:hmac'}) resp = local_app(req.environ, local_app.app.do_start_response) dom = xml.dom.minidom.parseString("".join(resp)) self.assertEquals(dom.getElementsByTagName('IsTruncated')[0]. @@ -316,7 +316,7 @@ class TestSwift3(unittest.TestCase): req = Request.blank('/%s' % bucket_name, environ={'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'max-keys=2'}, - headers={'Authorization': 'AUTH_who:password'}) + headers={'Authorization': 'AWS test:tester:hmac'}) resp = local_app(req.environ, local_app.app.do_start_response) dom = xml.dom.minidom.parseString("".join(resp)) self.assertEquals(dom.getElementsByTagName('IsTruncated')[0]. @@ -335,7 +335,7 @@ class TestSwift3(unittest.TestCase): req = Request.blank('/%s' % bucket_name, environ={'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'max-keys=5'}, - headers={'Authorization': 'AUTH_who:password'}) + headers={'Authorization': 'AWS test:tester:hmac'}) resp = local_app(req.environ, lambda *args: None) dom = xml.dom.minidom.parseString("".join(resp)) self.assertEquals(dom.getElementsByTagName('MaxKeys')[0]. @@ -346,7 +346,7 @@ class TestSwift3(unittest.TestCase): req = Request.blank('/%s' % bucket_name, environ={'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'max-keys=5000'}, - headers={'Authorization': 'AUTH_who:password'}) + headers={'Authorization': 'AWS test:tester:hmac'}) resp = local_app(req.environ, lambda *args: None) dom = xml.dom.minidom.parseString("".join(resp)) self.assertEquals(dom.getElementsByTagName('MaxKeys')[0]. 
@@ -366,7 +366,7 @@ class TestSwift3(unittest.TestCase): req = Request.blank('/%s' % bucket_name, environ={'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'delimiter=a&marker=b&prefix=c'}, - headers={'Authorization': 'AUTH_who:password'}) + headers={'Authorization': 'AWS test:tester:hmac'}) resp = local_app(req.environ, lambda *args: None) dom = xml.dom.minidom.parseString("".join(resp)) self.assertEquals(dom.getElementsByTagName('Prefix')[0]. @@ -392,7 +392,7 @@ class TestSwift3(unittest.TestCase): local_app = swift3.filter_factory({})(FakeAppBucket(201)) req = Request.blank('/bucket', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Authorization': 'AUTH_who:password'}) + headers={'Authorization': 'AWS test:tester:hmac'}) resp = local_app(req.environ, local_app.app.do_start_response) self.assertEquals(local_app.app.response_args[0].split()[0], '200') @@ -410,7 +410,7 @@ class TestSwift3(unittest.TestCase): local_app = swift3.filter_factory({})(FakeAppBucket(204)) req = Request.blank('/bucket', environ={'REQUEST_METHOD': 'DELETE'}, - headers={'Authorization': 'AUTH_who:password'}) + headers={'Authorization': 'AWS test:tester:hmac'}) resp = local_app(req.environ, local_app.app.do_start_response) self.assertEquals(local_app.app.response_args[0].split()[0], '204') @@ -418,7 +418,7 @@ class TestSwift3(unittest.TestCase): local_app = swift3.filter_factory({})(FakeAppObject()) req = Request.blank('/bucket/object', environ={'REQUEST_METHOD': method}, - headers={'Authorization': 'AUTH_who:password'}) + headers={'Authorization': 'AWS test:tester:hmac'}) resp = local_app(req.environ, local_app.app.do_start_response) self.assertEquals(local_app.app.response_args[0].split()[0], '200') @@ -468,7 +468,7 @@ class TestSwift3(unittest.TestCase): local_app = swift3.filter_factory({})(FakeAppObject(201)) req = Request.blank('/bucket/object', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Authorization': 'AUTH_who:password', + headers={'Authorization': 'AWS test:tester:hmac', 'x-amz-storage-class': 'REDUCED_REDUNDANCY', 'Content-MD5': 'Gyz1NfJ3Mcl0NDZFo5hTKA=='}) req.date = datetime.now() @@ -490,7 +490,7 @@ class TestSwift3(unittest.TestCase): local_app = swift3.filter_factory({})(app) req = Request.blank('/bucket/object', environ={'REQUEST_METHOD': 'PUT'}, - headers={'Authorization': 'AUTH_who:password', + headers={'Authorization': 'AWS test:tester:hmac', 'X-Amz-Storage-Class': 'REDUCED_REDUNDANCY', 'X-Amz-Meta-Something': 'oh hai', 'X-Amz-Copy-Source': '/some/source', @@ -518,7 +518,7 @@ class TestSwift3(unittest.TestCase): local_app = swift3.filter_factory({})(FakeAppObject(204)) req = Request.blank('/bucket/object', environ={'REQUEST_METHOD': 'DELETE'}, - headers={'Authorization': 'AUTH_who:password'}) + headers={'Authorization': 'AWS test:tester:hmac'}) resp = local_app(req.environ, local_app.app.do_start_response) self.assertEquals(local_app.app.response_args[0].split()[0], '204') diff --git a/test/unit/common/test_client.py b/test/unit/common/test_client.py index 739cba75e3..e6e1abb1dc 100644 --- a/test/unit/common/test_client.py +++ b/test/unit/common/test_client.py @@ -14,7 +14,10 @@ # limitations under the License. 
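The swift3 test changes above standardize on Amazon-form credentials, 'AWS access_key:signature', with swift3 reading a Swift account:user pair where S3 expects a bare access key (hence 'AWS test:tester:hmac'). For orientation, classic S3 request signing is an HMAC-SHA1 over a small canonical string; a simplified sketch (s3_authorization is an illustrative helper, and the canonical string here omits Content-MD5, Content-Type, and x-amz-* headers):

    import base64
    import hmac
    from hashlib import sha1

    def s3_authorization(access_key, secret, method, date, path):
        # StringToSign: verb, blank Content-MD5, blank Content-Type,
        # date, canonicalized resource, each on its own line.
        string_to_sign = '%s\n\n\n%s\n%s' % (method, date, path)
        signature = base64.b64encode(
            hmac.new(secret, string_to_sign, sha1).digest())
        return 'AWS %s:%s' % (access_key, signature)

In these unit tests the signature field is never actually verified, so the literal 'hmac' placeholder is enough to get past the header parsing.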
# TODO: More tests +import socket import unittest +from StringIO import StringIO +from urlparse import urlparse # TODO: mock http connection class with more control over headers from test.unit.proxy.test_server import fake_http_connect @@ -32,10 +35,10 @@ class TestHttpHelpers(unittest.TestCase): def test_http_connection(self): url = 'http://www.test.com' - _, conn = c.http_connection(url) + _junk, conn = c.http_connection(url) self.assertTrue(isinstance(conn, c.HTTPConnection)) url = 'https://www.test.com' - _, conn = c.http_connection(url) + _junk, conn = c.http_connection(url) self.assertTrue(isinstance(conn, c.HTTPSConnection)) url = 'ftp://www.test.com' self.assertRaises(c.ClientException, c.http_connection, url) @@ -377,5 +380,97 @@ class TestConnection(MockHttpTest): self.assertEquals(conn.url, 'http://www.new.com') self.assertEquals(conn.token, 'new') + def test_reset_stream(self): + + class LocalContents(object): + + def __init__(self, tell_value=0): + self.already_read = False + self.seeks = [] + self.tell_value = tell_value + + def tell(self): + return self.tell_value + + def seek(self, position): + self.seeks.append(position) + self.already_read = False + + def read(self, size=-1): + if self.already_read: + return '' + else: + self.already_read = True + return 'abcdef' + + class LocalConnection(object): + + def putrequest(self, *args, **kwargs): + return + + def putheader(self, *args, **kwargs): + return + + def endheaders(self, *args, **kwargs): + return + + def send(self, *args, **kwargs): + raise socket.error('oops') + + def request(self, *args, **kwargs): + return + + def getresponse(self, *args, **kwargs): + self.status = 200 + return self + + def getheader(self, *args, **kwargs): + return '' + + def read(self, *args, **kwargs): + return '' + + def local_http_connection(url): + parsed = urlparse(url) + return parsed, LocalConnection() + + orig_conn = c.http_connection + try: + c.http_connection = local_http_connection + conn = c.Connection('http://www.example.com', 'asdf', 'asdf', + retries=1, starting_backoff=.0001) + + contents = LocalContents() + exc = None + try: + conn.put_object('c', 'o', contents) + except socket.error, err: + exc = err + self.assertEquals(contents.seeks, [0]) + self.assertEquals(str(exc), 'oops') + + contents = LocalContents(tell_value=123) + exc = None + try: + conn.put_object('c', 'o', contents) + except socket.error, err: + exc = err + self.assertEquals(contents.seeks, [123]) + self.assertEquals(str(exc), 'oops') + + contents = LocalContents() + contents.tell = None + exc = None + try: + conn.put_object('c', 'o', contents) + except c.ClientException, err: + exc = err + self.assertEquals(contents.seeks, []) + self.assertEquals(str(exc), "put_object('c', 'o', ...) 
failure " + "and no ability to reset contents for reupload.") + finally: + c.http_connection = orig_conn + + if __name__ == '__main__': unittest.main() diff --git a/test/unit/common/test_db.py b/test/unit/common/test_db.py index 49bc8a9229..e63397c954 100644 --- a/test/unit/common/test_db.py +++ b/test/unit/common/test_db.py @@ -165,14 +165,14 @@ class TestDatabaseBroker(unittest.TestCase): try: with broker.get() as conn: conn.execute('SELECT 1') - except: + except Exception: got_exc = True broker = DatabaseBroker(os.path.join(self.testdir, '1.db')) got_exc = False try: with broker.get() as conn: conn.execute('SELECT 1') - except: + except Exception: got_exc = True self.assert_(got_exc) def stub(*args, **kwargs): @@ -186,7 +186,7 @@ class TestDatabaseBroker(unittest.TestCase): conn.execute('INSERT INTO test (one) VALUES ("1")') raise Exception('test') conn.commit() - except: + except Exception: pass broker = DatabaseBroker(os.path.join(self.testdir, '1.db')) with broker.get() as conn: @@ -230,7 +230,7 @@ class TestDatabaseBroker(unittest.TestCase): try: with broker.lock(): raise Exception('test') - except: + except Exception: pass with broker.lock(): pass @@ -548,7 +548,7 @@ class TestContainerBroker(unittest.TestCase): with broker.get() as conn: self.assertEquals(first_conn, conn) raise Exception('OMG') - except: + except Exception: pass self.assert_(broker.conn == None) @@ -1363,7 +1363,7 @@ class TestAccountBroker(unittest.TestCase): try: with broker.get() as conn: pass - except: + except Exception: got_exc = True self.assert_(got_exc) broker.initialize(normalize_timestamp('1')) @@ -1384,7 +1384,7 @@ class TestAccountBroker(unittest.TestCase): with broker.get() as conn: self.assertEquals(first_conn, conn) raise Exception('OMG') - except: + except Exception: pass self.assert_(broker.conn == None) diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index b9e8a3f81b..0d5c2e1fed 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -107,7 +107,7 @@ class TestUtils(unittest.TestCase): testroot = os.path.join(os.path.dirname(__file__), 'mkdirs') try: os.unlink(testroot) - except: + except Exception: pass rmtree(testroot, ignore_errors=1) self.assert_(not os.path.exists(testroot)) @@ -211,14 +211,14 @@ class TestUtils(unittest.TestCase): try: for line in lfo: pass - except: + except Exception: got_exc = True self.assert_(got_exc) got_exc = False try: for line in lfo.xreadlines(): pass - except: + except Exception: got_exc = True self.assert_(got_exc) self.assertRaises(IOError, lfo.read) @@ -456,15 +456,6 @@ log_name = yarr''' # make sure its accurate to 10th of a second self.assertTrue(abs(25 - (time.time() - start) * 100) < 10) - def test_ratelimit_sleep_with_sleep(self): - running_time = 0 - start = time.time() - for i in range(25): - running_time = utils.ratelimit_sleep(running_time, 50) - time.sleep(1.0 / 75) - # make sure its accurate to 10th of a second - self.assertTrue(abs(50 - (time.time() - start) * 100) < 10) - def test_ratelimit_sleep_with_incr(self): running_time = 0 start = time.time() @@ -499,5 +490,16 @@ log_name = yarr''' parsed = utils.urlparse('www.example.com') self.assertEquals(parsed.hostname, '') + def test_ratelimit_sleep_with_sleep(self): + running_time = 0 + start = time.time() + sleeps = [0] * 7 + [.2] * 3 + [0] * 30 + for i in sleeps: + running_time = utils.ratelimit_sleep(running_time, 40, + rate_buffer=1) + time.sleep(i) + # make sure its accurate to 10th of a second + self.assertTrue(abs(100 - (time.time() - 
start) * 100) < 10) + if __name__ == '__main__': unittest.main() diff --git a/test/unit/container/test_server.py b/test/unit/container/test_server.py index 2f9d5badea..194ffff83d 100644 --- a/test/unit/container/test_server.py +++ b/test/unit/container/test_server.py @@ -19,6 +19,7 @@ import unittest from shutil import rmtree from StringIO import StringIO from time import time +from tempfile import mkdtemp from eventlet import spawn, TimeoutError, listen from eventlet.timeout import Timeout @@ -33,17 +34,8 @@ class TestContainerController(unittest.TestCase): """ Test swift.container_server.ContainerController """ def setUp(self): """ Set up for testing swift.object_server.ObjectController """ - self.path_to_test_xfs = os.environ.get('PATH_TO_TEST_XFS') - if not self.path_to_test_xfs or \ - not os.path.exists(self.path_to_test_xfs): - print >>sys.stderr, 'WARNING: PATH_TO_TEST_XFS not set or not ' \ - 'pointing to a valid directory.\n' \ - 'Please set PATH_TO_TEST_XFS to a directory on an XFS file ' \ - 'system for testing.' - self.testdir = '/tmp/SWIFTUNITTEST' - else: - self.testdir = os.path.join(self.path_to_test_xfs, - 'tmp_test_object_server_ObjectController') + self.testdir = os.path.join(mkdtemp(), + 'tmp_test_object_server_ObjectController') mkdirs(self.testdir) rmtree(self.testdir) mkdirs(os.path.join(self.testdir, 'sda1')) @@ -53,7 +45,7 @@ class TestContainerController(unittest.TestCase): def tearDown(self): """ Tear down for testing swift.object_server.ObjectController """ - rmtree(self.testdir, ignore_errors=1) + rmtree(os.path.dirname(self.testdir), ignore_errors=1) def test_acl_container(self): # Ensure no acl by default diff --git a/test/unit/container/test_updater.py b/test/unit/container/test_updater.py index 092944c4be..134e73d96a 100644 --- a/test/unit/container/test_updater.py +++ b/test/unit/container/test_updater.py @@ -19,6 +19,7 @@ import sys import unittest from gzip import GzipFile from shutil import rmtree +from tempfile import mkdtemp from eventlet import spawn, TimeoutError, listen from eventlet.timeout import Timeout @@ -35,17 +36,7 @@ class TestContainerUpdater(unittest.TestCase): def setUp(self): utils.HASH_PATH_SUFFIX = 'endcap' - self.path_to_test_xfs = os.environ.get('PATH_TO_TEST_XFS') - if not self.path_to_test_xfs or \ - not os.path.exists(self.path_to_test_xfs): - print >>sys.stderr, 'WARNING: PATH_TO_TEST_XFS not set or not ' \ - 'pointing to a valid directory.\n' \ - 'Please set PATH_TO_TEST_XFS to a directory on an XFS file ' \ - 'system for testing.' 
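The relocated test_ratelimit_sleep_with_sleep above feeds utils.ratelimit_sleep() a burst pattern with rate_buffer=1, checking that the caller's own sleeps are credited against the allowed rate. The idea is a virtual clock that may lag real time by at most rate_buffer seconds before the limiter starts sleeping; a rough reimplementation for illustration only (the real utils.ratelimit_sleep keeps its own units and bookkeeping):

    import time

    def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5):
        # `running_time` is a virtual clock marking when the next request
        # may begin; the caller threads it through successive calls.
        now = time.time()
        time_per_request = incr_by / float(max_rate)
        if now - running_time > rate_buffer:
            # Idle callers only bank rate_buffer seconds of burst credit.
            running_time = now - rate_buffer
        elif running_time > now:
            time.sleep(running_time - now)
        return running_time + time_per_request

With max_rate=40 and rate_buffer=1, the test's forty calls should take about a second end to end, which is what its final assertion allows, to within a tenth of a second.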
- self.testdir = '/tmp/SWIFTUNITTEST' - else: - self.testdir = os.path.join(self.path_to_test_xfs, - 'tmp_test_container_updater') + self.testdir = os.path.join(mkdtemp(), 'tmp_test_container_updater') rmtree(self.testdir, ignore_errors=1) os.mkdir(self.testdir) pickle.dump(RingData([[0, 1, 0, 1], [1, 0, 1, 0]], @@ -60,7 +51,7 @@ class TestContainerUpdater(unittest.TestCase): os.mkdir(self.sda1) def tearDown(self): - rmtree(self.testdir, ignore_errors=1) + rmtree(os.path.dirname(self.testdir), ignore_errors=1) def test_creation(self): cu = container_updater.ContainerUpdater({ @@ -87,6 +78,7 @@ class TestContainerUpdater(unittest.TestCase): 'interval': '1', 'concurrency': '1', 'node_timeout': '15', + 'account_suppression_time': 0 }) cu.run_once() containers_dir = os.path.join(self.sda1, container_server.DATADIR) @@ -142,7 +134,7 @@ class TestContainerUpdater(unittest.TestCase): bindsock = listen(('127.0.0.1', 0)) def spawn_accepts(): events = [] - for _ in xrange(2): + for _junk in xrange(2): sock, addr = bindsock.accept() events.append(spawn(accept, sock, addr, 201)) return events @@ -195,7 +187,7 @@ class TestContainerUpdater(unittest.TestCase): bindsock = listen(('127.0.0.1', 0)) def spawn_accepts(): events = [] - for _ in xrange(2): + for _junk in xrange(2): with Timeout(3): sock, addr = bindsock.accept() events.append(spawn(accept, sock, addr)) diff --git a/test/unit/obj/test_auditor.py b/test/unit/obj/test_auditor.py index 9ee42cb3ec..66540a3693 100644 --- a/test/unit/obj/test_auditor.py +++ b/test/unit/obj/test_auditor.py @@ -14,13 +14,16 @@ # limitations under the License. # TODO: Tests +from test import unit as _setup_mocks import unittest import tempfile import os import time from shutil import rmtree from hashlib import md5 +from tempfile import mkdtemp from swift.obj import auditor +from swift.obj import server as object_server from swift.obj.server import DiskFile, write_metadata from swift.common.utils import hash_path, mkdirs, normalize_timestamp, renamer from swift.obj.replicator import invalidate_hash @@ -30,18 +33,8 @@ from swift.common.exceptions import AuditException class TestAuditor(unittest.TestCase): def setUp(self): - self.path_to_test_xfs = os.environ.get('PATH_TO_TEST_XFS') - if not self.path_to_test_xfs or \ - not os.path.exists(self.path_to_test_xfs): - print >> sys.stderr, 'WARNING: PATH_TO_TEST_XFS not set or not ' \ - 'pointing to a valid directory.\n' \ - 'Please set PATH_TO_TEST_XFS to a directory on an XFS file ' \ - 'system for testing.' 
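The updater tests above keep their pattern of standing up throwaway listeners instead of real container servers: listen(('127.0.0.1', 0)) binds an ephemeral port and a green thread answers each connection with a canned status. A condensed sketch of that harness (one_shot_server is an illustrative name; the real accept() helper also parses the request and records events):

    from eventlet import listen, spawn

    def one_shot_server(bindsock, status=201):
        # Accept one connection, drain the request, reply with `status`.
        sock, addr = bindsock.accept()
        fd = sock.makefile()
        fd.readline()                  # request line
        while fd.readline().strip():   # headers end at a blank line
            pass
        fd.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' % status)
        fd.flush()

    bindsock = listen(('127.0.0.1', 0))
    acceptor = spawn(one_shot_server, bindsock)
    ip, port = bindsock.getsockname()[:2]
    # ...aim the code under test at ip:port, then reap the green thread:
    acceptor.wait()

Binding port 0 is what lets the suite run without port collisions; the tests read the kernel-chosen port back from getsockname().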
- self.testdir = '/tmp/SWIFTUNITTEST' - else: - self.testdir = os.path.join(self.path_to_test_xfs, - 'tmp_test_object_auditor') - + self.testdir = \ + os.path.join(mkdtemp(), 'tmp_test_object_auditor') self.devices = os.path.join(self.testdir, 'node') rmtree(self.testdir, ignore_errors=1) os.mkdir(self.testdir) @@ -63,7 +56,7 @@ class TestAuditor(unittest.TestCase): mount_check='false') def tearDown(self): - rmtree(self.testdir, ignore_errors=1) + rmtree(os.path.dirname(self.testdir), ignore_errors=1) def test_object_audit_extra_data(self): self.auditor = auditor.ObjectAuditor(self.conf) @@ -130,25 +123,21 @@ class TestAuditor(unittest.TestCase): self.assertEquals(self.auditor.quarantines, pre_quarantines + 1) def test_object_audit_no_meta(self): - self.auditor = auditor.ObjectAuditor(self.conf) cur_part = '0' disk_file = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o') - data = '0' * 1024 - etag = md5() + timestamp = str(normalize_timestamp(time.time())) + path = os.path.join(disk_file.datadir, timestamp + '.data') + mkdirs(disk_file.datadir) + fp = open(path, 'w') + fp.write('0' * 1024) + fp.close() + invalidate_hash(os.path.dirname(disk_file.datadir)) + self.auditor = auditor.ObjectAuditor(self.conf) pre_quarantines = self.auditor.quarantines - with disk_file.mkstemp() as (fd, tmppath): - os.write(fd, data) - etag.update(data) - etag = etag.hexdigest() - timestamp = str(normalize_timestamp(time.time())) - os.fsync(fd) - invalidate_hash(os.path.dirname(disk_file.datadir)) - renamer(tmppath, os.path.join(disk_file.datadir, - timestamp + '.data')) - self.auditor.object_audit( - os.path.join(disk_file.datadir, timestamp + '.data'), - 'sda', cur_part) - self.assertEquals(self.auditor.quarantines, pre_quarantines + 1) + self.auditor.object_audit( + os.path.join(disk_file.datadir, timestamp + '.data'), + 'sda', cur_part) + self.assertEquals(self.auditor.quarantines, pre_quarantines + 1) def test_object_audit_bad_args(self): self.auditor = auditor.ObjectAuditor(self.conf) diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index 64c58ff7ca..22d2fe20a4 100644 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -23,10 +23,12 @@ from nose import SkipTest from shutil import rmtree from StringIO import StringIO from time import gmtime, sleep, strftime, time +from tempfile import mkdtemp from eventlet import sleep, spawn, wsgi, listen from webob import Request -from xattr import getxattr, setxattr +from test.unit import _getxattr as getxattr +from test.unit import _setxattr as setxattr from test.unit import connect_tcp, readuntil2crlfs from swift.obj import server as object_server @@ -39,17 +41,8 @@ class TestObjectController(unittest.TestCase): def setUp(self): """ Set up for testing swift.object_server.ObjectController """ - self.path_to_test_xfs = os.environ.get('PATH_TO_TEST_XFS') - if not self.path_to_test_xfs or \ - not os.path.exists(self.path_to_test_xfs): - print >> sys.stderr, 'WARNING: PATH_TO_TEST_XFS not set or not ' \ - 'pointing to a valid directory.\n' \ - 'Please set PATH_TO_TEST_XFS to a directory on an XFS file ' \ - 'system for testing.' 
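Each of these setUp() conversions follows the same recipe, which is why the matching 'if not self.path_to_test_xfs: raise SkipTest' guards disappear from the test bodies below: build the scratch tree inside a fresh mkdtemp() directory, and have tearDown() remove that directory's parent. Reduced to a sketch (TestWithScratchDir is an illustrative name):

    import os
    import unittest
    from shutil import rmtree
    from tempfile import mkdtemp

    class TestWithScratchDir(unittest.TestCase):

        def setUp(self):
            # A private temp tree per run; no XFS mount or env var needed.
            self.testdir = os.path.join(mkdtemp(), 'scratch')
            os.makedirs(self.testdir)

        def tearDown(self):
            # The parent of testdir is the mkdtemp() dir itself.
            rmtree(os.path.dirname(self.testdir), ignore_errors=True)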
- self.testdir = '/tmp/SWIFTUNITTEST' - else: - self.testdir = os.path.join(self.path_to_test_xfs, - 'tmp_test_object_server_ObjectController') + self.testdir = \ + os.path.join(mkdtemp(), 'tmp_test_object_server_ObjectController') mkdirs(self.testdir) rmtree(self.testdir) mkdirs(os.path.join(self.testdir, 'sda1')) @@ -60,12 +53,10 @@ class TestObjectController(unittest.TestCase): def tearDown(self): """ Tear down for testing swift.object_server.ObjectController """ - rmtree(self.testdir) + rmtree(os.path.dirname(self.testdir)) def test_POST_update_meta(self): """ Test swift.object_server.ObjectController.POST """ - if not self.path_to_test_xfs: - raise SkipTest timestamp = normalize_timestamp(time()) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': timestamp, @@ -93,8 +84,6 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.headers['Content-Type'], 'application/x-test') def test_POST_not_exist(self): - if not self.path_to_test_xfs: - raise SkipTest timestamp = normalize_timestamp(time()) req = Request.blank('/sda1/p/a/c/fail', environ={'REQUEST_METHOD': 'POST'}, @@ -116,8 +105,6 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.status_int, 400) def test_POST_container_connection(self): - if not self.path_to_test_xfs: - raise SkipTest def mock_http_connect(response, with_exc=False): @@ -222,8 +209,6 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.status_int, 411) def test_PUT_common(self): - if not self.path_to_test_xfs: - raise SkipTest timestamp = normalize_timestamp(time()) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': timestamp, @@ -247,8 +232,6 @@ class TestObjectController(unittest.TestCase): 'name': '/a/c/o'}) def test_PUT_overwrite(self): - if not self.path_to_test_xfs: - raise SkipTest req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': normalize_timestamp(time()), 'Content-Length': '6', @@ -281,8 +264,6 @@ class TestObjectController(unittest.TestCase): 'Content-Encoding': 'gzip'}) def test_PUT_no_etag(self): - if not self.path_to_test_xfs: - raise SkipTest req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': normalize_timestamp(time()), 'Content-Type': 'text/plain'}) @@ -300,8 +281,6 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.status_int, 422) def test_PUT_user_metadata(self): - if not self.path_to_test_xfs: - raise SkipTest timestamp = normalize_timestamp(time()) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': timestamp, @@ -329,8 +308,6 @@ class TestObjectController(unittest.TestCase): 'X-Object-Meta-Two': 'Two'}) def test_PUT_container_connection(self): - if not self.path_to_test_xfs: - raise SkipTest def mock_http_connect(response, with_exc=False): @@ -399,8 +376,6 @@ class TestObjectController(unittest.TestCase): def test_HEAD(self): """ Test swift.object_server.ObjectController.HEAD """ - if not self.path_to_test_xfs: - raise SkipTest req = Request.blank('/sda1/p/a/c') resp = self.object_controller.HEAD(req) self.assertEquals(resp.status_int, 400) @@ -466,8 +441,6 @@ class TestObjectController(unittest.TestCase): def test_GET(self): """ Test swift.object_server.ObjectController.GET """ - if not self.path_to_test_xfs: - raise SkipTest req = Request.blank('/sda1/p/a/c') resp = self.object_controller.GET(req) self.assertEquals(resp.status_int, 400) @@ 
-555,8 +528,6 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.status_int, 404) def test_GET_if_match(self): - if not self.path_to_test_xfs: - raise SkipTest req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={ 'X-Timestamp': normalize_timestamp(time()), @@ -610,8 +581,6 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.status_int, 412) def test_GET_if_none_match(self): - if not self.path_to_test_xfs: - raise SkipTest req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={ 'X-Timestamp': normalize_timestamp(time()), @@ -661,8 +630,6 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.etag, etag) def test_GET_if_modified_since(self): - if not self.path_to_test_xfs: - raise SkipTest timestamp = normalize_timestamp(time()) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={ @@ -698,8 +665,6 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.status_int, 304) def test_GET_if_unmodified_since(self): - if not self.path_to_test_xfs: - raise SkipTest timestamp = normalize_timestamp(time()) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={ @@ -737,8 +702,6 @@ class TestObjectController(unittest.TestCase): def test_DELETE(self): """ Test swift.object_server.ObjectController.DELETE """ - if not self.path_to_test_xfs: - raise SkipTest req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE'}) resp = self.object_controller.DELETE(req) @@ -865,8 +828,6 @@ class TestObjectController(unittest.TestCase): self.assertEquals(outbuf.getvalue()[:4], '405 ') def test_chunked_put(self): - if not self.path_to_test_xfs: - raise SkipTest listener = listen(('localhost', 0)) port = listener.getsockname()[1] killer = spawn(wsgi.server, listener, self.object_controller, @@ -891,8 +852,6 @@ class TestObjectController(unittest.TestCase): killer.kill() def test_max_object_name_length(self): - if not self.path_to_test_xfs: - raise SkipTest timestamp = normalize_timestamp(time()) req = Request.blank('/sda1/p/a/c/' + ('1' * 1024), environ={'REQUEST_METHOD': 'PUT'}, @@ -912,8 +871,6 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.status_int, 400) def test_disk_file_app_iter_corners(self): - if not self.path_to_test_xfs: - raise SkipTest df = object_server.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o') mkdirs(df.datadir) f = open(os.path.join(df.datadir, @@ -946,8 +903,6 @@ class TestObjectController(unittest.TestCase): self.assert_(os.path.exists(tmpdir)) def test_max_upload_time(self): - if not self.path_to_test_xfs: - raise SkipTest class SlowBody(): @@ -996,8 +951,6 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.status_int, 499) def test_bad_sinces(self): - if not self.path_to_test_xfs: - raise SkipTest req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': normalize_timestamp(time()), 'Content-Length': '4', 'Content-Type': 'text/plain'}, @@ -1022,8 +975,6 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.status_int, 412) def test_content_encoding(self): - if not self.path_to_test_xfs: - raise SkipTest req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': normalize_timestamp(time()), 'Content-Length': '4', 'Content-Type': 'text/plain', @@ -1042,8 +993,6 @@ class TestObjectController(unittest.TestCase): 
self.assertEquals(resp.headers['content-encoding'], 'gzip') def test_manifest_header(self): - if not self.path_to_test_xfs: - raise SkipTest timestamp = normalize_timestamp(time()) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': timestamp, diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index e5a4e40652..e991d84084 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -47,11 +47,104 @@ from swift.common import ring from swift.common.constraints import MAX_META_NAME_LENGTH, \ MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, MAX_FILE_SIZE from swift.common.utils import mkdirs, normalize_timestamp, NullLogger +from swift.common.wsgi import monkey_patch_mimetools # mocks logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) +def setup(): + global _testdir, _test_servers, _test_sockets, \ + _orig_container_listing_limit, _test_coros + monkey_patch_mimetools() + # Since we're starting up a lot here, we're going to test more than + # just chunked puts; we're also going to test parts of + # proxy_server.Application we couldn't get to easily otherwise. + _testdir = \ + os.path.join(mkdtemp(), 'tmp_test_proxy_server_chunked') + mkdirs(_testdir) + rmtree(_testdir) + mkdirs(os.path.join(_testdir, 'sda1')) + mkdirs(os.path.join(_testdir, 'sda1', 'tmp')) + mkdirs(os.path.join(_testdir, 'sdb1')) + mkdirs(os.path.join(_testdir, 'sdb1', 'tmp')) + _orig_container_listing_limit = proxy_server.CONTAINER_LISTING_LIMIT + conf = {'devices': _testdir, 'swift_dir': _testdir, + 'mount_check': 'false'} + prolis = listen(('localhost', 0)) + acc1lis = listen(('localhost', 0)) + acc2lis = listen(('localhost', 0)) + con1lis = listen(('localhost', 0)) + con2lis = listen(('localhost', 0)) + obj1lis = listen(('localhost', 0)) + obj2lis = listen(('localhost', 0)) + _test_sockets = \ + (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis) + pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]], + [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1', + 'port': acc1lis.getsockname()[1]}, + {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1', + 'port': acc2lis.getsockname()[1]}], 30), + GzipFile(os.path.join(_testdir, 'account.ring.gz'), 'wb')) + pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]], + [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1', + 'port': con1lis.getsockname()[1]}, + {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1', + 'port': con2lis.getsockname()[1]}], 30), + GzipFile(os.path.join(_testdir, 'container.ring.gz'), 'wb')) + pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]], + [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1', + 'port': obj1lis.getsockname()[1]}, + {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1', + 'port': obj2lis.getsockname()[1]}], 30), + GzipFile(os.path.join(_testdir, 'object.ring.gz'), 'wb')) + prosrv = proxy_server.Application(conf, FakeMemcacheReturnsNone()) + acc1srv = account_server.AccountController(conf) + acc2srv = account_server.AccountController(conf) + con1srv = container_server.ContainerController(conf) + con2srv = container_server.ContainerController(conf) + obj1srv = object_server.ObjectController(conf) + obj2srv = object_server.ObjectController(conf) + _test_servers = \ + (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv) + nl = NullLogger() + prospa = spawn(wsgi.server, prolis, prosrv, nl) + acc1spa = spawn(wsgi.server, acc1lis, acc1srv, nl) + acc2spa = spawn(wsgi.server, 
acc2lis, acc2srv, nl) + con1spa = spawn(wsgi.server, con1lis, con1srv, nl) + con2spa = spawn(wsgi.server, con2lis, con2srv, nl) + obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl) + obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl) + _test_coros = \ + (prospa, acc1spa, acc2spa, con2spa, con2spa, obj1spa, obj2spa) + # Create account + ts = normalize_timestamp(time()) + partition, nodes = prosrv.account_ring.get_nodes('a') + for node in nodes: + conn = proxy_server.http_connect(node['ip'], node['port'], + node['device'], partition, 'PUT', '/a', + {'X-Timestamp': ts, 'X-CF-Trans-Id': 'test'}) + resp = conn.getresponse() + assert(resp.status == 201) + # Create container + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Auth-Token: t\r\n' + 'Content-Length: 0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + assert(headers[:len(exp)] == exp) + + +def teardown(): + for server in _test_coros: + server.kill() + proxy_server.CONTAINER_LISTING_LIMIT = _orig_container_listing_limit + rmtree(os.path.dirname(_testdir)) + + def fake_http_connect(*code_iter, **kwargs): class FakeConn(object): @@ -187,7 +280,7 @@ class FakeMemcache(object): def delete(self, key): try: del self.store[key] - except: + except Exception: pass return True @@ -426,6 +519,11 @@ class TestObjectController(unittest.TestCase): self.app = proxy_server.Application(None, FakeMemcache(), account_ring=FakeRing(), container_ring=FakeRing(), object_ring=FakeRing()) + monkey_patch_mimetools() + + + def tearDown(self): + proxy_server.CONTAINER_LISTING_LIMIT = _orig_container_listing_limit def assert_status_map(self, method, statuses, expected, raise_exc=False): with save_globals(): @@ -1044,12 +1142,13 @@ class TestObjectController(unittest.TestCase): def test_error_limiting(self): with save_globals(): + proxy_server.shuffle = lambda l: None controller = proxy_server.ObjectController(self.app, 'account', 'container', 'object') self.assert_status_map(controller.HEAD, (503, 200, 200), 200) self.assertEquals(controller.app.object_ring.devs[0]['errors'], 2) self.assert_('last_error' in controller.app.object_ring.devs[0]) - for _ in xrange(self.app.error_suppression_limit): + for _junk in xrange(self.app.error_suppression_limit): self.assert_status_map(controller.HEAD, (503, 503, 503), 503) self.assertEquals(controller.app.object_ring.devs[0]['errors'], self.app.error_suppression_limit + 1) @@ -1534,7 +1633,6 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay') def test_chunked_put(self): - # quick test of chunked put w/o PATH_TO_TEST_XFS class ChunkedFile(): @@ -1585,576 +1683,527 @@ class TestObjectController(unittest.TestCase): finally: server.MAX_FILE_SIZE = MAX_FILE_SIZE - def test_chunked_put_and_a_bit_more(self): - # Since we're starting up a lot here, we're going to test more than - # just chunked puts; we're also going to test parts of - # proxy_server.Application we couldn't get to easily otherwise. - path_to_test_xfs = os.environ.get('PATH_TO_TEST_XFS') - if not path_to_test_xfs or not os.path.exists(path_to_test_xfs): - print >> sys.stderr, 'WARNING: PATH_TO_TEST_XFS not set or not ' \ - 'pointing to a valid directory.\n' \ - 'Please set PATH_TO_TEST_XFS to a directory on an XFS file ' \ - 'system for testing.' 
- raise SkipTest - testdir = \ - os.path.join(path_to_test_xfs, 'tmp_test_proxy_server_chunked') - mkdirs(testdir) - rmtree(testdir) - mkdirs(os.path.join(testdir, 'sda1')) - mkdirs(os.path.join(testdir, 'sda1', 'tmp')) - mkdirs(os.path.join(testdir, 'sdb1')) - mkdirs(os.path.join(testdir, 'sdb1', 'tmp')) - try: - orig_container_listing_limit = proxy_server.CONTAINER_LISTING_LIMIT - conf = {'devices': testdir, 'swift_dir': testdir, - 'mount_check': 'false'} - prolis = listen(('localhost', 0)) - acc1lis = listen(('localhost', 0)) - acc2lis = listen(('localhost', 0)) - con1lis = listen(('localhost', 0)) - con2lis = listen(('localhost', 0)) - obj1lis = listen(('localhost', 0)) - obj2lis = listen(('localhost', 0)) - pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]], - [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1', - 'port': acc1lis.getsockname()[1]}, - {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1', - 'port': acc2lis.getsockname()[1]}], 30), - GzipFile(os.path.join(testdir, 'account.ring.gz'), 'wb')) - pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]], - [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1', - 'port': con1lis.getsockname()[1]}, - {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1', - 'port': con2lis.getsockname()[1]}], 30), - GzipFile(os.path.join(testdir, 'container.ring.gz'), 'wb')) - pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]], - [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1', - 'port': obj1lis.getsockname()[1]}, - {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1', - 'port': obj2lis.getsockname()[1]}], 30), - GzipFile(os.path.join(testdir, 'object.ring.gz'), 'wb')) - prosrv = proxy_server.Application(conf, FakeMemcacheReturnsNone()) - acc1srv = account_server.AccountController(conf) - acc2srv = account_server.AccountController(conf) - con1srv = container_server.ContainerController(conf) - con2srv = container_server.ContainerController(conf) - obj1srv = object_server.ObjectController(conf) - obj2srv = object_server.ObjectController(conf) - nl = NullLogger() - prospa = spawn(wsgi.server, prolis, prosrv, nl) - acc1spa = spawn(wsgi.server, acc1lis, acc1srv, nl) - acc2spa = spawn(wsgi.server, acc2lis, acc2srv, nl) - con1spa = spawn(wsgi.server, con1lis, con1srv, nl) - con2spa = spawn(wsgi.server, con2lis, con2srv, nl) - obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl) - obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl) - try: - # Check bad version - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v0 HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nContent-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 412' - self.assertEquals(headers[:len(exp)], exp) - # Check bad path - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET invalid HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nContent-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 404' - self.assertEquals(headers[:len(exp)], exp) - # Check invalid utf-8 - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a%80 HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Auth-Token: t\r\n' - 'Content-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 412' - self.assertEquals(headers[:len(exp)], exp) - # Check bad path, no controller - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - 
fd = sock.makefile() - fd.write('GET /v1 HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Auth-Token: t\r\n' - 'Content-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 412' - self.assertEquals(headers[:len(exp)], exp) - # Check bad method - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('LICK /v1/a HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Auth-Token: t\r\n' - 'Content-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 405' - self.assertEquals(headers[:len(exp)], exp) - # Check unhandled exception - orig_update_request = prosrv.update_request + def test_chunked_put_bad_version(self): + # Check bad version + (prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \ + _test_sockets + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v0 HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nContent-Length: 0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 412' + self.assertEquals(headers[:len(exp)], exp) - def broken_update_request(env, req): - raise Exception('fake') + def test_chunked_put_bad_path(self): + # Check bad path + (prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \ + _test_sockets + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET invalid HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nContent-Length: 0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 404' + self.assertEquals(headers[:len(exp)], exp) - prosrv.update_request = broken_update_request - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Auth-Token: t\r\n' - 'Content-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 500' - self.assertEquals(headers[:len(exp)], exp) - prosrv.update_request = orig_update_request - # Okay, back to chunked put testing; Create account - ts = normalize_timestamp(time()) - partition, nodes = prosrv.account_ring.get_nodes('a') - for node in nodes: - conn = proxy_server.http_connect(node['ip'], node['port'], - node['device'], partition, 'PUT', '/a', - {'X-Timestamp': ts, 'X-CF-Trans-Id': 'test'}) - resp = conn.getresponse() - self.assertEquals(resp.status, 201) - # Head account, just a double check and really is here to test - # the part Application.log_request that 'enforces' a - # content_length on the response. - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Auth-Token: t\r\n' - 'Content-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 204' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('\r\nContent-Length: 0\r\n' in headers) - # Create container - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Auth-Token: t\r\n' - 'Content-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # GET account with a query string to test that - # Application.log_request logs the query string. Also, throws - # in a test for logging x-forwarded-for (first entry only). 
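The rewritten tests that follow all speak raw HTTP/1.1 to the proxy's ephemeral port and grab just the response header block with the module's readuntil2crlfs() helper. A minimal sketch of such a helper (this sketch is an assumption of this note, not part of the patch), reading byte by byte until the blank line that ends the headers:

    def readuntil2crlfs(fd):
        # Accumulate bytes until two consecutive CRLFs have been seen,
        # i.e. through the blank line terminating the header block.
        rv = ''
        lc = ''
        crlfs = 0
        while crlfs < 2:
            c = fd.read(1)
            rv = rv + c
            if c == '\r' and lc != '\n':
                crlfs = 0
            if lc == '\r' and c == '\n':
                crlfs += 1
            lc = c
        return rv

The tests then compare headers[:len(exp)] against an expected status line prefix such as 'HTTP/1.1 201'.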
+ def test_chunked_put_bad_utf8(self): + # Check invalid utf-8 + (prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \ + _test_sockets + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a%80 HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Auth-Token: t\r\n' + 'Content-Length: 0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 412' + self.assertEquals(headers[:len(exp)], exp) - class Logger(object): + def test_chunked_put_bad_path_no_controller(self): + # Check bad path, no controller + (prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \ + _test_sockets + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1 HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Auth-Token: t\r\n' + 'Content-Length: 0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 412' + self.assertEquals(headers[:len(exp)], exp) - def info(self, msg): - self.msg = msg + def test_chunked_put_bad_method(self): + # Check bad method + (prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \ + _test_sockets + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('LICK /v1/a HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Auth-Token: t\r\n' + 'Content-Length: 0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 405' + self.assertEquals(headers[:len(exp)], exp) - orig_logger = prosrv.logger - prosrv.logger = Logger() - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write( - 'GET /v1/a?format=json HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Auth-Token: t\r\n' - 'Content-Length: 0\r\nX-Forwarded-For: host1, host2\r\n' - '\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('/v1/a%3Fformat%3Djson' in prosrv.logger.msg, - prosrv.logger.msg) - exp = 'host1' - self.assertEquals(prosrv.logger.msg[:len(exp)], exp) - prosrv.logger = orig_logger - # Turn on header logging. 
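test_chunked_put_bad_utf8 above expects 412 because the percent-decoded path is not valid UTF-8. A quick Python 2 illustration of the failing check (using urllib.unquote as one plausible stand-in for the proxy's path validation, which is an assumption of this note):

    from urllib import unquote

    path = unquote('/v1/a%80')   # -> '/v1/a\x80'; 0x80 is not valid UTF-8
    try:
        path.decode('utf8')
    except UnicodeDecodeError:
        pass  # this failure is why the proxy answers 412 Precondition Failed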
+ def test_chunked_put_unhandled_exception(self): + # Check unhandled exception + (prosrv, acc1srv, acc2srv, con2srv, con2srv, obj1srv, obj2srv) = \ + _test_servers + (prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \ + _test_sockets + orig_update_request = prosrv.update_request - class Logger(object): + def broken_update_request(env, req): + raise Exception('fake') - def info(self, msg): - self.msg = msg + prosrv.update_request = broken_update_request + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Auth-Token: t\r\n' + 'Content-Length: 0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 500' + self.assertEquals(headers[:len(exp)], exp) + prosrv.update_request = orig_update_request - orig_logger = prosrv.logger - prosrv.logger = Logger() - prosrv.log_headers = True - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Auth-Token: t\r\n' - 'Content-Length: 0\r\nGoofy-Header: True\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('Goofy-Header%3A%20True' in prosrv.logger.msg, - prosrv.logger.msg) - prosrv.log_headers = False - prosrv.logger = orig_logger - # Test UTF-8 Unicode all the way through the system - ustr = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xba \xe1\xbc\xb0\xce' \ - '\xbf\xe1\xbd\xbb\xce\x87 \xcf\x84\xe1\xbd\xb0 \xcf' \ - '\x80\xe1\xbd\xb1\xce\xbd\xcf\x84\xca\xbc \xe1\xbc' \ - '\x82\xce\xbd \xe1\xbc\x90\xce\xbe\xe1\xbd\xb5\xce' \ - '\xba\xce\xbf\xce\xb9 \xcf\x83\xce\xb1\xcf\x86\xe1' \ - '\xbf\x86.Test' - ustr_short = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xbatest' - # Create ustr container - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Storage-Token: t\r\n' - 'Content-Length: 0\r\n\r\n' % quote(ustr)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # List account with ustr container (test plain) - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Storage-Token: t\r\n' - 'Content-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - containers = fd.read().split('\n') - self.assert_(ustr in containers) - # List account with ustr container (test json) - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a?format=json HTTP/1.1\r\n' - 'Host: localhost\r\nConnection: close\r\n' - 'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - listing = simplejson.loads(fd.read()) - self.assertEquals(listing[1]['name'], ustr.decode('utf8')) - # List account with ustr container (test xml) - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a?format=xml HTTP/1.1\r\n' - 'Host: localhost\r\nConnection: close\r\n' - 'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - 
self.assert_('%s' % ustr in fd.read()) - # Create ustr object with ustr metadata in ustr container - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Storage-Token: t\r\n' - 'X-Object-Meta-%s: %s\r\nContent-Length: 0\r\n\r\n' % - (quote(ustr), quote(ustr), quote(ustr_short), - quote(ustr))) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # List ustr container with ustr object (test plain) - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Storage-Token: t\r\n' - 'Content-Length: 0\r\n\r\n' % quote(ustr)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - objects = fd.read().split('\n') - self.assert_(ustr in objects) - # List ustr container with ustr object (test json) - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/%s?format=json HTTP/1.1\r\n' - 'Host: localhost\r\nConnection: close\r\n' - 'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n' % - quote(ustr)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - listing = simplejson.loads(fd.read()) - self.assertEquals(listing[0]['name'], ustr.decode('utf8')) - # List ustr container with ustr object (test xml) - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/%s?format=xml HTTP/1.1\r\n' - 'Host: localhost\r\nConnection: close\r\n' - 'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n' % - quote(ustr)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('%s' % ustr in fd.read()) - # Retrieve ustr object with ustr metadata - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Storage-Token: t\r\n' - 'Content-Length: 0\r\n\r\n' % - (quote(ustr), quote(ustr))) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('\r\nX-Object-Meta-%s: %s\r\n' % - (quote(ustr_short).lower(), quote(ustr)) in headers) - # Do chunked object put - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - # Also happens to assert that x-storage-token is taken as a - # replacement for x-auth-token. 
- fd.write('PUT /v1/a/c/o/chunky HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Storage-Token: t\r\n' - 'Transfer-Encoding: chunked\r\n\r\n' - '2\r\noh\r\n4\r\n hai\r\nf\r\n123456789abcdef\r\n' - '0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # Ensure we get what we put - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/c/o/chunky HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Auth-Token: t\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - body = fd.read() - self.assertEquals(body, 'oh hai123456789abcdef') - # Create a container for our segmented/manifest object testing - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/segmented HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Storage-Token: t\r\n' - 'Content-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # Create the object segments - for segment in xrange(5): - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/segmented/name/%s HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Storage-Token: ' - 't\r\nContent-Length: 5\r\n\r\n1234 ' % str(segment)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # Create the object manifest file - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/segmented/name HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Storage-Token: ' - 't\r\nContent-Length: 0\r\nX-Object-Manifest: ' - 'segmented/name/\r\nContent-Type: text/jibberish\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # Ensure retrieving the manifest file gets the whole object - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/segmented/name HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: ' - 't\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('X-Object-Manifest: segmented/name/' in headers) - self.assert_('Content-Type: text/jibberish' in headers) - body = fd.read() - self.assertEquals(body, '1234 1234 1234 1234 1234 ') - # Do it again but exceeding the container listing limit - proxy_server.CONTAINER_LISTING_LIMIT = 2 - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/segmented/name HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: ' - 't\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('X-Object-Manifest: segmented/name/' in headers) - self.assert_('Content-Type: text/jibberish' in headers) - body = fd.read() - # A bit fragile of a test; as it makes the assumption that all - # will be sent in a single chunk. - self.assertEquals(body, - '19\r\n1234 1234 1234 1234 1234 \r\n0\r\n\r\n') - # Make a copy of the manifested object, which should - # error since the number of segments exceeds - # CONTAINER_LISTING_LIMIT. 
- sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/segmented/copy HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: ' - 't\r\nX-Copy-From: segmented/name\r\nContent-Length: ' - '0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 413' - self.assertEquals(headers[:len(exp)], exp) - body = fd.read() - # After adjusting the CONTAINER_LISTING_LIMIT, make a copy of - # the manifested object which should consolidate the segments. - proxy_server.CONTAINER_LISTING_LIMIT = 10000 - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/segmented/copy HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: ' - 't\r\nX-Copy-From: segmented/name\r\nContent-Length: ' - '0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - body = fd.read() - # Retrieve and validate the copy. - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/segmented/copy HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: ' - 't\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('x-object-manifest:' not in headers.lower()) - self.assert_('Content-Length: 25\r' in headers) - body = fd.read() - self.assertEquals(body, '1234 1234 1234 1234 1234 ') - # Create an object manifest file pointing to nothing - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/segmented/empty HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Storage-Token: ' - 't\r\nContent-Length: 0\r\nX-Object-Manifest: ' - 'segmented/empty/\r\nContent-Type: text/jibberish\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # Ensure retrieving the manifest file gives a zero-byte file - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/segmented/empty HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: ' - 't\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('X-Object-Manifest: segmented/empty/' in headers) - self.assert_('Content-Type: text/jibberish' in headers) - body = fd.read() - self.assertEquals(body, '') - # Check copy content type - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/c/obj HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Storage-Token: ' - 't\r\nContent-Length: 0\r\nContent-Type: text/jibberish' - '\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/c/obj2 HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Storage-Token: ' - 't\r\nContent-Length: 0\r\nX-Copy-From: c/obj\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # Ensure getting the copied file gets original content-type - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/c/obj2 HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: 
close\r\nX-Auth-Token: ' - 't\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('Content-Type: text/jibberish' in headers) - # Check set content type - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/c/obj3 HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Storage-Token: ' - 't\r\nContent-Length: 0\r\nContent-Type: foo/bar' - '\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # Ensure getting the copied file gets original content-type - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/c/obj3 HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: ' - 't\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('Content-Type: foo/bar' in - headers.split('\r\n'), repr(headers.split('\r\n'))) - # Check set content type with charset - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/c/obj4 HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Storage-Token: ' - 't\r\nContent-Length: 0\r\nContent-Type: foo/bar' - '; charset=UTF-8\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # Ensure getting the copied file gets original content-type - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/c/obj4 HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: ' - 't\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('Content-Type: foo/bar; charset=UTF-8' in - headers.split('\r\n'), repr(headers.split('\r\n'))) - finally: - prospa.kill() - acc1spa.kill() - acc2spa.kill() - con1spa.kill() - con2spa.kill() - obj1spa.kill() - obj2spa.kill() - finally: - proxy_server.CONTAINER_LISTING_LIMIT = orig_container_listing_limit - rmtree(testdir) + def test_chunked_put_head_account(self): + # Head account, just a double check and really is here to test + # the part Application.log_request that 'enforces' a + # content_length on the response. + (prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \ + _test_sockets + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Auth-Token: t\r\n' + 'Content-Length: 0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 204' + self.assertEquals(headers[:len(exp)], exp) + self.assert_('\r\nContent-Length: 0\r\n' in headers) + + def test_chunked_put_logging(self): + # GET account with a query string to test that + # Application.log_request logs the query string. Also, throws + # in a test for logging x-forwarded-for (first entry only). 
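+        # The assertions below rely on log_request quoting the logged
+        # request line (so '?' and '=' show up as %3F and %3D) and, with
+        # log_headers enabled, quoting header text such as
+        # 'Goofy-Header%3A%20True', keeping each log record on one line.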
+ (prosrv, acc1srv, acc2srv, con2srv, con2srv, obj1srv, obj2srv) = \ + _test_servers + (prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \ + _test_sockets + + class Logger(object): + + def info(self, msg): + self.msg = msg + + orig_logger = prosrv.logger + prosrv.logger = Logger() + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write( + 'GET /v1/a?format=json HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Auth-Token: t\r\n' + 'Content-Length: 0\r\nX-Forwarded-For: host1, host2\r\n' + '\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEquals(headers[:len(exp)], exp) + self.assert_('/v1/a%3Fformat%3Djson' in prosrv.logger.msg, + prosrv.logger.msg) + exp = 'host1' + self.assertEquals(prosrv.logger.msg[:len(exp)], exp) + prosrv.logger = orig_logger + # Turn on header logging. + + orig_logger = prosrv.logger + prosrv.logger = Logger() + prosrv.log_headers = True + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Auth-Token: t\r\n' + 'Content-Length: 0\r\nGoofy-Header: True\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEquals(headers[:len(exp)], exp) + self.assert_('Goofy-Header%3A%20True' in prosrv.logger.msg, + prosrv.logger.msg) + prosrv.log_headers = False + prosrv.logger = orig_logger + + def test_chunked_put_utf8_all_the_way_down(self): + # Test UTF-8 Unicode all the way through the system + (prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \ + _test_sockets + ustr = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xba \xe1\xbc\xb0\xce' \ + '\xbf\xe1\xbd\xbb\xce\x87 \xcf\x84\xe1\xbd\xb0 \xcf' \ + '\x80\xe1\xbd\xb1\xce\xbd\xcf\x84\xca\xbc \xe1\xbc' \ + '\x82\xce\xbd \xe1\xbc\x90\xce\xbe\xe1\xbd\xb5\xce' \ + '\xba\xce\xbf\xce\xb9 \xcf\x83\xce\xb1\xcf\x86\xe1' \ + '\xbf\x86.Test' + ustr_short = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xbatest' + # Create ustr container + (prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \ + _test_sockets + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Storage-Token: t\r\n' + 'Content-Length: 0\r\n\r\n' % quote(ustr)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEquals(headers[:len(exp)], exp) + # List account with ustr container (test plain) + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Storage-Token: t\r\n' + 'Content-Length: 0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEquals(headers[:len(exp)], exp) + containers = fd.read().split('\n') + self.assert_(ustr in containers) + # List account with ustr container (test json) + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a?format=json HTTP/1.1\r\n' + 'Host: localhost\r\nConnection: close\r\n' + 'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEquals(headers[:len(exp)], exp) + listing = simplejson.loads(fd.read()) + self.assert_(ustr.decode('utf8') in [l['name'] for l in listing]) + # List account with ustr container (test xml) + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = 
sock.makefile() + fd.write('GET /v1/a?format=xml HTTP/1.1\r\n' + 'Host: localhost\r\nConnection: close\r\n' + 'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEquals(headers[:len(exp)], exp) + self.assert_('%s' % ustr in fd.read()) + # Create ustr object with ustr metadata in ustr container + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Storage-Token: t\r\n' + 'X-Object-Meta-%s: %s\r\nContent-Length: 0\r\n\r\n' % + (quote(ustr), quote(ustr), quote(ustr_short), + quote(ustr))) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEquals(headers[:len(exp)], exp) + # List ustr container with ustr object (test plain) + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Storage-Token: t\r\n' + 'Content-Length: 0\r\n\r\n' % quote(ustr)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEquals(headers[:len(exp)], exp) + objects = fd.read().split('\n') + self.assert_(ustr in objects) + # List ustr container with ustr object (test json) + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/%s?format=json HTTP/1.1\r\n' + 'Host: localhost\r\nConnection: close\r\n' + 'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n' % + quote(ustr)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEquals(headers[:len(exp)], exp) + listing = simplejson.loads(fd.read()) + self.assertEquals(listing[0]['name'], ustr.decode('utf8')) + # List ustr container with ustr object (test xml) + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/%s?format=xml HTTP/1.1\r\n' + 'Host: localhost\r\nConnection: close\r\n' + 'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n' % + quote(ustr)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEquals(headers[:len(exp)], exp) + self.assert_('%s' % ustr in fd.read()) + # Retrieve ustr object with ustr metadata + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Storage-Token: t\r\n' + 'Content-Length: 0\r\n\r\n' % + (quote(ustr), quote(ustr))) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEquals(headers[:len(exp)], exp) + self.assert_('\r\nX-Object-Meta-%s: %s\r\n' % + (quote(ustr_short).lower(), quote(ustr)) in headers) + + def test_chunked_put_chunked_put(self): + # Do chunked object put + (prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \ + _test_sockets + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + # Also happens to assert that x-storage-token is taken as a + # replacement for x-auth-token. 
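+        # Each chunk below is framed as '<size in hex>\r\n<data>\r\n': sizes
+        # 2, 4 and f (15) carry 'oh', ' hai' and '123456789abcdef', and the
+        # '0\r\n\r\n' trailer ends the body -- 21 payload bytes in all.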
+ fd.write('PUT /v1/a/c/o/chunky HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Storage-Token: t\r\n' + 'Transfer-Encoding: chunked\r\n\r\n' + '2\r\noh\r\n4\r\n hai\r\nf\r\n123456789abcdef\r\n' + '0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEquals(headers[:len(exp)], exp) + # Ensure we get what we put + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/c/o/chunky HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Auth-Token: t\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEquals(headers[:len(exp)], exp) + body = fd.read() + self.assertEquals(body, 'oh hai123456789abcdef') + + def test_chunked_put_lobjects(self): + # Create a container for our segmented/manifest object testing + (prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \ + _test_sockets + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/segmented HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Storage-Token: t\r\n' + 'Content-Length: 0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEquals(headers[:len(exp)], exp) + # Create the object segments + for segment in xrange(5): + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/segmented/name/%s HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Storage-Token: ' + 't\r\nContent-Length: 5\r\n\r\n1234 ' % str(segment)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEquals(headers[:len(exp)], exp) + # Create the object manifest file + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/segmented/name HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Storage-Token: ' + 't\r\nContent-Length: 0\r\nX-Object-Manifest: ' + 'segmented/name/\r\nContent-Type: text/jibberish\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEquals(headers[:len(exp)], exp) + # Ensure retrieving the manifest file gets the whole object + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/segmented/name HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Auth-Token: ' + 't\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEquals(headers[:len(exp)], exp) + self.assert_('X-Object-Manifest: segmented/name/' in headers) + self.assert_('Content-Type: text/jibberish' in headers) + body = fd.read() + self.assertEquals(body, '1234 1234 1234 1234 1234 ') + # Do it again but exceeding the container listing limit + proxy_server.CONTAINER_LISTING_LIMIT = 2 + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/segmented/name HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Auth-Token: ' + 't\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEquals(headers[:len(exp)], exp) + self.assert_('X-Object-Manifest: segmented/name/' in headers) + self.assert_('Content-Type: text/jibberish' in headers) + body = fd.read() + # A bit fragile of a test; as it makes the assumption that all + # will be sent in a single chunk. 
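+        # With the listing limit at 2 the proxy streams the manifest's
+        # segments without knowing the total length up front, so the raw
+        # read shows chunked framing: '19' hex is 25, i.e.
+        # len('1234 ' * 5), followed by the zero-length end chunk.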
+ self.assertEquals(body, + '19\r\n1234 1234 1234 1234 1234 \r\n0\r\n\r\n') + # Make a copy of the manifested object, which should + # error since the number of segments exceeds + # CONTAINER_LISTING_LIMIT. + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/segmented/copy HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Auth-Token: ' + 't\r\nX-Copy-From: segmented/name\r\nContent-Length: ' + '0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 413' + self.assertEquals(headers[:len(exp)], exp) + body = fd.read() + # After adjusting the CONTAINER_LISTING_LIMIT, make a copy of + # the manifested object which should consolidate the segments. + proxy_server.CONTAINER_LISTING_LIMIT = 10000 + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/segmented/copy HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Auth-Token: ' + 't\r\nX-Copy-From: segmented/name\r\nContent-Length: ' + '0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEquals(headers[:len(exp)], exp) + body = fd.read() + # Retrieve and validate the copy. + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/segmented/copy HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Auth-Token: ' + 't\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEquals(headers[:len(exp)], exp) + self.assert_('x-object-manifest:' not in headers.lower()) + self.assert_('Content-Length: 25\r' in headers) + body = fd.read() + self.assertEquals(body, '1234 1234 1234 1234 1234 ') + # Create an object manifest file pointing to nothing + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/segmented/empty HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Storage-Token: ' + 't\r\nContent-Length: 0\r\nX-Object-Manifest: ' + 'segmented/empty/\r\nContent-Type: text/jibberish\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEquals(headers[:len(exp)], exp) + # Ensure retrieving the manifest file gives a zero-byte file + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/segmented/empty HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Auth-Token: ' + 't\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEquals(headers[:len(exp)], exp) + self.assert_('X-Object-Manifest: segmented/empty/' in headers) + self.assert_('Content-Type: text/jibberish' in headers) + body = fd.read() + self.assertEquals(body, '') + # Check copy content type + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/c/obj HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Storage-Token: ' + 't\r\nContent-Length: 0\r\nContent-Type: text/jibberish' + '\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEquals(headers[:len(exp)], exp) + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/c/obj2 HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Storage-Token: ' + 't\r\nContent-Length: 0\r\nX-Copy-From: c/obj\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEquals(headers[:len(exp)], exp) + # Ensure getting the copied file 
gets original content-type
+        sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+        fd = sock.makefile()
+        fd.write('GET /v1/a/c/obj2 HTTP/1.1\r\nHost: '
+                 'localhost\r\nConnection: close\r\nX-Auth-Token: '
+                 't\r\n\r\n')
+        fd.flush()
+        headers = readuntil2crlfs(fd)
+        exp = 'HTTP/1.1 200'
+        self.assertEquals(headers[:len(exp)], exp)
+        self.assert_('Content-Type: text/jibberish' in headers)
+        # Check set content type
+        sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+        fd = sock.makefile()
+        fd.write('PUT /v1/a/c/obj3 HTTP/1.1\r\nHost: '
+                 'localhost\r\nConnection: close\r\nX-Storage-Token: '
+                 't\r\nContent-Length: 0\r\nContent-Type: foo/bar'
+                 '\r\n\r\n')
+        fd.flush()
+        headers = readuntil2crlfs(fd)
+        exp = 'HTTP/1.1 201'
+        self.assertEquals(headers[:len(exp)], exp)
+        # Ensure getting the object gets the set content-type
+        sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+        fd = sock.makefile()
+        fd.write('GET /v1/a/c/obj3 HTTP/1.1\r\nHost: '
+                 'localhost\r\nConnection: close\r\nX-Auth-Token: '
+                 't\r\n\r\n')
+        fd.flush()
+        headers = readuntil2crlfs(fd)
+        exp = 'HTTP/1.1 200'
+        self.assertEquals(headers[:len(exp)], exp)
+        self.assert_('Content-Type: foo/bar' in
+                     headers.split('\r\n'), repr(headers.split('\r\n')))
+        # Check set content type with charset
+        sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+        fd = sock.makefile()
+        fd.write('PUT /v1/a/c/obj4 HTTP/1.1\r\nHost: '
+                 'localhost\r\nConnection: close\r\nX-Storage-Token: '
+                 't\r\nContent-Length: 0\r\nContent-Type: foo/bar'
+                 '; charset=UTF-8\r\n\r\n')
+        fd.flush()
+        headers = readuntil2crlfs(fd)
+        exp = 'HTTP/1.1 201'
+        self.assertEquals(headers[:len(exp)], exp)
+        # Ensure getting the object gets the set content-type and charset
+        sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+        fd = sock.makefile()
+        fd.write('GET /v1/a/c/obj4 HTTP/1.1\r\nHost: '
+                 'localhost\r\nConnection: close\r\nX-Auth-Token: '
+                 't\r\n\r\n')
+        fd.flush()
+        headers = readuntil2crlfs(fd)
+        exp = 'HTTP/1.1 200'
+        self.assertEquals(headers[:len(exp)], exp)
+        self.assert_('Content-Type: foo/bar; charset=UTF-8' in
+                     headers.split('\r\n'), repr(headers.split('\r\n')))

     def test_mismatched_etags(self):
         with save_globals():
@@ -2534,7 +2583,7 @@ class TestContainerController(unittest.TestCase):
             self.assertEquals(
                 controller.app.container_ring.devs[0]['errors'], 2)
             self.assert_('last_error' in controller.app.container_ring.devs[0])
-            for _ in xrange(self.app.error_suppression_limit):
+            for _junk in xrange(self.app.error_suppression_limit):
                 self.assert_status_map(controller.HEAD,
                                        (200, 503, 503, 503), 503)
             self.assertEquals(controller.app.container_ring.devs[0]['errors'],
@@ -3376,4 +3425,8 @@ class TestSegmentedIterable(unittest.TestCase):


 if __name__ == '__main__':
-    unittest.main()
+    setup()
+    try:
+        unittest.main()
+    finally:
+        teardown()
diff --git a/test/unit/stats/test_access_processor.py b/test/unit/stats/test_access_processor.py
index 7317c365aa..2e431ac334 100644
--- a/test/unit/stats/test_access_processor.py
+++ b/test/unit/stats/test_access_processor.py
@@ -21,8 +21,49 @@ from swift.stats import access_processor

 class TestAccessProcessor(unittest.TestCase):

-    def test_placeholder(self):
-        pass
+    def test_log_line_parser_field_count(self):
+        p = access_processor.AccessLogProcessor({})
+        # too few fields
+        log_line = [str(x) for x in range(17)]
+        log_line[1] = 'proxy-server'
+        log_line[4] = '1/Jan/3/4/5/6'
+        log_line[6] = '/v1/a/c/o'
+        log_line = 'x'*16 + ' '.join(log_line)
+        res =
p.log_line_parser(log_line) + expected = {} + self.assertEquals(res, expected) + # right amount of fields + log_line = [str(x) for x in range(18)] + log_line[1] = 'proxy-server' + log_line[4] = '1/Jan/3/4/5/6' + log_line[6] = '/v1/a/c/o' + log_line = 'x'*16 + ' '.join(log_line) + res = p.log_line_parser(log_line) + expected = {'code': 8, 'processing_time': '17', 'auth_token': '11', + 'month': '01', 'second': '6', 'year': '3', 'tz': '+0000', + 'http_version': '7', 'object_name': 'o', 'etag': '14', + 'method': '5', 'trans_id': '15', 'client_ip': '2', + 'bytes_out': 13, 'container_name': 'c', 'day': '1', + 'minute': '5', 'account': 'a', 'hour': '4', + 'referrer': '9', 'request': '/v1/a/c/o', + 'user_agent': '10', 'bytes_in': 12, 'lb_ip': '3'} + self.assertEquals(res, expected) + # too many fields + log_line = [str(x) for x in range(19)] + log_line[1] = 'proxy-server' + log_line[4] = '1/Jan/3/4/5/6' + log_line[6] = '/v1/a/c/o' + log_line = 'x'*16 + ' '.join(log_line) + res = p.log_line_parser(log_line) + expected = {'code': 8, 'processing_time': '17', 'auth_token': '11', + 'month': '01', 'second': '6', 'year': '3', 'tz': '+0000', + 'http_version': '7', 'object_name': 'o', 'etag': '14', + 'method': '5', 'trans_id': '15', 'client_ip': '2', + 'bytes_out': 13, 'container_name': 'c', 'day': '1', + 'minute': '5', 'account': 'a', 'hour': '4', + 'referrer': '9', 'request': '/v1/a/c/o', + 'user_agent': '10', 'bytes_in': 12, 'lb_ip': '3'} + self.assertEquals(res, expected) if __name__ == '__main__':
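A closing note on the new access-log test: judging from the three cases above, log_line_parser() strips a fixed 16-character syslog-style prefix, splits the remainder on spaces, and returns {} when fewer than 18 fields remain; building the line from stringified indices makes every parsed value self-identifying. A hedged sketch of the same construction (names mirror the test; the 16-character prefix and 18-field minimum are inferences, not confirmed by the patch):

    # Build a synthetic access log line the way the tests do.
    fields = [str(x) for x in range(18)]
    fields[1] = 'proxy-server'    # server field must name the proxy
    fields[4] = '1/Jan/3/4/5/6'   # day/month/year/hour/minute/second
    fields[6] = '/v1/a/c/o'       # version/account/container/object path
    line = 'x'*16 + ' '.join(fields)  # 16 junk chars stand in for the
                                      # timestamp prefix the parser strips
    # 17 fields parse to {}; 18 or 19 parse fully, with 'Jan' -> month '01'.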