diff --git a/.mailmap b/.mailmap index 4a6368f24a..717b0ec369 100644 --- a/.mailmap +++ b/.mailmap @@ -78,3 +78,6 @@ Jaivish Kothari Kazuhiro Miyahara Alexandra Settle +Kenichiro Matsuda +Atsushi Sakai +Takashi Natsume diff --git a/AUTHORS b/AUTHORS index ebde31a39a..29bc166604 100644 --- a/AUTHORS +++ b/AUTHORS @@ -26,6 +26,7 @@ Chuck Thier (cthier@gmail.com) Contributors ------------ Mehdi Abaakouk (mehdi.abaakouk@enovance.com) +Timur Alperovich (timur.alperovich@gmail.com) Jesse Andrews (anotherjesse@gmail.com) Joe Arnold (joe@swiftstack.com) Ionuț Arțăriși (iartarisi@suse.cz) @@ -47,6 +48,7 @@ Tim Burke (tim.burke@gmail.com) Brian D. Burns (iosctr@gmail.com) Devin Carlen (devin.carlen@gmail.com) Thierry Carrez (thierry@openstack.org) +Carlos Cavanna (ccavanna@ca.ibm.com) Emmanuel Cazenave (contact@emcaz.fr) Mahati Chamarthy (mahati.chamarthy@gmail.com) Zap Chang (zapchang@gmail.com) @@ -55,6 +57,7 @@ Ray Chen (oldsharp@163.com) Harshit Chitalia (harshit@acelio.com) Brian Cline (bcline@softlayer.com) Alistair Coles (alistair.coles@hp.com) +Clément Contini (ccontini@cloudops.com) Brian Curtin (brian.curtin@rackspace.com) Thiago da Silva (thiago@redhat.com) Julien Danjou (julien@danjou.info) @@ -64,6 +67,7 @@ Cedric Dos Santos (cedric.dos.sant@gmail.com) Gerry Drudy (gerry.drudy@hp.com) Morgan Fainberg (morgan.fainberg@gmail.com) ZhiQiang Fan (aji.zqfan@gmail.com) +Oshrit Feder (oshritf@il.ibm.com) Mike Fedosin (mfedosin@mirantis.com) Ricardo Ferreira (ricardo.sff@gmail.com) Flaper Fesp (flaper87@gmail.com) @@ -91,8 +95,10 @@ Dan Hersam (dan.hersam@hp.com) Derek Higgins (derekh@redhat.com) Alex Holden (alex@alexjonasholden.com) Edward Hope-Morley (opentastic@gmail.com) +Charles Hsu (charles0126@gmail.com) Joanna H. 
Huang (joanna.huitzu.huang@gmail.com) Kun Huang (gareth@unitedstack.com) +Bill Huber (wbhuber@us.ibm.com) Matthieu Huin (mhu@enovance.com) Hodong Hwang (hodong.hwang@kt.com) Motonobu Ichimura (motonobu@gmail.com) @@ -126,6 +132,7 @@ John Leach (john@johnleach.co.uk) Ed Leafe (ed.leafe@rackspace.com) Thomas Leaman (thomas.leaman@hp.com) Eohyung Lee (liquidnuker@gmail.com) +Zhao Lei (zhaolei@cn.fujitsu.com) Jamie Lennox (jlennox@redhat.com) Tong Li (litong01@us.ibm.com) Changbin Liu (changbin.liu@gmail.com) @@ -136,10 +143,12 @@ Zhongyue Luo (zhongyue.nah@intel.com) Paul Luse (paul.e.luse@intel.com) Christopher MacGown (chris@pistoncloud.com) Dragos Manolescu (dragosm@hp.com) +Ben Martin (blmartin@us.ibm.com) Steve Martinelli (stevemar@ca.ibm.com) Juan J. Martinez (juan@memset.com) Marcelo Martins (btorch@gmail.com) Dolph Mathews (dolph.mathews@gmail.com) +Kenichiro Matsuda (matsuda_kenichi@jp.fujitsu.com) Michael Matur (michael.matur@gmail.com) Donagh McCabe (donagh.mccabe@hp.com) Andy McCrae (andy.mccrae@gmail.com) @@ -151,11 +160,13 @@ Jola Mirecka (jola.mirecka@hp.com) Kazuhiro Miyahara (miyahara.kazuhiro@lab.ntt.co.jp) Daisuke Morita (morita.daisuke@lab.ntt.co.jp) Dirk Mueller (dirk@dmllr.de) +Takashi Natsume (natsume.takashi@lab.ntt.co.jp) Russ Nelson (russ@crynwr.com) Maru Newby (mnewby@internap.com) Newptone (xingchao@unitedstack.com) Colin Nicholson (colin.nicholson@iomart.com) Zhenguo Niu (zhenguo@unitedstack.com) +Ondrej Novy (ondrej.novy@firma.seznam.cz) Timothy Okwii (tokwii@cisco.com) Matthew Oliver (matt@oliver.net.au) Hisashi Osanai (osanai.hisashi@jp.fujitsu.com) @@ -169,18 +180,24 @@ Constantine Peresypkin (constantine.peresypk@rackspace.com) Dieter Plaetinck (dieter@vimeo.com) Dan Prince (dprince@redhat.com) Sarvesh Ranjan (saranjan@cisco.com) +Falk Reimann (falk.reimann@sap.com) +Brian Reitz (brian.reitz@oracle.com) Felipe Reyes (freyes@tty.cl) Janie Richling (jrichli@us.ibm.com) Matt Riedemann (mriedem@us.ibm.com) Li Riqiang (lrqrun@gmail.com) 
Rafael Rivero (rafael@cloudscaling.com) Victor Rodionov (victor.rodionov@nexenta.com) +Eran Rom (eranr@il.ibm.com) Aaron Rosen (arosen@nicira.com) Brent Roskos (broskos@internap.com) +Hamdi Roumani (roumani@ca.ibm.com) Shilla Saebi (shilla.saebi@gmail.com) +Atsushi Sakai (sakaia@jp.fujitsu.com) Cristian A Sanchez (cristian.a.sanchez@intel.com) Christian Schwede (cschwede@redhat.com) Mark Seger (Mark.Seger@hp.com) +Azhagu Selvan SP (tamizhgeek@gmail.com) Alexandra Settle (alexandra.settle@rackspace.com) Andrew Clay Shafer (acs@parvuscaptus.com) Mitsuhiro SHIGEMATSU (shigematsu.mitsuhiro@lab.ntt.co.jp) @@ -198,6 +215,7 @@ Jeremy Stanley (fungi@yuggoth.org) Mauro Stettler (mauro.stettler@gmail.com) Tobias Stevenson (tstevenson@vbridges.com) Victor Stinner (vstinner@redhat.com) +Akihito Takai (takaiak@nttdata.co.jp) Pearl Yajing Tan (pearl.y.tan@seagate.com) Yuriy Taraday (yorik.sar@gmail.com) Monty Taylor (mordred@inaugust.com) @@ -231,5 +249,6 @@ Guang Yee (guang.yee@hp.com) Pete Zaitcev (zaitcev@kotori.zaitcev.us) Hua Zhang (zhuadl@cn.ibm.com) Jian Zhang (jian.zhang@intel.com) +Kai Zhang (zakir.exe@gmail.com) Ning Zhang (ning@zmanda.com) Yuan Zhou (yuan.zhou@intel.com) diff --git a/CHANGELOG b/CHANGELOG index 1e7bd5ff36..f4ce9fcf35 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,4 +1,133 @@ -swift (2.3.0) +swift (2.4.0) + + * Dependency changes + + - Added six requirement. This is part of an ongoing effort to add + support for Python 3. + + - Dropped support for Python 2.6. + + * Config changes + + - Recent versions of Python restrict the number of headers allowed in a + request to 100. This number may be too low for custom middleware. The + new "extra_header_count" config value in swift.conf can be used to + increase the number of headers allowed. + + - Renamed "run_pause" setting to "interval" (current configs with + run_pause still work). Future versions of Swift may remove the + run_pause setting. 
+ + * Versioned writes middleware + + The versioned writes feature has been refactored and reimplemented as + middleware. You should explicitly add the versioned_writes middleware to + your proxy pipeline, but do not remove or disable the existing container + server config setting ("allow_versions"), if it is currently enabled. + The existing container server config setting enables existing + containers to continue being versioned. Please see + http://swift.openstack.org/middleware.html#how-to-enable-object-versioning-in-a-swift-cluster + for further upgrade notes. + + * Allow 1+ object-servers-per-disk deployment + + Enabled by a new > 0 integer config value, "servers_per_port" in the + [DEFAULT] config section for object-server and/or replication server + configs. The setting's integer value determines how many different + object-server workers handle requests for any single unique local port + in the ring. In this mode, the parent swift-object-server process + continues to run as the original user (i.e. root if low-port binding + is required), binds to all ports as defined in the ring, and forks off + the specified number of workers per listen socket. The child, per-port + servers drop privileges and behave pretty much how object-server workers + always have, except that because the ring has unique ports per disk, the + object-servers will only be handling requests for a single disk. The + parent process detects dead servers and restarts them (with the correct + listen socket), starts missing servers when an updated ring file is + found with a device on the server with a new port, and kills extraneous + servers when their port is found to no longer be in the ring. The ring + files are stat'ed at most every "ring_check_interval" seconds, as + configured in the object-server config (same default of 15s). 
+ + In testing, this deployment configuration (with a value of 3) lowers + request latency, improves requests per second, and isolates slow disk + IO as compared to the existing "workers" setting. To use this, each + device must be added to the ring using a different port. + + * Do container listing updates in another (green)thread + + The object server has learned the "container_update_timeout" setting + (with a default of 1 second). This value is the number of seconds that + the object server will wait for the container server to update the + listing before returning the status of the object PUT operation. + + Previously, the object server would wait up to 3 seconds for the + container server response. The new behavior dramatically lowers object + PUT latency when container servers in the cluster are busy (e.g. when + the container is very large). Setting the value too low may result in a + client PUT'ing an object and not being able to immediately find it in + listings. Setting it too high will increase latency for clients when + container servers are busy. + + * TempURL fixes (closes CVE-2015-5223) + + Do not allow PUT tempurls to create pointers to other data. + Specifically, disallow the creation of DLO object manifests via a PUT + tempurl. This prevents discoverability attacks which can use any PUT + tempurl to probe for private data by creating a DLO object manifest and + then using the PUT tempurl to head the object. + + * Ring changes + + - Partition placement no longer uses the port number to place + partitions. This improves dispersion in small clusters running one + object server per drive, and it does not affect dispersion in + clusters running one object server per server. + + - Added ring-builder-analyzer tool to more easily test and analyze a + series of ring management operations. + + - Stop moving partitions unnecessarily when overload is on. + + * Significant improvements and bug fixes have been made to erasure code + support. 
This feature is suitable for beta testing, but it is not yet + ready for broad production usage. + + * Bulk upload now treats user xattrs on files in the given archive as + object metadata on the resulting created objects. + + * Emit warning log in object replicator if "handoffs_first" or + "handoff_delete" is set. + + * Enable object replicator's failure count in swift-recon. + + * Added storage policy support to dispersion tools. + + * Support keystone v3 domains in swift-dispersion. + + * Added domain_remap information to the /info endpoint. + + * Added support for a "default_reseller_prefix" in domain_remap + middleware config. + + * Allow SLO PUTs to forgo per-segment integrity checks. Previously, each + segment referenced in the manifest also needed the correct etag and + bytes setting. These fields now allow the "null" value to skip those + particular checks on the given segment. + + * Allow rsync to use compression via a "rsync_compress" config. If set to + true, compression is only enabled for an rsync to a device in a + different region. In some cases, this can speed up cross-region + replication data transfer. + + * Added time synchronization check in swift-recon (the --time option). + + * The account reaper now runs faster on large accounts. + + * Various other minor bug fixes and improvements. + + +swift (2.3.0, OpenStack Kilo) * Erasure Code support (beta) @@ -58,6 +187,7 @@ swift (2.3.0) * Various other minor bug fixes and improvements. + swift (2.2.2) * Data placement changes @@ -117,6 +247,7 @@ swift (2.2.2) * Various other minor bug fixes and improvements. + swift (2.2.1) * Swift now rejects object names with Unicode surrogates. @@ -164,7 +295,7 @@ swift (2.2.1) * Various other minor bug fixes and improvements. -swift (2.2.0) +swift (2.2.0, OpenStack Juno) * Added support for Keystone v3 auth. 
@@ -338,7 +469,7 @@ swift (2.0.0) * Various other minor bug fixes and improvements -swift (1.13.1) +swift (1.13.1, OpenStack Icehouse) * Change the behavior of CORS responses to better match the spec @@ -605,7 +736,7 @@ swift (1.11.0) * Various other bug fixes and improvements -swift (1.10.0) +swift (1.10.0, OpenStack Havana) * Added support for pooling memcache connections @@ -776,7 +907,7 @@ swift (1.9.0) * Various other minor bug fixes and improvements -swift (1.8.0) +swift (1.8.0, OpenStack Grizzly) * Make rings' replica count adjustable @@ -947,7 +1078,7 @@ swift (1.7.5) * Various other minor bug fixes and improvements -swift (1.7.4) +swift (1.7.4, OpenStack Folsom) * Fix issue where early client disconnects may have caused a memory leak @@ -962,14 +1093,14 @@ swift (1.7.0) Serialize RingData in a versioned, custom format which is a combination of a JSON-encoded header and .tostring() dumps of the - replica2part2dev_id arrays. This format deserializes hundreds of times + replica2part2dev_id arrays. This format deserializes hundreds of times faster than rings serialized with Python 2.7's pickle (a significant performance regression for ring loading between Python 2.6 and Python - 2.7). Fixes bug 1031954. + 2.7). Fixes bug 1031954. The new implementation is backward-compatible; if a ring does not begin with a new-style magic string, it is assumed to be an - old-style pickle-dumped ring and is handled as before. So new Swift + old-style pickle-dumped ring and is handled as before. So new Swift code can read old rings, but old Swift code will not be able to read newly-serialized rings. @@ -1153,7 +1284,7 @@ swift (1.5.0) * Various other minor bug fixes and improvements -swift (1.4.8) +swift (1.4.8, OpenStack Essex) * Added optional max_containers_per_account restriction @@ -1296,7 +1427,7 @@ swift (1.4.4) * Query only specific zone via swift-recon. -swift (1.4.3) +swift (1.4.3, OpenStack Diablo) * Additional quarantine catching code. 
@@ -1421,3 +1552,15 @@ swift (1.4.0) * Stats uploaders now allow overrides for source_filename_pattern and new_log_cutoff values. + +--- + +Changelog entries for previous versions are incomplete + +swift (1.3.0, OpenStack Cactus) + +swift (1.2.0, OpenStack Bexar) + +swift (1.1.0, OpenStack Austin) + +swift (1.0.0, Initial Release) diff --git a/bandit.yaml b/bandit.yaml new file mode 100644 index 0000000000..6599ee50b8 --- /dev/null +++ b/bandit.yaml @@ -0,0 +1,149 @@ +# optional: after how many files to update progress +#show_progress_every: 100 + +# optional: plugins directory name +#plugins_dir: 'plugins' + +# optional: plugins discovery name pattern +plugin_name_pattern: '*.py' + +# optional: terminal escape sequences to display colors +#output_colors: +# DEFAULT: '\033[0m' +# HEADER: '\033[95m' +# LOW: '\033[94m' +# MEDIUM: '\033[93m' +# HIGH: '\033[91m' + +# optional: log format string +#log_format: "[%(module)s]\t%(levelname)s\t%(message)s" + +# globs of files which should be analyzed +include: + - '*.py' + +# a list of strings, which if found in the path will cause files to be +# excluded +# for example /tests/ - to remove all all files in tests directory +#exclude_dirs: +# - '/tests/' + +#configured for swift +profiles: + gate: + include: + - blacklist_calls + - blacklist_imports + - exec_used + - linux_commands_wildcard_injection + - request_with_no_cert_validation + - set_bad_file_permissions + - subprocess_popen_with_shell_equals_true + - ssl_with_bad_version + - password_config_option_not_marked_secret + +# - any_other_function_with_shell_equals_true +# - ssl_with_bad_defaults +# - jinja2_autoescape_false +# - use_of_mako_templates +# - subprocess_without_shell_equals_true +# - any_other_function_with_shell_equals_true +# - start_process_with_a_shell +# - start_process_with_no_shell +# - hardcoded_sql_expressions +# - hardcoded_tmp_director +# - linux_commands_wildcard_injection +#For now some items are commented which could be included as per use 
later. +blacklist_calls: + bad_name_sets: +# - pickle: +# qualnames: [pickle.loads, pickle.load, pickle.Unpickler, +# cPickle.loads, cPickle.load, cPickle.Unpickler] +# level: LOW +# message: "Pickle library appears to be in use, possible security +#issue." + + - marshal: + qualnames: [marshal.load, marshal.loads] + message: "Deserialization with the marshal module is possibly +dangerous." +# - md5: +# qualnames: [hashlib.md5] +# level: LOW +# message: "Use of insecure MD5 hash function." + - mktemp_q: + qualnames: [tempfile.mktemp] + message: "Use of insecure and deprecated function (mktemp)." +# - eval: +# qualnames: [eval] +# level: LOW +# message: "Use of possibly insecure function - consider using safer +#ast.literal_eval." + - mark_safe: + names: [mark_safe] + message: "Use of mark_safe() may expose cross-site scripting +vulnerabilities and should be reviewed." + - httpsconnection: + qualnames: [httplib.HTTPSConnection] + message: "Use of HTTPSConnection does not provide security, see +https://wiki.openstack.org/wiki/OSSN/OSSN-0033" + - yaml_load: + qualnames: [yaml.load] + message: "Use of unsafe yaml load. Allows instantiation of +arbitrary objects. Consider yaml.safe_load()." + - urllib_urlopen: + qualnames: [urllib.urlopen, urllib.urlretrieve, urllib.URLopener, +urllib.FancyURLopener, urllib2.urlopen, urllib2.Request] + message: "Audit url open for permitted schemes. Allowing use of +file:/ or custom schemes is often unexpected." + - paramiko_injection: + qualnames: [paramiko.exec_command, paramiko.invoke_shell] + message: "Paramiko exec_command() and invoke_shell() usage may +expose command injection vulnerabilities and should be reviewed." + +shell_injection: + # Start a process using the subprocess module, or one of its wrappers. + subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call, + subprocess.check_output, utils.execute, +utils.execute_with_timeout] + # Start a process with a function vulnerable to shell injection. 
+ shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4, + popen2.popen2, popen2.popen3, popen2.popen4, popen2.Popen3, + popen2.Popen4, commands.getoutput, commands.getstatusoutput] + # Start a process with a function that is not vulnerable to shell + # injection. + no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv,os.execve, + os.execvp, os.execvpe, os.spawnl, os.spawnle, os.spawnlp, + os.spawnlpe, os.spawnv, os.spawnve, os.spawnvp, os.spawnvpe, + os.startfile] + +blacklist_imports: + bad_import_sets: + - telnet: + imports: [telnetlib] + level: HIGH + message: "Telnet is considered insecure. Use SSH or some other +encrypted protocol." + - info_libs: + imports: [Crypto] + level: LOW + message: "Consider possible security implications associated with +#{module} module." + +hardcoded_password: + word_list: "wordlist/default-passwords" + +ssl_with_bad_version: + bad_protocol_versions: + - 'PROTOCOL_SSLv2' + - 'SSLv2_METHOD' + - 'SSLv23_METHOD' + - 'PROTOCOL_SSLv3' # strict option + - 'PROTOCOL_TLSv1' # strict option + - 'SSLv3_METHOD' # strict option + - 'TLSv1_METHOD' # strict option + +password_config_option_not_marked_secret: + function_names: + - oslo.config.cfg.StrOpt + - oslo_config.cfg.StrOpt diff --git a/bin/swift-account-info b/bin/swift-account-info index f7f10f855e..61c619900c 100755 --- a/bin/swift-account-info +++ b/bin/swift-account-info @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at diff --git a/bin/swift-container-info b/bin/swift-container-info index 4956722813..8074b22ccd 100755 --- a/bin/swift-container-info +++ b/bin/swift-container-info @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at diff --git a/bin/swift-container-sync b/bin/swift-container-sync index 14d6bc9d7c..b885015703 100755 --- a/bin/swift-container-sync +++ b/bin/swift-container-sync @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bin/swift-dispersion-populate b/bin/swift-dispersion-populate index 2484592201..9a5d79b012 100755 --- a/bin/swift-dispersion-populate +++ b/bin/swift-dispersion-populate @@ -16,13 +16,13 @@ import traceback from ConfigParser import ConfigParser -from cStringIO import StringIO from optparse import OptionParser from sys import exit, stdout from time import time from six.moves import range from eventlet import GreenPool, patcher, sleep from eventlet.pools import Pool +from six.moves import cStringIO as StringIO try: from swiftclient import get_auth @@ -76,8 +76,9 @@ def report(success): return next_report = time() + 5 eta, eta_unit = compute_eta(begun, created, need_to_create) - print '\r\x1B[KCreating %s: %d of %d, %d%s left, %d retries' % (item_type, - created, need_to_create, round(eta), eta_unit, retries_done), + print ('\r\x1B[KCreating %s: %d of %d, %d%s left, %d retries' + % (item_type, created, need_to_create, round(eta), eta_unit, + retries_done)), stdout.flush() @@ -132,6 +133,9 @@ Usage: %%prog [options] [conf_file] retries = int(conf.get('retries', 5)) concurrency = int(conf.get('concurrency', 25)) endpoint_type = str(conf.get('endpoint_type', 'publicURL')) + user_domain_name = str(conf.get('user_domain_name', '')) + project_domain_name = str(conf.get('project_domain_name', '')) + project_name = str(conf.get('project_name', '')) insecure = options.insecure \ or config_true_value(conf.get('keystone_api_insecure', 'no')) container_populate = config_true_value( @@ -146,6 +150,12 @@ Usage: %%prog [options] [conf_file] retries_done = 0 os_options = {'endpoint_type': 
endpoint_type} + if user_domain_name: + os_options['user_domain_name'] = user_domain_name + if project_domain_name: + os_options['project_domain_name'] = project_domain_name + if project_name: + os_options['project_name'] = project_name url, token = get_auth(conf['auth_url'], conf['auth_user'], conf['auth_key'], diff --git a/bin/swift-dispersion-report b/bin/swift-dispersion-report index b08d02717f..5d524892d0 100755 --- a/bin/swift-dispersion-report +++ b/bin/swift-dispersion-report @@ -26,6 +26,7 @@ except ImportError: from eventlet import GreenPool, hubs, patcher, Timeout from eventlet.pools import Pool +from eventlet.green import urllib2 from swift.common import direct_client try: @@ -126,7 +127,7 @@ def container_dispersion_report(coropool, connpool, account, container_ring, if not json_output: print '\r\x1B[KQuerying containers: %d of %d, %d%s left, %d ' \ 'retries' % (containers_queried[0], containers_listed, - round(eta), eta_unit, retries_done[0]), + round(eta), eta_unit, retries_done[0]), stdout.flush() container_parts = {} for container in containers: @@ -145,7 +146,7 @@ def container_dispersion_report(coropool, connpool, account, container_ring, if not json_output: print '\r\x1B[KQueried %d containers for dispersion reporting, ' \ '%d%s, %d retries' % (containers_listed, round(elapsed), - elapsed_unit, retries_done[0]) + elapsed_unit, retries_done[0]) if containers_listed - distinct_partitions: print 'There were %d overlapping partitions' % ( containers_listed - distinct_partitions) @@ -176,9 +177,10 @@ def object_dispersion_report(coropool, connpool, account, object_ring, try: objects = [o['name'] for o in conn.get_container( container, prefix='dispersion_', full_listing=True)[1]] - except ClientException as err: - if err.http_status != 404: + except urllib2.HTTPError as err: + if err.getcode() != 404: raise + print >>stderr, 'No objects to query. Has ' \ 'swift-dispersion-populate been run?' 
stderr.flush() @@ -255,7 +257,7 @@ def object_dispersion_report(coropool, connpool, account, object_ring, if not json_output: print '\r\x1B[KQueried %d objects for dispersion reporting, ' \ '%d%s, %d retries' % (objects_listed, round(elapsed), - elapsed_unit, retries_done[0]) + elapsed_unit, retries_done[0]) if objects_listed - distinct_partitions: print 'There were %d overlapping partitions' % ( objects_listed - distinct_partitions) @@ -363,6 +365,9 @@ Usage: %%prog [options] [conf_file] and not options.container_only if not (object_report or container_report): exit("Neither container or object report is set to run") + user_domain_name = str(conf.get('user_domain_name', '')) + project_domain_name = str(conf.get('project_domain_name', '')) + project_name = str(conf.get('project_name', '')) insecure = options.insecure \ or config_true_value(conf.get('keystone_api_insecure', 'no')) if options.debug: @@ -371,6 +376,12 @@ Usage: %%prog [options] [conf_file] coropool = GreenPool(size=concurrency) os_options = {'endpoint_type': endpoint_type} + if user_domain_name: + os_options['user_domain_name'] = user_domain_name + if project_domain_name: + os_options['project_domain_name'] = project_domain_name + if project_name: + os_options['project_name'] = project_name url, token = get_auth(conf['auth_url'], conf['auth_user'], conf['auth_key'], diff --git a/bin/swift-recon b/bin/swift-recon index 9a068f915e..72f6eeef41 100755 --- a/bin/swift-recon +++ b/bin/swift-recon @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # Copyright (c) 2014 Christian Schwede # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bin/swift-ring-builder b/bin/swift-ring-builder index ed1b3843a8..4f85179951 100755 --- a/bin/swift-ring-builder +++ b/bin/swift-ring-builder @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # Copyright (c) 2014 Christian Schwede # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git 
a/bin/swift-ring-builder-analyzer b/bin/swift-ring-builder-analyzer index 18365777f3..6a70105c14 100755 --- a/bin/swift-ring-builder-analyzer +++ b/bin/swift-ring-builder-analyzer @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # Copyright (c) 2015 Samuel Merritt # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/doc/manpages/account-server.conf.5 b/doc/manpages/account-server.conf.5 index b60baeb60d..cec0752999 100644 --- a/doc/manpages/account-server.conf.5 +++ b/doc/manpages/account-server.conf.5 @@ -188,12 +188,6 @@ Number of replication workers to spawn. The default is 8. Time in seconds to wait between replication passes. The default is 30. .IP \fBinterval\fR Replaces run_pause with the more standard "interval", which means the replicator won't pause unless it takes less than the interval set. The default is 30. -.IP \fBerror_suppression_interval\fR -How long without an error before a node's error count is reset. This will also be how long before a node is re-enabled after suppression is triggered. -The default is 60 seconds. -.IP \fBerror_suppression_limit\fR -How many errors can accumulate before a node is temporarily ignored. The default -is 10 seconds. .IP \fBnode_timeout\fR Request timeout to external services. The default is 10 seconds. 
.IP \fBconn_timeout\fR diff --git a/doc/manpages/dispersion.conf.5 b/doc/manpages/dispersion.conf.5 index 162846a501..04099a9374 100644 --- a/doc/manpages/dispersion.conf.5 +++ b/doc/manpages/dispersion.conf.5 @@ -43,7 +43,13 @@ Authentication system URL .IP "\fBauth_user\fR" Authentication system account/user name .IP "\fBauth_key\fR" -Authentication system account/user password +Authentication system account/user password +.IP "\fBproject_name\fR" +Project name in case of keystone auth version 3 +.IP "\fBproject_domain_name\fR" +Project domain name in case of keystone auth version 3 +.IP "\fBuser_domain_name\fR" +User domain name in case of keystone auth version 3 .IP "\fBswift_dir\fR" Location of openstack-swift configuration and ring files .IP "\fBdispersion_coverage\fR" @@ -70,6 +76,9 @@ Whether to run the object report. The default is yes. .IP "auth_key = dpstats" .IP "swift_dir = /etc/swift" .IP "# keystone_api_insecure = no" +.IP "# project_name = dpstats" +.IP "# project_domain_name = default" +.IP "# user_domain_name = default" .IP "# dispersion_coverage = 1.0" .IP "# retries = 5" .IP "# concurrency = 25" diff --git a/doc/manpages/object-server.conf.5 b/doc/manpages/object-server.conf.5 index fb2297421a..e82a56ffb5 100644 --- a/doc/manpages/object-server.conf.5 +++ b/doc/manpages/object-server.conf.5 @@ -129,6 +129,8 @@ Logging address. The default is /dev/log. Request timeout to external services. The default is 3 seconds. .IP \fBconn_timeout\fR Connection timeout to external services. The default is 0.5 seconds. +.IP \fBcontainer_update_timeout\fR +Time to wait while sending a container update on object update. The default is 1 second. 
.RE .PD diff --git a/doc/manpages/swift-dispersion-populate.1 b/doc/manpages/swift-dispersion-populate.1 index dd45e4e8eb..5ce1404569 100644 --- a/doc/manpages/swift-dispersion-populate.1 +++ b/doc/manpages/swift-dispersion-populate.1 @@ -85,6 +85,9 @@ Example \fI/etc/swift/dispersion.conf\fR: .IP "auth_user = dpstats:dpstats" .IP "auth_key = dpstats" .IP "swift_dir = /etc/swift" +.IP "# project_name = dpstats" +.IP "# project_domain_name = default" +.IP "# user_domain_name = default" .IP "# dispersion_coverage = 1.0" .IP "# retries = 5" .IP "# concurrency = 25" diff --git a/doc/manpages/swift-dispersion-report.1 b/doc/manpages/swift-dispersion-report.1 index 357c177dc1..c66eba1af6 100644 --- a/doc/manpages/swift-dispersion-report.1 +++ b/doc/manpages/swift-dispersion-report.1 @@ -101,6 +101,9 @@ Example \fI/etc/swift/dispersion.conf\fR: .IP "auth_user = dpstats:dpstats" .IP "auth_key = dpstats" .IP "swift_dir = /etc/swift" +.IP "# project_name = dpstats" +.IP "# project_domain_name = default" +.IP "# user_domain_name = default" .IP "# dispersion_coverage = 1.0" .IP "# retries = 5" .IP "# concurrency = 25" diff --git a/doc/manpages/swift-recon.1 b/doc/manpages/swift-recon.1 index c635861aca..3120405005 100644 --- a/doc/manpages/swift-recon.1 +++ b/doc/manpages/swift-recon.1 @@ -25,7 +25,7 @@ .SH SYNOPSIS .LP .B swift-recon -\ [-v] [--suppress] [-a] [-r] [-u] [-d] [-l] [--md5] [--auditor] [--updater] [--expirer] [--sockstat] +\ [-v] [--suppress] [-a] [-r] [-u] [-d] [-l] [-T] [--md5] [--auditor] [--updater] [--expirer] [--sockstat] .SH DESCRIPTION .PP @@ -80,8 +80,10 @@ Get md5sum of servers ring and compare to local copy Get cluster socket usage stats .IP "\fB--driveaudit\fR" Get drive audit error stats +.IP "\fB-T, --time\fR" +Check time synchronization .IP "\fB--all\fR" -Perform all checks. Equivalent to \-arudlq \-\-md5 +Perform all checks. 
Equivalent to \-arudlqT \-\-md5 .IP "\fB--region=REGION\fR" Only query servers in specified region .IP "\fB-z ZONE, --zone=ZONE\fR" diff --git a/doc/saio/swift/container-server/1.conf b/doc/saio/swift/container-server/1.conf index 3062ca3a5a..176096dbe1 100644 --- a/doc/saio/swift/container-server/1.conf +++ b/doc/saio/swift/container-server/1.conf @@ -9,7 +9,6 @@ user = log_facility = LOG_LOCAL2 recon_cache_path = /var/cache/swift eventlet_debug = true -allow_versions = true [pipeline:main] pipeline = recon container-server diff --git a/doc/saio/swift/container-server/2.conf b/doc/saio/swift/container-server/2.conf index 6365215931..7100710b3c 100644 --- a/doc/saio/swift/container-server/2.conf +++ b/doc/saio/swift/container-server/2.conf @@ -9,7 +9,6 @@ user = log_facility = LOG_LOCAL3 recon_cache_path = /var/cache/swift2 eventlet_debug = true -allow_versions = true [pipeline:main] pipeline = recon container-server diff --git a/doc/saio/swift/container-server/3.conf b/doc/saio/swift/container-server/3.conf index b925427ff0..06ec47414d 100644 --- a/doc/saio/swift/container-server/3.conf +++ b/doc/saio/swift/container-server/3.conf @@ -9,7 +9,6 @@ user = log_facility = LOG_LOCAL4 recon_cache_path = /var/cache/swift3 eventlet_debug = true -allow_versions = true [pipeline:main] pipeline = recon container-server diff --git a/doc/saio/swift/container-server/4.conf b/doc/saio/swift/container-server/4.conf index 16799a524a..1acc3b5c54 100644 --- a/doc/saio/swift/container-server/4.conf +++ b/doc/saio/swift/container-server/4.conf @@ -9,7 +9,6 @@ user = log_facility = LOG_LOCAL5 recon_cache_path = /var/cache/swift4 eventlet_debug = true -allow_versions = true [pipeline:main] pipeline = recon container-server diff --git a/doc/saio/swift/container-sync-realms.conf b/doc/saio/swift/container-sync-realms.conf new file mode 100644 index 0000000000..503a71c4f1 --- /dev/null +++ b/doc/saio/swift/container-sync-realms.conf @@ -0,0 +1,5 @@ +[saio] +key = changeme +key2 = changeme 
+cluster_saio_endpoint = http://127.0.0.1:8080/v1/ + diff --git a/doc/saio/swift/object-expirer.conf b/doc/saio/swift/object-expirer.conf index 5610f69afd..6e77e9cdf7 100644 --- a/doc/saio/swift/object-expirer.conf +++ b/doc/saio/swift/object-expirer.conf @@ -37,7 +37,7 @@ interval = 300 # config value # processes = 0 # process is which of the parts a particular process will work on -# process can also be specified on the command line and will overide the config +# process can also be specified on the command line and will override the config # value # process is "zero based", if you want to use 3 processes, you should run # processes with process set to 0, 1, and 2 diff --git a/doc/saio/swift/proxy-server.conf b/doc/saio/swift/proxy-server.conf index dd037edb8f..d9e5c95148 100644 --- a/doc/saio/swift/proxy-server.conf +++ b/doc/saio/swift/proxy-server.conf @@ -9,7 +9,7 @@ eventlet_debug = true [pipeline:main] # Yes, proxy-logging appears twice. This is so that # middleware-originated requests get logged too. 
-pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk tempurl ratelimit crossdomain tempauth staticweb container-quotas account-quotas slo dlo proxy-logging proxy-server +pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk tempurl ratelimit crossdomain container_sync tempauth staticweb container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server [filter:catch_errors] use = egg:swift#catch_errors @@ -35,6 +35,10 @@ use = egg:swift#dlo [filter:slo] use = egg:swift#slo +[filter:container_sync] +use = egg:swift#container_sync +current = //saio/saio_endpoint + [filter:tempurl] use = egg:swift#tempurl @@ -60,6 +64,10 @@ use = egg:swift#memcache [filter:gatekeeper] use = egg:swift#gatekeeper +[filter:versioned_writes] +use = egg:swift#versioned_writes +allow_versioned_writes = true + [app:proxy-server] use = egg:swift#proxy allow_account_management = true diff --git a/doc/source/admin_guide.rst b/doc/source/admin_guide.rst index f27c20741e..7d396664df 100644 --- a/doc/source/admin_guide.rst +++ b/doc/source/admin_guide.rst @@ -154,6 +154,10 @@ until it has been resolved. If the drive is going to be replaced immediately, then it is just best to replace the drive, format it, remount it, and let replication fill it up. +After the drive is unmounted, make sure the mount point is owned by root +(root:root 755). This ensures that rsync will not try to replicate into the +root drive once the failed drive is unmounted. + If the drive can't be replaced immediately, then it is best to leave it unmounted, and set the device weight to 0. This will allow all the replicas that were on that drive to be replicated elsewhere until the drive @@ -270,7 +274,8 @@ configuration file, /etc/swift/dispersion.conf. Example conf file:: There are also options for the conf file for specifying the dispersion coverage (defaults to 1%), retries, concurrency, etc. though usually the defaults are -fine. +fine. 
If you want to use keystone v3 for authentication there are options like +auth_version, user_domain_name, project_domain_name and project_name. Once the configuration is in place, run `swift-dispersion-populate` to populate the containers and objects throughout the cluster. @@ -544,18 +549,22 @@ Request URI Description /recon/sockstat returns consumable info from /proc/net/sockstat|6 /recon/devices returns list of devices and devices dir i.e. /srv/node /recon/async returns count of async pending -/recon/replication returns object replication times (for backward compatibility) +/recon/replication returns object replication info (for backward compatibility) /recon/replication/ returns replication info for given type (account, container, object) /recon/auditor/ returns auditor stats on last reported scan for given type (account, container, object) /recon/updater/ returns last updater sweep times for given type (container, object) ========================= ======================================================================================== +Note that 'object_replication_last' and 'object_replication_time' in object +replication info are considered to be transitional and will be removed in +the subsequent releases. Use 'replication_last' and 'replication_time' instead. + This information can also be queried via the swift-recon command line utility:: fhines@ubuntu:~$ swift-recon -h Usage: usage: swift-recon [-v] [--suppress] [-a] [-r] [-u] [-d] - [-l] [--md5] [--auditor] [--updater] [--expirer] [--sockstat] + [-l] [-T] [--md5] [--auditor] [--updater] [--expirer] [--sockstat] account|container|object Defaults to object server. @@ -578,7 +587,8 @@ This information can also be queried via the swift-recon command line utility:: -q, --quarantined Get cluster quarantine stats --md5 Get md5sum of servers ring and compare to local copy --sockstat Get cluster socket usage stats - --all Perform all checks. 
Equal to -arudlq --md5 --sockstat + -T, --time Check time synchronization + --all Perform all checks. Equal to -arudlqT --md5 --sockstat -z ZONE, --zone=ZONE Only query servers in specified zone -t SECONDS, --timeout=SECONDS Time to wait for a response from a server diff --git a/doc/source/api/object_api_v1_overview.rst b/doc/source/api/object_api_v1_overview.rst index 3c1748d1fc..0f6d7d9d22 100644 --- a/doc/source/api/object_api_v1_overview.rst +++ b/doc/source/api/object_api_v1_overview.rst @@ -7,7 +7,7 @@ metadata by using the Object Storage API, which is implemented as a set of Representational State Transfer (REST) web services. For an introduction to OpenStack Object Storage, see `Object -Storage `__ +Storage ` in the *OpenStack Cloud Administrator Guide*. You use the HTTPS (SSL) protocol to interact with Object Storage, and diff --git a/doc/source/cors.rst b/doc/source/cors.rst index a221865b47..1de1d5f6a0 100644 --- a/doc/source/cors.rst +++ b/doc/source/cors.rst @@ -2,7 +2,7 @@ CORS ==== -CORS_ is a mechanisim to allow code running in a browser (Javascript for +CORS_ is a mechanism to allow code running in a browser (Javascript for example) make requests to a domain other then the one from where it originated. Swift supports CORS requests to containers and objects. diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index b26f3ceff1..da9d0b4fa2 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -340,7 +340,7 @@ paste.deploy works (at least at the time of this writing.) `name3` got the local value from the `app:myapp` subsection because it is using the special paste.deploy syntax of ``set option_name = value``. So, if you want -a default value for most app/filters but want to overridde it in one +a default value for most app/filters but want to override it in one subsection, this is how you do it. 
`name4` got the global value from `DEFAULT` since it's only in that section @@ -390,6 +390,13 @@ max_header_size 8192 max_header_size is the max number of bytes in See also include_service_catalog in proxy-server.conf-sample (documented in overview_auth.rst). +extra_header_count 0 By default the maximum number of allowed + headers depends on the number of max + allowed metadata settings plus a default + value of 32 for regular http headers. + If for some reason this is not enough (custom + middleware for example) it can be increased + with the extra_header_count constraint. =================== ========== ============================================= --------------------------- @@ -405,76 +412,86 @@ The following configuration options are available: [DEFAULT] -=================== ========== ============================================= -Option Default Description -------------------- ---------- --------------------------------------------- -swift_dir /etc/swift Swift configuration directory -devices /srv/node Parent directory of where devices are mounted -mount_check true Whether or not check if the devices are - mounted to prevent accidentally writing - to the root device -bind_ip 0.0.0.0 IP Address for server to bind to -bind_port 6000 Port for server to bind to -bind_timeout 30 Seconds to attempt bind before giving up -workers auto Override the number of pre-forked workers - that will accept connections. If set it - should be an integer, zero means no fork. If - unset, it will try to default to the number - of effective cpu cores and fallback to one. - Increasing the number of workers helps slow - filesystem operations in one request from - negatively impacting other requests, but only - the :ref:`servers_per_port - ` - option provides complete I/O isolation with - no measurable overhead. 
-servers_per_port 0 If each disk in each storage policy ring has - unique port numbers for its "ip" value, you - can use this setting to have each - object-server worker only service requests - for the single disk matching the port in the - ring. The value of this setting determines - how many worker processes run for each port - (disk) in the ring. If you have 24 disks - per server, and this setting is 4, then - each storage node will have 1 + (24 * 4) = - 97 total object-server processes running. - This gives complete I/O isolation, drastically - reducing the impact of slow disks on storage - node performance. The object-replicator and - object-reconstructor need to see this setting - too, so it must be in the [DEFAULT] section. - See :ref:`server-per-port-configuration`. -max_clients 1024 Maximum number of clients one worker can - process simultaneously (it will actually - accept(2) N + 1). Setting this to one (1) - will only handle one request at a time, - without accepting another request - concurrently. -disable_fallocate false Disable "fast fail" fallocate checks if the - underlying filesystem does not support it. -log_max_line_length 0 Caps the length of log lines to the - value given; no limit if set to 0, the - default. -log_custom_handlers None Comma-separated list of functions to call - to setup custom log handlers. -eventlet_debug false If true, turn on debug logging for eventlet -fallocate_reserve 0 You can set fallocate_reserve to the number of - bytes you'd like fallocate to reserve, whether - there is space for the given file size or not. - This is useful for systems that behave badly - when they completely run out of space; you can - make the services pretend they're out of space - early. -conn_timeout 0.5 Time to wait while attempting to connect to - another backend node. -node_timeout 3 Time to wait while sending each chunk of data - to another backend node. 
-client_timeout 60 Time to wait while receiving each chunk of - data from a client or another backend node. -network_chunk_size 65536 Size of chunks to read/write over the network -disk_chunk_size 65536 Size of chunks to read/write to disk -=================== ========== ============================================= +======================== ========== ========================================== +Option Default Description +------------------------ ---------- ------------------------------------------ +swift_dir /etc/swift Swift configuration directory +devices /srv/node Parent directory of where devices are + mounted +mount_check true Whether or not check if the devices are + mounted to prevent accidentally writing + to the root device +bind_ip 0.0.0.0 IP Address for server to bind to +bind_port 6000 Port for server to bind to +bind_timeout 30 Seconds to attempt bind before giving up +workers auto Override the number of pre-forked workers + that will accept connections. If set it + should be an integer, zero means no fork. + If unset, it will try to default to the + number of effective cpu cores and fallback + to one. Increasing the number of workers + helps slow filesystem operations in one + request from negatively impacting other + requests, but only the + :ref:`servers_per_port + ` option + provides complete I/O isolation with no + measurable overhead. +servers_per_port 0 If each disk in each storage policy ring + has unique port numbers for its "ip" + value, you can use this setting to have + each object-server worker only service + requests for the single disk matching the + port in the ring. The value of this + setting determines how many worker + processes run for each port (disk) in the + ring. If you have 24 disks per server, and + this setting is 4, then each storage node + will have 1 + (24 * 4) = 97 total + object-server processes running. 
This + gives complete I/O isolation, drastically + reducing the impact of slow disks on + storage node performance. The + object-replicator and object-reconstructor + need to see this setting too, so it must + be in the [DEFAULT] section. + See :ref:`server-per-port-configuration`. +max_clients 1024 Maximum number of clients one worker can + process simultaneously (it will actually + accept(2) N + 1). Setting this to one (1) + will only handle one request at a time, + without accepting another request + concurrently. +disable_fallocate false Disable "fast fail" fallocate checks if + the underlying filesystem does not support + it. +log_max_line_length 0 Caps the length of log lines to the + value given; no limit if set to 0, the + default. +log_custom_handlers None Comma-separated list of functions to call + to setup custom log handlers. +eventlet_debug false If true, turn on debug logging for + eventlet +fallocate_reserve 0 You can set fallocate_reserve to the + number of bytes you'd like fallocate to + reserve, whether there is space for the + given file size or not. This is useful for + systems that behave badly when they + completely run out of space; you can + make the services pretend they're out of + space early. +conn_timeout 0.5 Time to wait while attempting to connect + to another backend node. +node_timeout 3 Time to wait while sending each chunk of + data to another backend node. +client_timeout 60 Time to wait while receiving each chunk of + data from a client or another backend node +network_chunk_size 65536 Size of chunks to read/write over the + network +disk_chunk_size 65536 Size of chunks to read/write to disk +container_update_timeout 1 Time to wait while sending a container + update on object update. +======================== ========== ========================================== .. 
_object-server-options: @@ -1229,6 +1246,10 @@ For a standard swift install, all data drives are mounted directly under be sure to set the `devices` config option in all of the server configs to point to the correct directory. +The mount points for each drive in /srv/node/ should be owned by the root user +almost exclusively (root:root 755). This is required to prevent rsync from +syncing files into the root drive in the event a drive is unmounted. + Swift uses system calls to reserve space for new objects being written into the system. If your filesystem does not support `fallocate()` or `posix_fallocate()`, be sure to set the `disable_fallocate = true` config diff --git a/doc/source/development_guidelines.rst b/doc/source/development_guidelines.rst index 29d9f35a41..a3528e7714 100644 --- a/doc/source/development_guidelines.rst +++ b/doc/source/development_guidelines.rst @@ -42,7 +42,7 @@ To execute the unit tests: Remarks: If you installed using: `cd ~/swift; sudo python setup.py develop`, - you may need to do: `cd ~/swift; sudo chown -R swift:swift swift.egg-info` + you may need to do: `cd ~/swift; sudo chown -R ${USER}:${USER} swift.egg-info` prior to running tox. * Optionally, run only specific tox builds: @@ -71,6 +71,18 @@ The endpoint and authorization credentials to be used by functional tests should be configured in the ``test.conf`` file as described in the section :ref:`setup_scripts`. +The environment variable ``SWIFT_TEST_POLICY`` may be set to specify a +particular storage policy *name* that will be used for testing. When set, tests +that would otherwise not specify a policy or choose a random policy from +those available will instead use the policy specified. Tests that use more than +one policy will include the specified policy in the set of policies used. The +specified policy must be available on the cluster under test. 
+ +For example, this command would run the functional tests using policy +'silver':: + + SWIFT_TEST_POLICY=silver tox -e func + If the ``test.conf`` file is not found then the functional test framework will instantiate a set of Swift servers in the same process that executes the functional tests. This 'in-process test' mode may also be enabled (or disabled) @@ -95,13 +107,14 @@ found in ````, the search will then look in the the corresponding sample config file from ``etc/`` is used (e.g. ``proxy-server.conf-sample`` or ``swift.conf-sample``). -The environment variable ``SWIFT_TEST_POLICY`` may be set to specify -a particular storage policy *name* that will be used for testing. When set, -this policy must exist in the ``swift.conf`` file and its corresponding ring -file must exist in ```` (if specified) or ``etc/``. The -test setup will set the specified policy to be the default and use its ring -file properties for constructing the test object ring. This allows in-process -testing to be run against various policy types and ring files. +When using the 'in-process test' mode ``SWIFT_TEST_POLICY`` may be set to +specify a particular storage policy *name* that will be used for testing as +described above. When set, this policy must exist in the ``swift.conf`` file +and its corresponding ring file must exist in ```` (if +specified) or ``etc/``. The test setup will set the specified policy to be the +default and use its ring file properties for constructing the test object ring. +This allows in-process testing to be run against various policy types and ring +files. 
For example, this command would run the in-process mode functional tests using config files found in ``$HOME/my_tests`` and policy 'silver':: diff --git a/doc/source/development_ondisk_backends.rst b/doc/source/development_ondisk_backends.rst index 9479cc8eb7..3380733780 100644 --- a/doc/source/development_ondisk_backends.rst +++ b/doc/source/development_ondisk_backends.rst @@ -4,7 +4,7 @@ Pluggable On-Disk Back-end APIs The internal REST API used between the proxy server and the account, container and object server is almost identical to public Swift REST API, but with a few -internal extentsions (for example, update an account with a new container). +internal extensions (for example, update an account with a new container). The pluggable back-end APIs for the three REST API servers (account, container, object) abstracts the needs for servicing the various REST APIs diff --git a/doc/source/development_saio.rst b/doc/source/development_saio.rst index 1e6bed03fb..0f391266ce 100644 --- a/doc/source/development_saio.rst +++ b/doc/source/development_saio.rst @@ -95,6 +95,16 @@ another device when creating the VM, and follow these instructions: # **Make sure to include the trailing slash after /srv/$x/** for x in {1..4}; do sudo chown -R ${USER}:${USER} /srv/$x/; done + Note: We create the mount points and mount the storage disk under + /mnt/sdb1. This disk will contain one directory per simulated swift node, + each owned by the current swift user. + + We then create symlinks to these directories under /srv. + If the disk sdb is unmounted, files will not be written under + /srv/\*, because the symbolic link destination /mnt/sdb1/* will not + exist. This prevents disk sync operations from writing to the root + partition in the event a drive is unmounted. + #. Next, skip to :ref:`common-dev-section`. 
@@ -135,6 +145,15 @@ these instructions: # **Make sure to include the trailing slash after /srv/$x/** for x in {1..4}; do sudo chown -R ${USER}:${USER} /srv/$x/; done + Note: We create the mount points and mount the loopback file under + /mnt/sdb1. This file will contain one directory per simulated swift node, + each owned by the current swift user. + + We then create symlinks to these directories under /srv. + If the loopback file is unmounted, files will not be written under + /srv/\*, because the symbolic link destination /mnt/sdb1/* will not + exist. This prevents disk sync operations from writing to the root + partition in the event a drive is unmounted. .. _common-dev-section: @@ -184,7 +203,7 @@ Getting the code #. Install swift's test dependencies:: - sudo pip install -r swift/test-requirements.txt + cd $HOME/swift; sudo pip install -r test-requirements.txt ---------------- Setting up rsync @@ -352,6 +371,10 @@ commands are as follows: .. literalinclude:: /../saio/swift/container-reconciler.conf + #. ``/etc/swift/container-sync-realms.conf`` + + .. literalinclude:: /../saio/swift/container-sync-realms.conf + #. ``/etc/swift/account-server/1.conf`` .. literalinclude:: /../saio/swift/account-server/1.conf diff --git a/doc/source/first_contribution_swift.rst b/doc/source/first_contribution_swift.rst new file mode 100644 index 0000000000..a1e3930cb6 --- /dev/null +++ b/doc/source/first_contribution_swift.rst @@ -0,0 +1,204 @@ +=========================== +First Contribution to Swift +=========================== + +------------- +Getting Swift +------------- + +Swift's source code is hosted on github and managed with git. The current +trunk can be checked out like this: + + ``git clone https://github.com/openstack/swift.git`` + +This will clone the Swift repository under your account. + +A source tarball for the latest release of Swift is available on the +`launchpad project page `_. + +Prebuilt packages for Ubuntu and RHEL variants are available. 
+ +* `Swift Ubuntu Packages `_ +* `Swift RDO Packages `_ + +-------------------- +Source Control Setup +-------------------- + +Swift uses `git` for source control. The OpenStack +`Developer's Guide `_ +describes the steps for setting up Git and all the necessary accounts for +contributing code to Swift. + +---------------- +Changes to Swift +---------------- + +Once you have the source code and source control set up, you can make your +changes to Swift. + +------- +Testing +------- + +The `Development Guidelines `_ describes the testing +requirements before submitting Swift code. + +In summary, you can execute tox from the swift home directory (where you +checked out the source code): + + ``tox`` + +Tox will present tests results. Notice that in the beginning, it is very common +to break many coding style guidelines. + +-------------------------- +Proposing changes to Swift +-------------------------- + +The OpenStack +`Developer's Guide `_ +describes the most common `git` commands that you will need. + +Following is a list of the commands that you need to know for your first +contribution to Swift: + +To clone a copy of Swift: + + ``git clone https://github.com/openstack/swift.git`` + +Under the swift directory, set up the Gerrit repository. The following command +configures the repository to know about Gerrit and makes the Change-Id commit +hook get installed. You only need to do this once: + + ``git review -s`` + +To create your development branch (substitute branch_name for a name of your +choice: + + ``git checkout -b `` + +To check the files that have been updated in your branch: + + ``git status`` + +To check the differences between your branch and the repository: + + ``git diff`` + +Assuming you have not added new files, you commit all your changes using: + + ``git commit -a`` + +Read the `Summary of Git commit message structure `_ +for best practices on writing the commit message. 
When you are ready to send +your changes for review use: + + ``git review`` + +If successful, Git response message will contain a URL you can use to track your +changes. + +If you need to make further changes to the same review, you can commit them +using: + + ``git commit -a --amend`` + +This will commit the changes under the same set of changes you issued earlier. +Notice that in order to send your latest version for review, you will still +need to call: + + ``git review`` + +--------------------- +Tracking your changes +--------------------- + +After you proposed your changes to Swift, you can track the review in: + +* ``_ + +.. _post-rebase-instructions: + +------------------------ +Post rebase instructions +------------------------ + +After rebasing, the following steps should be performed to rebuild the swift +installation. Note that these commands should be performed from the root of the +swift repo directory (e.g. $HOME/swift/): + + ``sudo python setup.py develop`` + + ``sudo pip install -r test-requirements.txt`` + +If using TOX, depending on the changes made during the rebase, you may need to +rebuild the TOX environment (generally this will be the case if +test-requirements.txt was updated such that a new version of a package is +required), this can be accomplished using the '-r' argument to the TOX cli: + + ``tox -r`` + +You can include any of the other TOX arguments as well, for example, to run the +pep8 suite and rebuild the TOX environment the following can be used: + + ``tox -r -e pep8`` + +The rebuild option only needs to be specified once for a particular build (e.g. +pep8), that is further invocations of the same build will not require this +until the next rebase. + +--------------- +Troubleshooting +--------------- + +You may run into the following errors when starting Swift if you rebase +your commit using: + + ``git rebase`` + +.. 
code-block:: python + + Traceback (most recent call last): + File "/usr/local/bin/swift-init", line 5, in + from pkg_resources import require + File "/usr/lib/python2.7/dist-packages/pkg_resources.py", line 2749, in + working_set = WorkingSet._build_master() + File "/usr/lib/python2.7/dist-packages/pkg_resources.py", line 446, in _build_master + return cls._build_from_requirements(__requires__) + File "/usr/lib/python2.7/dist-packages/pkg_resources.py", line 459, in _build_from_requirements + dists = ws.resolve(reqs, Environment()) + File "/usr/lib/python2.7/dist-packages/pkg_resources.py", line 628, in resolve + raise DistributionNotFound(req) + pkg_resources.DistributionNotFound: swift==2.3.1.devXXX + (where XXX represents a dev version of Swift). + +.. code-block:: python + + Traceback (most recent call last): + File "/usr/local/bin/swift-proxy-server", line 10, in + execfile(__file__) + File "/home/swift/swift/bin/swift-proxy-server", line 23, in + sys.exit(run_wsgi(conf_file, 'proxy-server', **options)) + File "/home/swift/swift/swift/common/wsgi.py", line 888, in run_wsgi + loadapp(conf_path, global_conf=global_conf) + File "/home/swift/swift/swift/common/wsgi.py", line 390, in loadapp + func(PipelineWrapper(ctx)) + File "/home/swift/swift/swift/proxy/server.py", line 602, in modify_wsgi_pipeline + ctx = pipe.create_filter(filter_name) + File "/home/swift/swift/swift/common/wsgi.py", line 329, in create_filter + global_conf=self.context.global_conf) + File "/usr/lib/python2.7/dist-packages/paste/deploy/loadwsgi.py", line 296, in loadcontext + global_conf=global_conf) + File "/usr/lib/python2.7/dist-packages/paste/deploy/loadwsgi.py", line 328, in _loadegg + return loader.get_context(object_type, name, global_conf) + File "/usr/lib/python2.7/dist-packages/paste/deploy/loadwsgi.py", line 620, in get_context + object_type, name=name) + File "/usr/lib/python2.7/dist-packages/paste/deploy/loadwsgi.py", line 659, in find_egg_entry_point + for prot in 
protocol_options] or '(no entry points)')))) + LookupError: Entry point 'versioned_writes' not found in egg 'swift' (dir: /home/swift/swift; protocols: paste.filter_factory, paste.filter_app_factory; entry_points: ) + +This happens because `git rebase` will retrieve code for a different version of +Swift in the development stream, but the start scripts under `/usr/local/bin` have +not been updated. The solution is to follow the steps described in the +:ref:`post-rebase-instructions` section. diff --git a/doc/source/getting_started.rst b/doc/source/getting_started.rst index 0e3b408ad4..ba8790821b 100644 --- a/doc/source/getting_started.rst +++ b/doc/source/getting_started.rst @@ -18,23 +18,6 @@ Swift is written in Python and has these dependencies: There is no current support for Python 3. -------------- -Getting Swift -------------- - -Swift's source code is hosted on github and managed with git. The current -trunk can be checked out like this: - - ``git clone https://github.com/openstack/swift.git`` - -A source tarball for the latest release of Swift is available on the -`launchpad project page `_. - -Prebuilt packages for Ubuntu and RHEL variants are available. 
- -* `Swift Ubuntu Packages `_ -* `Swift RDO Packages `_ - ----------- Development ----------- @@ -42,10 +25,10 @@ Development To get started with development with Swift, or to just play around, the following docs will be useful: -* :doc:`Swift All in One ` - Set up a VM with Swift - installed +* :doc:`Swift All in One ` - Set up a VM with Swift installed * :doc:`Development Guidelines ` -* `Associated Projects ` +* :doc:`First Contribution to Swift ` +* :doc:`Associated Projects ` -------------------------- CLI client and SDK library diff --git a/doc/source/howto_installmultinode.rst b/doc/source/howto_installmultinode.rst index 8ab73232d3..1d6b0589c9 100644 --- a/doc/source/howto_installmultinode.rst +++ b/doc/source/howto_installmultinode.rst @@ -6,6 +6,13 @@ Please refer to the latest official `Openstack Installation Guides `_ for the most up-to-date documentation. +Object Storage installation guide for Openstack Kilo +---------------------------------------------------- + + * `openSUSE 13.2 and SUSE Linux Enterprise Server 12 `_ + * `RHEL 7, CentOS 7, and Fedora 21 `_ + * `Ubuntu 14.04 `_ + Object Storage installation guide for Openstack Juno ---------------------------------------------------- diff --git a/doc/source/index.rst b/doc/source/index.rst index 45ee1fd0ef..30bfe31808 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -68,6 +68,7 @@ Developer Documentation development_guidelines development_saio + first_contribution_swift policies_saio development_auth development_middleware diff --git a/doc/source/logs.rst b/doc/source/logs.rst index b71939ae3d..75b669f1a5 100644 --- a/doc/source/logs.rst +++ b/doc/source/logs.rst @@ -59,7 +59,7 @@ client_etag The etag header value given by the client. transaction_id The transaction id of the request. headers The headers given in the request. request_time The duration of the request. -source The "source" of the reuqest. This may be set for requests +source The "source" of the request. 
This may be set for requests that are generated in order to fulfill client requests, e.g. bulk uploads. log_info Various info that may be useful for diagnostics, e.g. the @@ -102,6 +102,7 @@ DLO :ref:`dynamic-large-objects` LE :ref:`list_endpoints` KS :ref:`keystoneauth` RL :ref:`ratelimit` +VW :ref:`versioned_writes` ======================= ============================= diff --git a/doc/source/middleware.rst b/doc/source/middleware.rst index f78dbb1947..4e304ed6fb 100644 --- a/doc/source/middleware.rst +++ b/doc/source/middleware.rst @@ -155,6 +155,15 @@ Name Check (Forbidden Character Filter) :members: :show-inheritance: +.. _versioned_writes: + +Object Versioning +================= + +.. automodule:: swift.common.middleware.versioned_writes + :members: + :show-inheritance: + Proxy Logging ============= diff --git a/doc/source/overview_architecture.rst b/doc/source/overview_architecture.rst index 1f3452a55c..77a036ac3d 100644 --- a/doc/source/overview_architecture.rst +++ b/doc/source/overview_architecture.rst @@ -13,7 +13,7 @@ architecture. For each request, it will look up the location of the account, container, or object in the ring (see below) and route the request accordingly. For Erasure Code type policies, the Proxy Server is also responsible for encoding and decoding object data. See :doc:`overview_erasure_code` for -complete information on Erasure Code suport. The public API is also exposed +complete information on Erasure Code support. The public API is also exposed through the Proxy Server. A large number of failures are also handled in the Proxy Server. 
For diff --git a/doc/source/overview_erasure_code.rst b/doc/source/overview_erasure_code.rst index 9927e2ace2..d1b1a5d6a9 100755 --- a/doc/source/overview_erasure_code.rst +++ b/doc/source/overview_erasure_code.rst @@ -425,7 +425,7 @@ The basic flow looks like this: * The proxy waits for a minimal number of two object servers to respond with a success (2xx) status before responding to the client with a successful status. In this particular case it was decided that two responses was - the mininum amount to know that the file would be propagated in case of + the minimum amount to know that the file would be propagated in case of failure from other others and because a greater number would potentially mean more latency, which should be avoided if possible. diff --git a/doc/source/overview_object_versioning.rst b/doc/source/overview_object_versioning.rst index cac5a898d9..78d0b07ad1 100644 --- a/doc/source/overview_object_versioning.rst +++ b/doc/source/overview_object_versioning.rst @@ -1,89 +1,6 @@ -================= Object Versioning ================= --------- -Overview --------- - -Object versioning in swift is implemented by setting a flag on the container -to tell swift to version all objects in the container. The flag is the -``X-Versions-Location`` header on the container, and its value is the -container where the versions are stored. It is recommended to use a different -``X-Versions-Location`` container for each container that is being versioned. - -When data is ``PUT`` into a versioned container (a container with the -versioning flag turned on), the existing data in the file is redirected to a -new object and the data in the ``PUT`` request is saved as the data for the -versioned object. The new object name (for the previous version) is -``//``, where ``length`` -is the 3-character zero-padded hexadecimal length of the ```` and -```` is the timestamp of when the previous version was created. 
- -A ``GET`` to a versioned object will return the current version of the object -without having to do any request redirects or metadata lookups. - -A ``POST`` to a versioned object will update the object metadata as normal, -but will not create a new version of the object. In other words, new versions -are only created when the content of the object changes. - -A ``DELETE`` to a versioned object will only remove the current version of the -object. If you have 5 total versions of the object, you must delete the -object 5 times to completely remove the object. - -Note: A large object manifest file cannot be versioned, but a large object -manifest may point to versioned segments. - --------------------------------------------------- -How to Enable Object Versioning in a Swift Cluster --------------------------------------------------- - -Set ``allow_versions`` to ``True`` in the container server config. - ------------------------ -Examples Using ``curl`` ------------------------ - -First, create a container with the ``X-Versions-Location`` header or add the -header to an existing container. Also make sure the container referenced by -the ``X-Versions-Location`` exists. 
In this example, the name of that -container is "versions":: - - curl -i -XPUT -H "X-Auth-Token: " \ - -H "X-Versions-Location: versions" http:///container - curl -i -XPUT -H "X-Auth-Token: " http:///versions - -Create an object (the first version):: - - curl -i -XPUT --data-binary 1 -H "X-Auth-Token: " \ - http:///container/myobject - -Now create a new version of that object:: - - curl -i -XPUT --data-binary 2 -H "X-Auth-Token: " \ - http:///container/myobject - -See a listing of the older versions of the object:: - - curl -i -H "X-Auth-Token: " \ - http:///versions?prefix=008myobject/ - -Now delete the current version of the object and see that the older version is -gone:: - - curl -i -XDELETE -H "X-Auth-Token: " \ - http:///container/myobject - curl -i -H "X-Auth-Token: " \ - http:///versions?prefix=008myobject/ - ---------------------------------------------------- -How to Disable Object Versioning in a Swift Cluster ---------------------------------------------------- - -If you want to disable all functionality, set ``allow_versions`` back to -``False`` in the container server config. - -Disable versioning a versioned container (x is any value except empty):: - - curl -i -XPOST -H "X-Auth-Token: " \ - -H "X-Remove-Versions-Location: x" http:///container +.. automodule:: swift.common.middleware.versioned_writes + :members: + :show-inheritance: diff --git a/doc/source/overview_ring.rst b/doc/source/overview_ring.rst index d1f43affa5..ac82562ff1 100644 --- a/doc/source/overview_ring.rst +++ b/doc/source/overview_ring.rst @@ -168,6 +168,21 @@ on them than the disks in nodes A and B. If 80% full is the warning threshold for the cluster, node C's disks will reach 80% full while A and B's disks are only 72.7% full. +********** +Dispersion +********** + +With each rebalance, the ring builder calculates a dispersion metric. This is +the percentage of partitions in the ring that have too many replicas within a +particular failure domain. 
+ +For example, if you have three servers in a cluster but two replicas for a +partition get placed onto the same server, that partition will count towards the +dispersion metric. + +A lower dispersion value is better, and the value can be used to find the proper +value for "overload". + ********************* Partition Shift Value ********************* diff --git a/etc/account-server.conf-sample b/etc/account-server.conf-sample index 3631986fa2..8cfbfa0098 100644 --- a/etc/account-server.conf-sample +++ b/etc/account-server.conf-sample @@ -100,13 +100,6 @@ use = egg:swift#recon # run_pause is deprecated, use interval instead # run_pause = 30 # -# How long without an error before a node's error count is reset. This will -# also be how long before a node is reenabled after suppression is triggered. -# error_suppression_interval = 60 -# -# How many errors can accumulate before a node is temporarily ignored. -# error_suppression_limit = 10 -# # node_timeout = 10 # conn_timeout = 0.5 # diff --git a/etc/dispersion.conf-sample b/etc/dispersion.conf-sample index 7ce920e87d..865e80fecf 100644 --- a/etc/dispersion.conf-sample +++ b/etc/dispersion.conf-sample @@ -13,6 +13,16 @@ auth_key = testing # auth_key = password # auth_version = 2.0 # +# NOTE: If you want to use keystone (auth version 3.0), then its configuration +# would look something like: +# auth_url = http://localhost:5000/v3/ +# auth_user = user +# auth_key = password +# auth_version = 3.0 +# project_name = project +# project_domain_name = project_domain +# user_domain_name = user_domain +# # endpoint_type = publicURL # keystone_api_insecure = no # diff --git a/etc/object-expirer.conf-sample b/etc/object-expirer.conf-sample index 87840a48bf..6276fd5cfa 100644 --- a/etc/object-expirer.conf-sample +++ b/etc/object-expirer.conf-sample @@ -41,7 +41,7 @@ # config value # processes = 0 # process is which of the parts a particular process will work on -# process can also be specified on the command line and will overide 
the config +# process can also be specified on the command line and will override the config # value # process is "zero based", if you want to use 3 processes, you should run # processes with process set to 0, 1, and 2 diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample index b36ec29aa6..31bd160a3e 100644 --- a/etc/object-server.conf-sample +++ b/etc/object-server.conf-sample @@ -60,6 +60,8 @@ bind_port = 6000 # conn_timeout = 0.5 # Time to wait while sending each chunk of data to another backend node. # node_timeout = 3 +# Time to wait while sending a container update on object update. +# container_update_timeout = 1.0 # Time to wait while receiving each chunk of data from a client or another # backend node. # client_timeout = 60 diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index 55b6137ae0..b37101c37a 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -77,7 +77,7 @@ bind_port = 8080 # eventlet_debug = false [pipeline:main] -pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo proxy-logging proxy-server +pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server [app:proxy-server] use = egg:swift#proxy @@ -703,3 +703,14 @@ use = egg:swift#xprofile # # unwind the iterator of applications # unwind = false + +# Note: Put after slo, dlo in the pipeline. +# If you don't put it in the pipeline, it will be inserted automatically. +[filter:versioned_writes] +use = egg:swift#versioned_writes +# Enables using versioned writes middleware and exposing configuration +# settings via HTTP GET /info. +# WARNING: Setting this option bypasses the "allow_versions" option +# in the container configuration file, which will be eventually +# deprecated. 
See documentation for more details. +# allow_versioned_writes = false diff --git a/etc/swift.conf-sample b/etc/swift.conf-sample index 76d1e876ae..18cb047cf5 100644 --- a/etc/swift.conf-sample +++ b/etc/swift.conf-sample @@ -134,7 +134,7 @@ default = yes # headers. If for some reason this is not enough (custom middleware for # example) it can be increased with the extra_header_count constraint. -#extra_header_count = 32 +#extra_header_count = 0 # max_object_name_length is the max number of bytes in the utf8 encoding diff --git a/requirements.txt b/requirements.txt index 35aab42f4b..e85555288d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,4 +10,4 @@ pastedeploy>=1.3.3 simplejson>=2.0.9 six>=1.9.0 xattr>=0.4 -PyECLib>=1.0.7 +PyECLib==1.0.7 # BSD diff --git a/setup.cfg b/setup.cfg index a40fc535ee..a819a57f02 100644 --- a/setup.cfg +++ b/setup.cfg @@ -95,6 +95,7 @@ paste.filter_factory = gatekeeper = swift.common.middleware.gatekeeper:filter_factory container_sync = swift.common.middleware.container_sync:filter_factory xprofile = swift.common.middleware.xprofile:filter_factory + versioned_writes = swift.common.middleware.versioned_writes:filter_factory [build_sphinx] all_files = 1 diff --git a/swift/account/auditor.py b/swift/account/auditor.py index 261acf7e92..0f72999b90 100644 --- a/swift/account/auditor.py +++ b/swift/account/auditor.py @@ -122,11 +122,10 @@ class AccountAuditor(Daemon): continue raise InvalidAccountInfo(_( 'The total %(key)s for the container (%(total)s) does not ' - 'match the sum of %(key)s across policies (%(sum)s)') % { - 'key': key, - 'total': info[key], - 'sum': policy_totals[key], - }) + 'match the sum of %(key)s across policies (%(sum)s)') + % {'key': key, + 'total': info[key], + 'sum': policy_totals[key]}) def account_audit(self, path): """ diff --git a/swift/account/backend.py b/swift/account/backend.py index 0500c4f6de..c91f37fe9b 100644 --- a/swift/account/backend.py +++ b/swift/account/backend.py @@ -18,7 +18,7 @@ 
Pluggable Back-end for Account Server from uuid import uuid4 import time -import cPickle as pickle +import six.moves.cPickle as pickle import sqlite3 @@ -380,6 +380,7 @@ class AccountBroker(DatabaseBroker): :returns: list of tuples of (name, object_count, bytes_used, 0) """ + delim_force_gte = False (marker, end_marker, prefix, delimiter) = utf8encode( marker, end_marker, prefix, delimiter) self._commit_puts_stale_ok() @@ -392,12 +393,17 @@ class AccountBroker(DatabaseBroker): query = """ SELECT name, object_count, bytes_used, 0 FROM container - WHERE deleted = 0 AND """ + WHERE """ query_args = [] if end_marker: query += ' name < ? AND' query_args.append(end_marker) - if marker and marker >= prefix: + if delim_force_gte: + query += ' name >= ? AND' + query_args.append(marker) + # Always set back to False + delim_force_gte = False + elif marker and marker >= prefix: query += ' name > ? AND' query_args.append(marker) elif prefix: @@ -437,6 +443,8 @@ class AccountBroker(DatabaseBroker): end = name.find(delimiter, len(prefix)) if end > 0: marker = name[:end] + chr(ord(delimiter) + 1) + # we want result to be inclusive of delim+1 + delim_force_gte = True dir_name = name[:end + 1] if dir_name != orig_marker: results.append([dir_name, 0, 0, 1]) diff --git a/swift/account/reaper.py b/swift/account/reaper.py index c121bf0ea5..14af76ad5d 100644 --- a/swift/account/reaper.py +++ b/swift/account/reaper.py @@ -15,10 +15,12 @@ import os import random +import socket from swift import gettext_ as _ from logging import DEBUG from math import sqrt from time import time +from hashlib import md5 import itertools from eventlet import GreenPool, sleep, Timeout @@ -70,6 +72,7 @@ class AccountReaper(Daemon): self.node_timeout = int(conf.get('node_timeout', 10)) self.conn_timeout = float(conf.get('conn_timeout', 0.5)) self.myips = whataremyips(conf.get('bind_ip', '0.0.0.0')) + self.bind_port = int(conf.get('bind_port', 0)) self.concurrency = int(conf.get('concurrency', 25)) 
self.container_concurrency = self.object_concurrency = \ sqrt(self.concurrency) @@ -79,6 +82,7 @@ class AccountReaper(Daemon): self.delay_reaping = int(conf.get('delay_reaping') or 0) reap_warn_after = float(conf.get('reap_warn_after') or 86400 * 30) self.reap_not_done_after = reap_warn_after + self.delay_reaping + self.start_time = time() def get_account_ring(self): """The account :class:`swift.common.ring.Ring` for the cluster.""" @@ -161,9 +165,16 @@ class AccountReaper(Daemon): if not partition.isdigit(): continue nodes = self.get_account_ring().get_part_nodes(int(partition)) - if (not is_local_device(self.myips, None, nodes[0]['ip'], None) - or not os.path.isdir(partition_path)): + if not os.path.isdir(partition_path): continue + container_shard = None + for container_shard, node in enumerate(nodes): + if is_local_device(self.myips, None, node['ip'], None) and \ + (not self.bind_port or self.bind_port == node['port']): + break + else: + continue + for suffix in os.listdir(partition_path): suffix_path = os.path.join(partition_path, suffix) if not os.path.isdir(suffix_path): @@ -181,7 +192,9 @@ class AccountReaper(Daemon): AccountBroker(os.path.join(hsh_path, fname)) if broker.is_status_deleted() and \ not broker.empty(): - self.reap_account(broker, partition, nodes) + self.reap_account( + broker, partition, nodes, + container_shard=container_shard) def reset_stats(self): self.stats_return_codes = {} @@ -192,7 +205,7 @@ class AccountReaper(Daemon): self.stats_containers_possibly_remaining = 0 self.stats_objects_possibly_remaining = 0 - def reap_account(self, broker, partition, nodes): + def reap_account(self, broker, partition, nodes, container_shard=None): """ Called once per pass for each account this server is the primary for and attempts to delete the data for the given account. The reaper will @@ -219,6 +232,8 @@ class AccountReaper(Daemon): :param broker: The AccountBroker for the account to delete. 
:param partition: The partition in the account ring the account is on. :param nodes: The primary node dicts for the account to delete. + :param container_shard: int used to shard containers reaped. If None, + will reap all containers. .. seealso:: @@ -237,16 +252,24 @@ class AccountReaper(Daemon): account = info['account'] self.logger.info(_('Beginning pass on account %s'), account) self.reset_stats() + container_limit = 1000 + if container_shard is not None: + container_limit *= len(nodes) try: marker = '' while True: containers = \ - list(broker.list_containers_iter(1000, marker, None, None, - None)) + list(broker.list_containers_iter(container_limit, marker, + None, None, None)) if not containers: break try: for (container, _junk, _junk, _junk) in containers: + this_shard = int(md5(container).hexdigest(), 16) % \ + len(nodes) + if container_shard not in (this_shard, None): + continue + self.container_pool.spawn(self.reap_container, account, partition, nodes, container) self.container_pool.waitall() @@ -347,10 +370,14 @@ class AccountReaper(Daemon): if self.logger.getEffectiveLevel() <= DEBUG: self.logger.exception( _('Exception with %(ip)s:%(port)s/%(device)s'), node) - self.stats_return_codes[err.http_status / 100] = \ - self.stats_return_codes.get(err.http_status / 100, 0) + 1 + self.stats_return_codes[err.http_status // 100] = \ + self.stats_return_codes.get(err.http_status // 100, 0) + 1 self.logger.increment( - 'return_codes.%d' % (err.http_status / 100,)) + 'return_codes.%d' % (err.http_status // 100,)) + except (Timeout, socket.error) as err: + self.logger.error( + _('Timeout Exception with %(ip)s:%(port)s/%(device)s'), + node) if not objects: break try: @@ -399,10 +426,16 @@ class AccountReaper(Daemon): _('Exception with %(ip)s:%(port)s/%(device)s'), node) failures += 1 self.logger.increment('containers_failures') - self.stats_return_codes[err.http_status / 100] = \ - self.stats_return_codes.get(err.http_status / 100, 0) + 1 + 
self.stats_return_codes[err.http_status // 100] = \ + self.stats_return_codes.get(err.http_status // 100, 0) + 1 self.logger.increment( - 'return_codes.%d' % (err.http_status / 100,)) + 'return_codes.%d' % (err.http_status // 100,)) + except (Timeout, socket.error) as err: + self.logger.error( + _('Timeout Exception with %(ip)s:%(port)s/%(device)s'), + node) + failures += 1 + self.logger.increment('containers_failures') if successes > failures: self.stats_containers_deleted += 1 self.logger.increment('containers_deleted') @@ -469,10 +502,16 @@ class AccountReaper(Daemon): _('Exception with %(ip)s:%(port)s/%(device)s'), node) failures += 1 self.logger.increment('objects_failures') - self.stats_return_codes[err.http_status / 100] = \ - self.stats_return_codes.get(err.http_status / 100, 0) + 1 + self.stats_return_codes[err.http_status // 100] = \ + self.stats_return_codes.get(err.http_status // 100, 0) + 1 self.logger.increment( - 'return_codes.%d' % (err.http_status / 100,)) + 'return_codes.%d' % (err.http_status // 100,)) + except (Timeout, socket.error) as err: + failures += 1 + self.logger.increment('objects_failures') + self.logger.error( + _('Timeout Exception with %(ip)s:%(port)s/%(device)s'), + node) if successes > failures: self.stats_objects_deleted += 1 self.logger.increment('objects_deleted') diff --git a/swift/cli/form_signature.py b/swift/cli/form_signature.py index 0aefaca37d..0cf1a99d34 100644 --- a/swift/cli/form_signature.py +++ b/swift/cli/form_signature.py @@ -15,6 +15,7 @@ """ Script for generating a form signature for use with FormPost middleware. """ +from __future__ import print_function import hmac from hashlib import sha1 from os.path import basename @@ -24,41 +25,41 @@ from time import time def main(argv): if len(argv) != 7: prog = basename(argv[0]) - print 'Syntax: %s ' \ - ' ' % prog - print - print 'Where:' - print ' The prefix to use for form uploaded' - print ' objects. 
For example:' - print ' /v1/account/container/object_prefix_ would' - print ' ensure all form uploads have that path' - print ' prepended to the browser-given file name.' - print ' The URL to redirect the browser to after' - print ' the uploads have completed.' - print ' The maximum file size per file uploaded.' - print ' The maximum number of uploaded files' - print ' allowed.' - print ' The number of seconds from now to allow' - print ' the form post to begin.' - print ' The X-Account-Meta-Temp-URL-Key for the' - print ' account.' - print - print 'Example output:' - print ' Expires: 1323842228' - print ' Signature: 18de97e47345a82c4dbfb3b06a640dbb' - print - print 'Sample form:' - print + print('Syntax: %s ' + ' ' % prog) + print() + print('Where:') + print(' The prefix to use for form uploaded') + print(' objects. For example:') + print(' /v1/account/container/object_prefix_ would') + print(' ensure all form uploads have that path') + print(' prepended to the browser-given file name.') + print(' The URL to redirect the browser to after') + print(' the uploads have completed.') + print(' The maximum file size per file uploaded.') + print(' The maximum number of uploaded files') + print(' allowed.') + print(' The number of seconds from now to allow') + print(' the form post to begin.') + print(' The X-Account-Meta-Temp-URL-Key for the') + print(' account.') + print() + print('Example output:') + print(' Expires: 1323842228') + print(' Signature: 18de97e47345a82c4dbfb3b06a640dbb') + print() + print('Sample form:') + print() print('NOTE: the
tag\'s "action" attribute does not contain ' 'the Swift cluster\'s hostname.') - print 'You should manually add it before using the form.' - print + print('You should manually add it before using the form.') + print() print('') - print ' ' - print ' ... more HTML ...' - print ' ' - print '
' + print(' ') + print(' ... more HTML ...') + print(' ') + print('') return 1 path, redirect, max_file_size, max_file_count, seconds, key = argv[1:] try: @@ -66,37 +67,37 @@ def main(argv): except ValueError: max_file_size = -1 if max_file_size < 0: - print 'Please use a value greater than or equal to 0.' + print('Please use a value greater than or equal to 0.') return 1 try: max_file_count = int(max_file_count) except ValueError: max_file_count = 0 if max_file_count < 1: - print 'Please use a positive value.' + print('Please use a positive value.') return 1 try: expires = int(time() + int(seconds)) except ValueError: expires = 0 if expires < 1: - print 'Please use a positive value.' + print('Please use a positive value.') return 1 parts = path.split('/', 4) # Must be four parts, ['', 'v1', 'a', 'c'], must be a v1 request, have # account and container values, and optionally have an object prefix. if len(parts) < 4 or parts[0] or parts[1] != 'v1' or not parts[2] or \ not parts[3]: - print ' must point to a container at least.' - print 'For example: /v1/account/container' - print ' Or: /v1/account/container/object_prefix' + print(' must point to a container at least.') + print('For example: /v1/account/container') + print(' Or: /v1/account/container/object_prefix') return 1 sig = hmac.new(key, '%s\n%s\n%s\n%s\n%s' % (path, redirect, max_file_size, max_file_count, expires), sha1).hexdigest() - print ' Expires:', expires - print 'Signature:', sig - print '' + print(' Expires:', expires) + print('Signature:', sig) + print('') print('Sample form:\n') diff --git a/swift/cli/info.py b/swift/cli/info.py index cc23b58650..eed4dea164 100644 --- a/swift/cli/info.py +++ b/swift/cli/info.py @@ -10,6 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+from __future__ import print_function import itertools import os import sqlite3 @@ -84,17 +85,17 @@ def print_ring_locations(ring, datadir, account, container=None, obj=None, path_hash = hash_path(account, container, obj) else: path_hash = None - print 'Partition\t%s' % part - print 'Hash \t%s\n' % path_hash + print('Partition\t%s' % part) + print('Hash \t%s\n' % path_hash) for node in primary_nodes: - print 'Server:Port Device\t%s:%s %s' % (node['ip'], node['port'], - node['device']) + print('Server:Port Device\t%s:%s %s' % (node['ip'], node['port'], + node['device'])) for node in handoff_nodes: - print 'Server:Port Device\t%s:%s %s\t [Handoff]' % ( - node['ip'], node['port'], node['device']) + print('Server:Port Device\t%s:%s %s\t [Handoff]' % ( + node['ip'], node['port'], node['device'])) - print "\n" + print("\n") for node in primary_nodes: cmd = 'curl -I -XHEAD "http://%s:%s/%s/%s/%s"' \ @@ -103,7 +104,7 @@ def print_ring_locations(ring, datadir, account, container=None, obj=None, if policy_index is not None: cmd += ' -H "%s: %s"' % ('X-Backend-Storage-Policy-Index', policy_index) - print cmd + print(cmd) for node in handoff_nodes: cmd = 'curl -I -XHEAD "http://%s:%s/%s/%s/%s"' \ % (node['ip'], node['port'], node['device'], part, @@ -112,30 +113,30 @@ def print_ring_locations(ring, datadir, account, container=None, obj=None, cmd += ' -H "%s: %s"' % ('X-Backend-Storage-Policy-Index', policy_index) cmd += ' # [Handoff]' - print cmd + print(cmd) - print "\n\nUse your own device location of servers:" - print "such as \"export DEVICE=/srv/node\"" + print("\n\nUse your own device location of servers:") + print("such as \"export DEVICE=/srv/node\"") if path_hash: for node in primary_nodes: - print ('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s"' % - (node['ip'], node['device'], - storage_directory(datadir, part, path_hash))) + print('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s"' % + (node['ip'], node['device'], + storage_directory(datadir, part, path_hash))) for node 
in handoff_nodes: - print ('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s" # [Handoff]' % - (node['ip'], node['device'], - storage_directory(datadir, part, path_hash))) + print('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s" # [Handoff]' % + (node['ip'], node['device'], + storage_directory(datadir, part, path_hash))) else: for node in primary_nodes: - print ('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s/%d"' % - (node['ip'], node['device'], datadir, part)) + print('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s/%d"' % + (node['ip'], node['device'], datadir, part)) for node in handoff_nodes: - print ('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s/%d"' - ' # [Handoff]' % - (node['ip'], node['device'], datadir, part)) + print('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s/%d"' + ' # [Handoff]' % + (node['ip'], node['device'], datadir, part)) - print '\nnote: `/srv/node*` is used as default value of `devices`, the ' \ - 'real value is set in the config file on each storage node.' + print('\nnote: `/srv/node*` is used as default value of `devices`, the ' + 'real value is set in the config file on each storage node.') def print_db_info_metadata(db_type, info, metadata): @@ -162,52 +163,53 @@ def print_db_info_metadata(db_type, info, metadata): else: path = '/%s' % account - print 'Path: %s' % path - print ' Account: %s' % account + print('Path: %s' % path) + print(' Account: %s' % account) if db_type == 'container': - print ' Container: %s' % container + print(' Container: %s' % container) path_hash = hash_path(account, container) if db_type == 'container': - print ' Container Hash: %s' % path_hash + print(' Container Hash: %s' % path_hash) else: - print ' Account Hash: %s' % path_hash + print(' Account Hash: %s' % path_hash) - print 'Metadata:' - print (' Created at: %s (%s)' % - (Timestamp(info['created_at']).isoformat, - info['created_at'])) - print (' Put Timestamp: %s (%s)' % - (Timestamp(info['put_timestamp']).isoformat, - info['put_timestamp'])) - print (' Delete Timestamp: 
%s (%s)' % - (Timestamp(info['delete_timestamp']).isoformat, - info['delete_timestamp'])) - print (' Status Timestamp: %s (%s)' % - (Timestamp(info['status_changed_at']).isoformat, - info['status_changed_at'])) + print('Metadata:') + print(' Created at: %s (%s)' % + (Timestamp(info['created_at']).isoformat, + info['created_at'])) + print(' Put Timestamp: %s (%s)' % + (Timestamp(info['put_timestamp']).isoformat, + info['put_timestamp'])) + print(' Delete Timestamp: %s (%s)' % + (Timestamp(info['delete_timestamp']).isoformat, + info['delete_timestamp'])) + print(' Status Timestamp: %s (%s)' % + (Timestamp(info['status_changed_at']).isoformat, + info['status_changed_at'])) if db_type == 'account': - print ' Container Count: %s' % info['container_count'] - print ' Object Count: %s' % info['object_count'] - print ' Bytes Used: %s' % info['bytes_used'] + print(' Container Count: %s' % info['container_count']) + print(' Object Count: %s' % info['object_count']) + print(' Bytes Used: %s' % info['bytes_used']) if db_type == 'container': try: policy_name = POLICIES[info['storage_policy_index']].name except KeyError: policy_name = 'Unknown' - print (' Storage Policy: %s (%s)' % ( + print(' Storage Policy: %s (%s)' % ( policy_name, info['storage_policy_index'])) - print (' Reported Put Timestamp: %s (%s)' % - (Timestamp(info['reported_put_timestamp']).isoformat, - info['reported_put_timestamp'])) - print (' Reported Delete Timestamp: %s (%s)' % - (Timestamp(info['reported_delete_timestamp']).isoformat, - info['reported_delete_timestamp'])) - print ' Reported Object Count: %s' % info['reported_object_count'] - print ' Reported Bytes Used: %s' % info['reported_bytes_used'] - print ' Chexor: %s' % info['hash'] - print ' UUID: %s' % info['id'] + print(' Reported Put Timestamp: %s (%s)' % + (Timestamp(info['reported_put_timestamp']).isoformat, + info['reported_put_timestamp'])) + print(' Reported Delete Timestamp: %s (%s)' % + 
(Timestamp(info['reported_delete_timestamp']).isoformat, + info['reported_delete_timestamp'])) + print(' Reported Object Count: %s' % + info['reported_object_count']) + print(' Reported Bytes Used: %s' % info['reported_bytes_used']) + print(' Chexor: %s' % info['hash']) + print(' UUID: %s' % info['id']) except KeyError as e: raise ValueError('Info is incomplete: %s' % e) @@ -215,7 +217,7 @@ def print_db_info_metadata(db_type, info, metadata): for key, value in info.items(): if key.lower().startswith(meta_prefix): title = key.replace('_', '-').title() - print ' %s: %s' % (title, value) + print(' %s: %s' % (title, value)) user_metadata = {} sys_metadata = {} for key, (value, timestamp) in metadata.items(): @@ -225,16 +227,16 @@ def print_db_info_metadata(db_type, info, metadata): sys_metadata[strip_sys_meta_prefix(db_type, key)] = value else: title = key.replace('_', '-').title() - print ' %s: %s' % (title, value) + print(' %s: %s' % (title, value)) if sys_metadata: - print ' System Metadata: %s' % sys_metadata + print(' System Metadata: %s' % sys_metadata) else: - print 'No system metadata found in db file' + print('No system metadata found in db file') if user_metadata: - print ' User Metadata: %s' % user_metadata + print(' User Metadata: %s' % user_metadata) else: - print 'No user metadata found in db file' + print('No user metadata found in db file') def print_obj_metadata(metadata): @@ -268,21 +270,21 @@ def print_obj_metadata(metadata): raise ValueError('Path is invalid for object %r' % path) else: obj_hash = hash_path(account, container, obj) - print 'Path: %s' % path - print ' Account: %s' % account - print ' Container: %s' % container - print ' Object: %s' % obj - print ' Object hash: %s' % obj_hash + print('Path: %s' % path) + print(' Account: %s' % account) + print(' Container: %s' % container) + print(' Object: %s' % obj) + print(' Object hash: %s' % obj_hash) else: - print 'Path: Not found in metadata' + print('Path: Not found in metadata') if 
content_type: - print 'Content-Type: %s' % content_type + print('Content-Type: %s' % content_type) else: - print 'Content-Type: Not found in metadata' + print('Content-Type: Not found in metadata') if ts: - print ('Timestamp: %s (%s)' % (ts.isoformat, ts.internal)) + print('Timestamp: %s (%s)' % (ts.isoformat, ts.internal)) else: - print 'Timestamp: Not found in metadata' + print('Timestamp: Not found in metadata') for key, value in metadata.items(): if is_user_meta('Object', key): @@ -293,12 +295,12 @@ def print_obj_metadata(metadata): other_metadata[key] = value def print_metadata(title, items): - print title + print(title) if items: for meta_key in sorted(items): - print ' %s: %s' % (meta_key, items[meta_key]) + print(' %s: %s' % (meta_key, items[meta_key])) else: - print ' No metadata found' + print(' No metadata found') print_metadata('System Metadata:', sys_metadata) print_metadata('User Metadata:', user_metadata) @@ -307,10 +309,10 @@ def print_obj_metadata(metadata): def print_info(db_type, db_file, swift_dir='/etc/swift'): if db_type not in ('account', 'container'): - print "Unrecognized DB type: internal error" + print("Unrecognized DB type: internal error") raise InfoSystemExit() if not os.path.exists(db_file) or not db_file.endswith('.db'): - print "DB file doesn't exist" + print("DB file doesn't exist") raise InfoSystemExit() if not db_file.startswith(('/', './')): db_file = './' + db_file # don't break if the bare db file is given @@ -324,8 +326,8 @@ def print_info(db_type, db_file, swift_dir='/etc/swift'): info = broker.get_info() except sqlite3.OperationalError as err: if 'no such table' in str(err): - print "Does not appear to be a DB of type \"%s\": %s" % ( - db_type, db_file) + print("Does not appear to be a DB of type \"%s\": %s" + % (db_type, db_file)) raise InfoSystemExit() raise account = info['account'] @@ -353,7 +355,7 @@ def print_obj(datafile, check_etag=True, swift_dir='/etc/swift', :param policy_name: optionally the name to use when 
finding the ring """ if not os.path.exists(datafile): - print "Data file doesn't exist" + print("Data file doesn't exist") raise InfoSystemExit() if not datafile.startswith(('/', './')): datafile = './' + datafile @@ -363,7 +365,8 @@ def print_obj(datafile, check_etag=True, swift_dir='/etc/swift', datadir = DATADIR_BASE # try to extract policy index from datafile disk path - policy_index = int(extract_policy(datafile) or POLICIES.legacy) + fullpath = os.path.abspath(datafile) + policy_index = int(extract_policy(fullpath) or POLICIES.legacy) try: if policy_index: @@ -382,8 +385,8 @@ def print_obj(datafile, check_etag=True, swift_dir='/etc/swift', if (policy_index is not None and policy_index_for_name is not None and policy_index != policy_index_for_name): - print 'Warning: Ring does not match policy!' - print 'Double check your policy name!' + print('Warning: Ring does not match policy!') + print('Double check your policy name!') if not ring and policy_index_for_name: ring = POLICIES.get_object_ring(policy_index_for_name, swift_dir) @@ -393,7 +396,7 @@ def print_obj(datafile, check_etag=True, swift_dir='/etc/swift', try: metadata = read_metadata(fp) except EOFError: - print "Invalid metadata" + print("Invalid metadata") raise InfoSystemExit() etag = metadata.pop('ETag', '') @@ -415,24 +418,24 @@ def print_obj(datafile, check_etag=True, swift_dir='/etc/swift', h = h.hexdigest() if etag: if h == etag: - print 'ETag: %s (valid)' % etag + print('ETag: %s (valid)' % etag) else: - print ("ETag: %s doesn't match file hash of %s!" % - (etag, h)) + print("ETag: %s doesn't match file hash of %s!" 
% + (etag, h)) else: - print 'ETag: Not found in metadata' + print('ETag: Not found in metadata') else: - print 'ETag: %s (not checked)' % etag + print('ETag: %s (not checked)' % etag) file_len = os.fstat(fp.fileno()).st_size if length: if file_len == int(length): - print 'Content-Length: %s (valid)' % length + print('Content-Length: %s (valid)' % length) else: - print ("Content-Length: %s doesn't match file length of %s" - % (length, file_len)) + print("Content-Length: %s doesn't match file length of %s" + % (length, file_len)) else: - print 'Content-Length: Not found in metadata' + print('Content-Length: Not found in metadata') account, container, obj = path.split('/', 3)[1:] if ring: @@ -472,33 +475,33 @@ def print_item_locations(ring, ring_name=None, account=None, container=None, policy = POLICIES.get_by_name(policy_name) if policy: if ring_name != policy.ring_name: - print 'Warning: mismatch between ring and policy name!' + print('Warning: mismatch between ring and policy name!') else: - print 'Warning: Policy %s is not valid' % policy_name + print('Warning: Policy %s is not valid' % policy_name) policy_index = None if ring is None and (obj or part): if not policy_name: - print 'Need a ring or policy' + print('Need a ring or policy') raise InfoSystemExit() policy = POLICIES.get_by_name(policy_name) if not policy: - print 'No policy named %r' % policy_name + print('No policy named %r' % policy_name) raise InfoSystemExit() policy_index = int(policy) ring = POLICIES.get_object_ring(policy_index, swift_dir) ring_name = (POLICIES.get_by_name(policy_name)).ring_name if account is None and (container is not None or obj is not None): - print 'No account specified' + print('No account specified') raise InfoSystemExit() if container is None and obj is not None: - print 'No container specified' + print('No container specified') raise InfoSystemExit() if account is None and part is None: - print 'No target specified' + print('No target specified') raise InfoSystemExit() 
loc = '' @@ -518,19 +521,19 @@ def print_item_locations(ring, ring_name=None, account=None, container=None, ring = Ring(swift_dir, ring_name='container') else: if ring_name != 'container': - print 'Warning: account/container specified ' + \ - 'but ring not named "container"' + print('Warning: account/container specified ' + + 'but ring not named "container"') if account and not container and not obj: loc = 'accounts' if not any([ring, ring_name]): ring = Ring(swift_dir, ring_name='account') else: if ring_name != 'account': - print 'Warning: account specified ' + \ - 'but ring not named "account"' + print('Warning: account specified ' + + 'but ring not named "account"') - print '\nAccount \t%s' % account - print 'Container\t%s' % container - print 'Object \t%s\n\n' % obj + print('\nAccount \t%s' % account) + print('Container\t%s' % container) + print('Object \t%s\n\n' % obj) print_ring_locations(ring, loc, account, container, obj, part, all_nodes, policy_index=policy_index) diff --git a/swift/cli/recon.py b/swift/cli/recon.py old mode 100755 new mode 100644 index f57f75c22a..6c5894a5ae --- a/swift/cli/recon.py +++ b/swift/cli/recon.py @@ -51,7 +51,7 @@ def size_suffix(size): for suffix in suffixes: if size < 1000: return "%s %s" % (size, suffix) - size = size / 1000 + size = size // 1000 return "%s %s" % (size, suffix) @@ -100,11 +100,14 @@ class Scout(object): Obtain telemetry from a host running the swift recon middleware. 
:param host: host to check - :returns: tuple of (recon url used, response body, and status) + :returns: tuple of (recon url used, response body, status, time start + and time end) """ base_url = "http://%s:%s/recon/" % (host[0], host[1]) + ts_start = time.time() url, content, status = self.scout_host(base_url, self.recon_type) - return url, content, status + ts_end = time.time() + return url, content, status, ts_start, ts_end def scout_server_type(self, host): """ @@ -253,7 +256,8 @@ class SwiftRecon(object): if self.verbose: for ring_file, ring_sum in rings.items(): print("-> On disk %s md5sum: %s" % (ring_file, ring_sum)) - for url, response, status in self.pool.imap(recon.scout, hosts): + for url, response, status, ts_start, ts_end in self.pool.imap( + recon.scout, hosts): if status != 200: errors = errors + 1 continue @@ -291,7 +295,8 @@ class SwiftRecon(object): printfn("[%s] Checking swift.conf md5sum" % self._ptime()) if self.verbose: printfn("-> On disk swift.conf md5sum: %s" % (conf_sum,)) - for url, response, status in self.pool.imap(recon.scout, hosts): + for url, response, status, ts_start, ts_end in self.pool.imap( + recon.scout, hosts): if status == 200: if response[SWIFT_CONF_FILE] != conf_sum: printfn("!! 
%s (%s) doesn't match on disk md5sum" % @@ -317,7 +322,8 @@ class SwiftRecon(object): recon = Scout("async", self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking async pendings" % self._ptime()) - for url, response, status in self.pool.imap(recon.scout, hosts): + for url, response, status, ts_start, ts_end in self.pool.imap( + recon.scout, hosts): if status == 200: scan[url] = response['async_pending'] stats = self._gen_stats(scan.values(), 'async_pending') @@ -338,7 +344,8 @@ class SwiftRecon(object): recon = Scout("driveaudit", self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking drive-audit errors" % self._ptime()) - for url, response, status in self.pool.imap(recon.scout, hosts): + for url, response, status, ts_start, ts_end in self.pool.imap( + recon.scout, hosts): if status == 200: scan[url] = response['drive_audit_errors'] stats = self._gen_stats(scan.values(), 'drive_audit_errors') @@ -361,7 +368,8 @@ class SwiftRecon(object): self.timeout) print("[%s] Getting unmounted drives from %s hosts..." 
% (self._ptime(), len(hosts))) - for url, response, status in self.pool.imap(recon.scout, hosts): + for url, response, status, ts_start, ts_end in self.pool.imap( + recon.scout, hosts): if status == 200: unmounted[url] = [] errors[url] = [] @@ -414,7 +422,8 @@ class SwiftRecon(object): recon = Scout("expirer/%s" % self.server_type, self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking on expirers" % self._ptime()) - for url, response, status in self.pool.imap(recon.scout, hosts): + for url, response, status, ts_start, ts_end in self.pool.imap( + recon.scout, hosts): if status == 200: stats['object_expiration_pass'].append( response.get('object_expiration_pass')) @@ -447,15 +456,18 @@ class SwiftRecon(object): least_recent_url = None most_recent_time = 0 most_recent_url = None - for url, response, status in self.pool.imap(recon.scout, hosts): + for url, response, status, ts_start, ts_end in self.pool.imap( + recon.scout, hosts): if status == 200: stats['replication_time'].append( - response.get('replication_time')) - repl_stats = response['replication_stats'] + response.get('replication_time', + response.get('object_replication_time', 0))) + repl_stats = response.get('replication_stats') if repl_stats: for stat_key in ['attempted', 'failure', 'success']: stats[stat_key].append(repl_stats.get(stat_key)) - last = response.get('replication_last', 0) + last = response.get('replication_last', + response.get('object_replication_last', 0)) if last < least_recent_time: least_recent_time = last least_recent_url = url @@ -496,61 +508,6 @@ class SwiftRecon(object): elapsed, elapsed_unit, host)) print("=" * 79) - def object_replication_check(self, hosts): - """ - Obtain and print replication statistics from object servers - - :param hosts: set of hosts to check. 
in the format of: - set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) - """ - stats = {} - recon = Scout("replication", self.verbose, self.suppress_errors, - self.timeout) - print("[%s] Checking on replication" % self._ptime()) - least_recent_time = 9999999999 - least_recent_url = None - most_recent_time = 0 - most_recent_url = None - for url, response, status in self.pool.imap(recon.scout, hosts): - if status == 200: - stats[url] = response['object_replication_time'] - last = response.get('object_replication_last', 0) - if last < least_recent_time: - least_recent_time = last - least_recent_url = url - if last > most_recent_time: - most_recent_time = last - most_recent_url = url - times = [x for x in stats.values() if x is not None] - if len(stats) > 0 and len(times) > 0: - computed = self._gen_stats(times, 'replication_time') - if computed['reported'] > 0: - self._print_stats(computed) - else: - print("[replication_time] - No hosts returned valid data.") - else: - print("[replication_time] - No hosts returned valid data.") - if least_recent_url is not None: - host = urlparse(least_recent_url).netloc - if not least_recent_time: - print('Oldest completion was NEVER by %s.' % host) - else: - elapsed = time.time() - least_recent_time - elapsed, elapsed_unit = seconds2timeunit(elapsed) - print('Oldest completion was %s (%d %s ago) by %s.' % ( - time.strftime('%Y-%m-%d %H:%M:%S', - time.gmtime(least_recent_time)), - elapsed, elapsed_unit, host)) - if most_recent_url is not None: - host = urlparse(most_recent_url).netloc - elapsed = time.time() - most_recent_time - elapsed, elapsed_unit = seconds2timeunit(elapsed) - print('Most recent completion was %s (%d %s ago) by %s.' 
% ( - time.strftime('%Y-%m-%d %H:%M:%S', - time.gmtime(most_recent_time)), - elapsed, elapsed_unit, host)) - print("=" * 79) - def updater_check(self, hosts): """ Obtain and print updater statistics @@ -562,7 +519,8 @@ class SwiftRecon(object): recon = Scout("updater/%s" % self.server_type, self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking updater times" % self._ptime()) - for url, response, status in self.pool.imap(recon.scout, hosts): + for url, response, status, ts_start, ts_end in self.pool.imap( + recon.scout, hosts): if status == 200: if response['%s_updater_sweep' % self.server_type]: stats.append(response['%s_updater_sweep' % @@ -592,7 +550,8 @@ class SwiftRecon(object): recon = Scout("auditor/%s" % self.server_type, self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking auditor stats" % self._ptime()) - for url, response, status in self.pool.imap(recon.scout, hosts): + for url, response, status, ts_start, ts_end in self.pool.imap( + recon.scout, hosts): if status == 200: scan[url] = response if len(scan) < 1: @@ -665,7 +624,8 @@ class SwiftRecon(object): recon = Scout("auditor/object", self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking auditor stats " % self._ptime()) - for url, response, status in self.pool.imap(recon.scout, hosts): + for url, response, status, ts_start, ts_end in self.pool.imap( + recon.scout, hosts): if status == 200: if response['object_auditor_stats_ALL']: all_scan[url] = response['object_auditor_stats_ALL'] @@ -736,7 +696,8 @@ class SwiftRecon(object): recon = Scout("load", self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking load averages" % self._ptime()) - for url, response, status in self.pool.imap(recon.scout, hosts): + for url, response, status, ts_start, ts_end in self.pool.imap( + recon.scout, hosts): if status == 200: load1[url] = response['1m'] load5[url] = response['5m'] @@ -765,7 +726,8 @@ class SwiftRecon(object): recon = Scout("quarantined", 
self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking quarantine" % self._ptime()) - for url, response, status in self.pool.imap(recon.scout, hosts): + for url, response, status, ts_start, ts_end in self.pool.imap( + recon.scout, hosts): if status == 200: objq[url] = response['objects'] conq[url] = response['containers'] @@ -799,7 +761,8 @@ class SwiftRecon(object): recon = Scout("sockstat", self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking socket usage" % self._ptime()) - for url, response, status in self.pool.imap(recon.scout, hosts): + for url, response, status, ts_start, ts_end in self.pool.imap( + recon.scout, hosts): if status == 200: inuse4[url] = response['tcp_in_use'] mem[url] = response['tcp_mem_allocated_bytes'] @@ -835,7 +798,8 @@ class SwiftRecon(object): recon = Scout("diskusage", self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking disk usage now" % self._ptime()) - for url, response, status in self.pool.imap(recon.scout, hosts): + for url, response, status, ts_start, ts_end in self.pool.imap( + recon.scout, hosts): if status == 200: hostusage = [] for entry in response: @@ -915,6 +879,47 @@ class SwiftRecon(object): host = urlparse(url).netloc.split(':')[0] print('%.02f%% %s' % (used, '%-15s %s' % (host, device))) + def time_check(self, hosts): + """ + Check a time synchronization of hosts with current time + + :param hosts: set of hosts to check. 
in the format of: + set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) + """ + + matches = 0 + errors = 0 + recon = Scout("time", self.verbose, self.suppress_errors, + self.timeout) + print("[%s] Checking time-sync" % self._ptime()) + for url, ts_remote, status, ts_start, ts_end in self.pool.imap( + recon.scout, hosts): + if status != 200: + errors = errors + 1 + continue + if (ts_remote < ts_start or ts_remote > ts_end): + diff = abs(ts_end - ts_remote) + ts_end_f = time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(ts_end)) + ts_remote_f = time.strftime( + "%Y-%m-%d %H:%M:%S", + time.localtime(ts_remote)) + + print("!! %s current time is %s, but remote is %s, " + "differs by %.2f sec" % ( + url, + ts_end_f, + ts_remote_f, + diff)) + continue + matches += 1 + if self.verbose: + print("-> %s matches." % url) + print("%s/%s hosts matched, %s error[s] while checking hosts." % ( + matches, len(hosts), errors)) + print("=" * 79) + def main(self): """ Retrieve and report cluster info from hosts running recon middleware. @@ -922,7 +927,7 @@ class SwiftRecon(object): print("=" * 79) usage = ''' usage: %prog [-v] [--suppress] [-a] [-r] [-u] [-d] - [-l] [--md5] [--auditor] [--updater] [--expirer] [--sockstat] + [-l] [-T] [--md5] [--auditor] [--updater] [--expirer] [--sockstat] [--human-readable] \taccount|container|object @@ -964,13 +969,15 @@ class SwiftRecon(object): help="Get cluster socket usage stats") args.add_option('--driveaudit', action="store_true", help="Get drive audit error stats") + args.add_option('--time', '-T', action="store_true", + help="Check time synchronization") args.add_option('--top', type='int', metavar='COUNT', default=0, help='Also show the top COUNT entries in rank order.') args.add_option('--lowest', type='int', metavar='COUNT', default=0, help='Also show the lowest COUNT entries in rank \ order.') args.add_option('--all', action="store_true", - help="Perform all checks. Equal to \t\t\t-arudlq " + help="Perform all checks. 
Equal to \t\t\t-arudlqT " "--md5 --sockstat --auditor --updater --expirer") args.add_option('--region', type="int", help="Only query servers in specified region") @@ -1011,7 +1018,7 @@ class SwiftRecon(object): if options.all: if self.server_type == 'object': self.async_check(hosts) - self.object_replication_check(hosts) + self.replication_check(hosts) self.object_auditor_check(hosts) self.updater_check(hosts) self.expirer_check(hosts) @@ -1031,6 +1038,7 @@ class SwiftRecon(object): self.socket_usage(hosts) self.server_type_check(hosts) self.driveaudit_check(hosts) + self.time_check(hosts) else: if options.async: if self.server_type == 'object': @@ -1040,10 +1048,7 @@ class SwiftRecon(object): if options.unmounted: self.umount_check(hosts) if options.replication: - if self.server_type == 'object': - self.object_replication_check(hosts) - else: - self.replication_check(hosts) + self.replication_check(hosts) if options.auditor: if self.server_type == 'object': self.object_auditor_check(hosts) @@ -1075,6 +1080,8 @@ class SwiftRecon(object): self.socket_usage(hosts) if options.driveaudit: self.driveaudit_check(hosts) + if options.time: + self.time_check(hosts) def main(): diff --git a/swift/cli/ring_builder_analyzer.py b/swift/cli/ring_builder_analyzer.py index 26d964bb8b..8e3d7b5ebe 100644 --- a/swift/cli/ring_builder_analyzer.py +++ b/swift/cli/ring_builder_analyzer.py @@ -96,26 +96,30 @@ ARG_PARSER.add_argument( help="Path to the scenario file") +class ParseCommandError(ValueError): + + def __init__(self, name, round_index, command_index, msg): + msg = "Invalid %s (round %s, command %s): %s" % ( + name, round_index, command_index, msg) + super(ParseCommandError, self).__init__(msg) + + def _parse_weight(round_index, command_index, weight_str): try: weight = float(weight_str) except ValueError as err: - raise ValueError( - "Invalid weight %r (round %d, command %d): %s" - % (weight_str, round_index, command_index, err)) + raise ParseCommandError('weight', round_index, 
command_index, err) if weight < 0: - raise ValueError( - "Negative weight (round %d, command %d)" - % (round_index, command_index)) + raise ParseCommandError('weight', round_index, command_index, + 'cannot be negative') return weight def _parse_add_command(round_index, command_index, command): if len(command) != 3: - raise ValueError( - "Invalid add command (round %d, command %d): expected array of " - "length 3, but got %d" - % (round_index, command_index, len(command))) + raise ParseCommandError( + 'add command', round_index, command_index, + 'expected array of length 3, but got %r' % command) dev_str = command[1] weight_str = command[2] @@ -123,43 +127,47 @@ def _parse_add_command(round_index, command_index, command): try: dev = parse_add_value(dev_str) except ValueError as err: - raise ValueError( - "Invalid device specifier '%s' in add (round %d, command %d): %s" - % (dev_str, round_index, command_index, err)) + raise ParseCommandError('device specifier', round_index, + command_index, err) dev['weight'] = _parse_weight(round_index, command_index, weight_str) if dev['region'] is None: dev['region'] = 1 + default_key_map = { + 'replication_ip': 'ip', + 'replication_port': 'port', + } + for empty_key, default_key in default_key_map.items(): + if dev[empty_key] is None: + dev[empty_key] = dev[default_key] + return ['add', dev] def _parse_remove_command(round_index, command_index, command): if len(command) != 2: - raise ValueError( - "Invalid remove command (round %d, command %d): expected array of " - "length 2, but got %d" - % (round_index, command_index, len(command))) + raise ParseCommandError('remove command', round_index, command_index, + "expected array of length 2, but got %r" % + (command,)) dev_str = command[1] try: dev_id = int(dev_str) except ValueError as err: - raise ValueError( - "Invalid device ID '%s' in remove (round %d, command %d): %s" - % (dev_str, round_index, command_index, err)) + raise ParseCommandError('device ID in remove', + round_index, 
command_index, err) return ['remove', dev_id] def _parse_set_weight_command(round_index, command_index, command): if len(command) != 3: - raise ValueError( - "Invalid remove command (round %d, command %d): expected array of " - "length 3, but got %d" - % (round_index, command_index, len(command))) + raise ParseCommandError('set_weight command', round_index, command_index, + "expected array of length 3, but got %r" % + (command,)) dev_str = command[1] weight_str = command[2] @@ -167,14 +175,21 @@ def _parse_set_weight_command(round_index, command_index, command): try: dev_id = int(dev_str) except ValueError as err: - raise ValueError( - "Invalid device ID '%s' in set_weight (round %d, command %d): %s" - % (dev_str, round_index, command_index, err)) + raise ParseCommandError('device ID in set_weight', + round_index, command_index, err) weight = _parse_weight(round_index, command_index, weight_str) return ['set_weight', dev_id, weight] +def _parse_save_command(round_index, command_index, command): + if len(command) != 2: + raise ParseCommandError( + 'save command', round_index, command_index, + "expected array of length 2, but got %r" % (command,)) + return ['save', command[1]] + + def parse_scenario(scenario_data): """ Takes a serialized scenario and turns it into a data structure suitable @@ -236,9 +251,12 @@ def parse_scenario(scenario_data): if not isinstance(raw_scenario['rounds'], list): raise ValueError("rounds must be an array") - parser_for_command = {'add': _parse_add_command, - 'remove': _parse_remove_command, - 'set_weight': _parse_set_weight_command} + parser_for_command = { + 'add': _parse_add_command, + 'remove': _parse_remove_command, + 'set_weight': _parse_set_weight_command, + 'save': _parse_save_command, + } parsed_scenario['rounds'] = [] for round_index, raw_round in enumerate(raw_scenario['rounds']): @@ -268,18 +286,24 @@ def run_scenario(scenario): rb = builder.RingBuilder(scenario['part_power'], scenario['replicas'], 1) rb.set_overload(scenario['overload']) 
+ + command_map = { + 'add': rb.add_dev, + 'remove': rb.remove_dev, + 'set_weight': rb.set_dev_weight, + 'save': rb.save, + } + for round_index, commands in enumerate(scenario['rounds']): print "Round %d" % (round_index + 1) for command in commands: - if command[0] == 'add': - rb.add_dev(command[1]) - elif command[0] == 'remove': - rb.remove_dev(command[1]) - elif command[0] == 'set_weight': - rb.set_dev_weight(command[1], command[2]) - else: - raise ValueError("unknown command %r" % (command[0],)) + key = command.pop(0) + try: + command_f = command_map[key] + except KeyError: + raise ValueError("unknown command %r" % key) + command_f(*command) rebalance_number = 1 parts_moved, old_balance = rb.rebalance(seed=seed) diff --git a/swift/cli/ringbuilder.py b/swift/cli/ringbuilder.py index 5e9d5b2c00..a6860c98de 100755 --- a/swift/cli/ringbuilder.py +++ b/swift/cli/ringbuilder.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import print_function import logging from errno import EEXIST @@ -71,14 +72,14 @@ def _parse_search_values(argvish): search_values = {} if len(args) > 0: if new_cmd_format or len(args) != 1: - print Commands.search.__doc__.strip() + print(Commands.search.__doc__.strip()) exit(EXIT_ERROR) search_values = parse_search_value(args[0]) else: search_values = parse_search_values_from_opts(opts) return search_values except ValueError as e: - print e + print(e) exit(EXIT_ERROR) @@ -113,7 +114,7 @@ def _parse_list_parts_values(argvish): devs = [] if len(args) > 0: if new_cmd_format: - print Commands.list_parts.__doc__.strip() + print(Commands.list_parts.__doc__.strip()) exit(EXIT_ERROR) for arg in args: @@ -125,7 +126,7 @@ def _parse_list_parts_values(argvish): return devs except ValueError as e: - print e + print(e) exit(EXIT_ERROR) @@ -145,7 +146,7 @@ def _parse_add_values(argvish): parsed_devs = [] if len(args) > 0: if new_cmd_format or len(args) % 2 != 0: - print Commands.add.__doc__.strip() + print(Commands.add.__doc__.strip()) exit(EXIT_ERROR) devs_and_weights = izip(islice(args, 0, len(args), 2), @@ -184,18 +185,18 @@ def _set_weight_values(devs, weight): exit(EXIT_ERROR) if len(devs) > 1: - print 'Matched more than one device:' + print('Matched more than one device:') for dev in devs: - print ' %s' % format_device(dev) + print(' %s' % format_device(dev)) if raw_input('Are you sure you want to update the weight for ' 'these %s devices? 
(y/N) ' % len(devs)) != 'y': - print 'Aborting device modifications' + print('Aborting device modifications') exit(EXIT_ERROR) for dev in devs: builder.set_dev_weight(dev['id'], weight) - print '%s weight set to %s' % (format_device(dev), - dev['weight']) + print('%s weight set to %s' % (format_device(dev), + dev['weight'])) def _parse_set_weight_values(argvish): @@ -209,7 +210,7 @@ def _parse_set_weight_values(argvish): devs = [] if not new_cmd_format: if len(args) % 2 != 0: - print Commands.set_weight.__doc__.strip() + print(Commands.set_weight.__doc__.strip()) exit(EXIT_ERROR) devs_and_weights = izip(islice(argvish, 0, len(argvish), 2), @@ -221,7 +222,7 @@ def _parse_set_weight_values(argvish): _set_weight_values(devs, weight) else: if len(args) != 1: - print Commands.set_weight.__doc__.strip() + print(Commands.set_weight.__doc__.strip()) exit(EXIT_ERROR) devs.extend(builder.search_devs( @@ -229,7 +230,7 @@ def _parse_set_weight_values(argvish): weight = float(args[0]) _set_weight_values(devs, weight) except ValueError as e: - print e + print(e) exit(EXIT_ERROR) @@ -241,12 +242,12 @@ def _set_info_values(devs, change): exit(EXIT_ERROR) if len(devs) > 1: - print 'Matched more than one device:' + print('Matched more than one device:') for dev in devs: - print ' %s' % format_device(dev) + print(' %s' % format_device(dev)) if raw_input('Are you sure you want to update the info for ' 'these %s devices? (y/N) ' % len(devs)) != 'y': - print 'Aborting device modifications' + print('Aborting device modifications') exit(EXIT_ERROR) for dev in devs: @@ -260,14 +261,14 @@ def _set_info_values(devs, change): if check_dev['ip'] == test_dev['ip'] and \ check_dev['port'] == test_dev['port'] and \ check_dev['device'] == test_dev['device']: - print 'Device %d already uses %s:%d/%s.' % \ + print('Device %d already uses %s:%d/%s.' 
% (check_dev['id'], check_dev['ip'], - check_dev['port'], check_dev['device']) + check_dev['port'], check_dev['device'])) exit(EXIT_ERROR) for key in change: dev[key] = change[key] - print 'Device %s is now %s' % (orig_dev_string, - format_device(dev)) + print('Device %s is now %s' % (orig_dev_string, + format_device(dev))) def _parse_set_info_values(argvish): @@ -279,7 +280,7 @@ def _parse_set_info_values(argvish): # but not both. If both are specified, raise an error. if not new_cmd_format: if len(args) % 2 != 0: - print Commands.search.__doc__.strip() + print(Commands.search.__doc__.strip()) exit(EXIT_ERROR) searches_and_changes = izip(islice(argvish, 0, len(argvish), 2), @@ -368,7 +369,7 @@ def _parse_remove_values(argvish): devs = [] if len(args) > 0: if new_cmd_format: - print Commands.remove.__doc__.strip() + print(Commands.remove.__doc__.strip()) exit(EXIT_ERROR) for arg in args: @@ -380,14 +381,14 @@ def _parse_remove_values(argvish): return devs except ValueError as e: - print e + print(e) exit(EXIT_ERROR) class Commands(object): def unknown(): - print 'Unknown command: %s' % argv[2] + print('Unknown command: %s' % argv[2]) exit(EXIT_ERROR) def create(): @@ -399,17 +400,18 @@ swift-ring-builder create than once. """ if len(argv) < 6: - print Commands.create.__doc__.strip() + print(Commands.create.__doc__.strip()) exit(EXIT_ERROR) builder = RingBuilder(int(argv[3]), float(argv[4]), int(argv[5])) - backup_dir = pathjoin(dirname(argv[1]), 'backups') + backup_dir = pathjoin(dirname(builder_file), 'backups') try: mkdir(backup_dir) except OSError as err: if err.errno != EEXIST: raise - builder.save(pathjoin(backup_dir, '%d.' % time() + basename(argv[1]))) - builder.save(argv[1]) + builder.save(pathjoin(backup_dir, + '%d.' % time() + basename(builder_file))) + builder.save(builder_file) exit(EXIT_SUCCESS) def default(): @@ -417,7 +419,7 @@ swift-ring-builder create swift-ring-builder Shows information about the ring and the devices within. 
""" - print '%s, build version %d' % (argv[1], builder.version) + print('%s, build version %d' % (builder_file, builder.version)) regions = 0 zones = 0 balance = 0 @@ -432,18 +434,18 @@ swift-ring-builder balance = builder.get_balance() dispersion_trailer = '' if builder.dispersion is None else ( ', %.02f dispersion' % (builder.dispersion)) - print '%d partitions, %.6f replicas, %d regions, %d zones, ' \ + print('%d partitions, %.6f replicas, %d regions, %d zones, ' '%d devices, %.02f balance%s' % ( builder.parts, builder.replicas, regions, zones, dev_count, - balance, dispersion_trailer) - print 'The minimum number of hours before a partition can be ' \ - 'reassigned is %s' % builder.min_part_hours - print 'The overload factor is %0.2f%% (%.6f)' % ( - builder.overload * 100, builder.overload) + balance, dispersion_trailer)) + print('The minimum number of hours before a partition can be ' + 'reassigned is %s' % builder.min_part_hours) + print('The overload factor is %0.2f%% (%.6f)' % ( + builder.overload * 100, builder.overload)) if builder.devs: - print 'Devices: id region zone ip address port ' \ - 'replication ip replication port name ' \ - 'weight partitions balance meta' + print('Devices: id region zone ip address port ' + 'replication ip replication port name ' + 'weight partitions balance meta') weighted_parts = builder.parts * builder.replicas / \ sum(d['weight'] for d in builder.devs if d is not None) for dev in builder.devs: @@ -483,19 +485,19 @@ swift-ring-builder search Shows information about matching devices. 
""" if len(argv) < 4: - print Commands.search.__doc__.strip() - print - print parse_search_value.__doc__.strip() + print(Commands.search.__doc__.strip()) + print() + print(parse_search_value.__doc__.strip()) exit(EXIT_ERROR) devs = builder.search_devs(_parse_search_values(argv[3:])) if not devs: - print 'No matching devices found' + print('No matching devices found') exit(EXIT_ERROR) - print 'Devices: id region zone ip address port ' \ - 'replication ip replication port name weight partitions ' \ - 'balance meta' + print('Devices: id region zone ip address port ' + 'replication ip replication port name weight partitions ' + 'balance meta') weighted_parts = builder.parts * builder.replicas / \ sum(d['weight'] for d in builder.devs if d is not None) for dev in devs: @@ -538,30 +540,30 @@ swift-ring-builder list_parts could take a while to run. """ if len(argv) < 4: - print Commands.list_parts.__doc__.strip() - print - print parse_search_value.__doc__.strip() + print(Commands.list_parts.__doc__.strip()) + print() + print(parse_search_value.__doc__.strip()) exit(EXIT_ERROR) if not builder._replica2part2dev: print('Specified builder file \"%s\" is not rebalanced yet. ' - 'Please rebalance first.' % argv[1]) + 'Please rebalance first.' % builder_file) exit(EXIT_ERROR) devs = _parse_list_parts_values(argv[3:]) if not devs: - print 'No matching devices found' + print('No matching devices found') exit(EXIT_ERROR) sorted_partition_count = _find_parts(devs) if not sorted_partition_count: - print 'No matching devices found' + print('No matching devices found') exit(EXIT_ERROR) - print 'Partition Matches' + print('Partition Matches') for partition, count in sorted_partition_count: - print '%9d %7d' % (partition, count) + print('%9d %7d' % (partition, count)) exit(EXIT_SUCCESS) def add(): @@ -587,7 +589,7 @@ swift-ring-builder add can make multiple device changes and rebalance them all just once. 
""" if len(argv) < 5: - print Commands.add.__doc__.strip() + print(Commands.add.__doc__.strip()) exit(EXIT_ERROR) try: @@ -598,20 +600,20 @@ swift-ring-builder add if dev['ip'] == new_dev['ip'] and \ dev['port'] == new_dev['port'] and \ dev['device'] == new_dev['device']: - print 'Device %d already uses %s:%d/%s.' % \ + print('Device %d already uses %s:%d/%s.' % (dev['id'], dev['ip'], - dev['port'], dev['device']) - print "The on-disk ring builder is unchanged.\n" + dev['port'], dev['device'])) + print("The on-disk ring builder is unchanged.\n") exit(EXIT_ERROR) dev_id = builder.add_dev(new_dev) print('Device %s with %s weight got id %s' % (format_device(new_dev), new_dev['weight'], dev_id)) except ValueError as err: - print err - print 'The on-disk ring builder is unchanged.' + print(err) + print('The on-disk ring builder is unchanged.') exit(EXIT_ERROR) - builder.save(argv[1]) + builder.save(builder_file) exit(EXIT_SUCCESS) def set_weight(): @@ -636,14 +638,14 @@ swift-ring-builder set_weight """ # if len(argv) < 5 or len(argv) % 2 != 1: if len(argv) < 5: - print Commands.set_weight.__doc__.strip() - print - print parse_search_value.__doc__.strip() + print(Commands.set_weight.__doc__.strip()) + print() + print(parse_search_value.__doc__.strip()) exit(EXIT_ERROR) _parse_set_weight_values(argv[3:]) - builder.save(argv[1]) + builder.save(builder_file) exit(EXIT_SUCCESS) def set_info(): @@ -677,18 +679,18 @@ swift-ring-builder set_info just update the meta data for device id 74. """ if len(argv) < 5: - print Commands.set_info.__doc__.strip() - print - print parse_search_value.__doc__.strip() + print(Commands.set_info.__doc__.strip()) + print() + print(parse_search_value.__doc__.strip()) exit(EXIT_ERROR) try: _parse_set_info_values(argv[3:]) except ValueError as err: - print err + print(err) exit(EXIT_ERROR) - builder.save(argv[1]) + builder.save(builder_file) exit(EXIT_SUCCESS) def remove(): @@ -714,9 +716,9 @@ swift-ring-builder search once. 
""" if len(argv) < 4: - print Commands.remove.__doc__.strip() - print - print parse_search_value.__doc__.strip() + print(Commands.remove.__doc__.strip()) + print() + print(parse_search_value.__doc__.strip()) exit(EXIT_ERROR) devs = _parse_remove_values(argv[3:]) @@ -727,19 +729,19 @@ swift-ring-builder search exit(EXIT_ERROR) if len(devs) > 1: - print 'Matched more than one device:' + print('Matched more than one device:') for dev in devs: - print ' %s' % format_device(dev) + print(' %s' % format_device(dev)) if raw_input('Are you sure you want to remove these %s ' 'devices? (y/N) ' % len(devs)) != 'y': - print 'Aborting device removals' + print('Aborting device removals') exit(EXIT_ERROR) for dev in devs: try: builder.remove_dev(dev['id']) except exceptions.RingBuilderError as e: - print '-' * 79 + print('-' * 79) print( 'An error occurred while removing device with id %d\n' 'This usually means that you attempted to remove\n' @@ -748,12 +750,12 @@ swift-ring-builder search 'The on-disk ring builder is unchanged.\n' 'Original exception message: %s' % (dev['id'], e)) - print '-' * 79 + print('-' * 79) exit(EXIT_ERROR) - print '%s marked for removal and will ' \ - 'be removed next rebalance.' % format_device(dev) - builder.save(argv[1]) + print('%s marked for removal and will ' + 'be removed next rebalance.' % format_device(dev)) + builder.save(builder_file) exit(EXIT_SUCCESS) def rebalance(): @@ -793,18 +795,18 @@ swift-ring-builder rebalance [options] last_balance = builder.get_balance() parts, balance = builder.rebalance(seed=get_seed(3)) except exceptions.RingBuilderError as e: - print '-' * 79 + print('-' * 79) print("An error has occurred during ring validation. Common\n" "causes of failure are rings that are empty or do not\n" "have enough devices to accommodate the replica count.\n" "Original exception message:\n %s" % (e,)) - print '-' * 79 + print('-' * 79) exit(EXIT_ERROR) if not (parts or options.force): - print 'No partitions could be reassigned.' 
- print 'Either none need to be or none can be due to ' \ - 'min_part_hours [%s].' % builder.min_part_hours + print('No partitions could be reassigned.') + print('Either none need to be or none can be due to ' + 'min_part_hours [%s].' % builder.min_part_hours) exit(EXIT_WARNING) # If we set device's weight to zero, currently balance will be set # special value(MAX_BALANCE) until zero weighted device return all @@ -813,29 +815,29 @@ swift-ring-builder rebalance [options] if not options.force and \ not devs_changed and abs(last_balance - balance) < 1 and \ not (last_balance == MAX_BALANCE and balance == MAX_BALANCE): - print 'Cowardly refusing to save rebalance as it did not change ' \ - 'at least 1%.' + print('Cowardly refusing to save rebalance as it did not change ' + 'at least 1%.') exit(EXIT_WARNING) try: builder.validate() except exceptions.RingValidationError as e: - print '-' * 79 + print('-' * 79) print("An error has occurred during ring validation. Common\n" "causes of failure are rings that are empty or do not\n" "have enough devices to accommodate the replica count.\n" "Original exception message:\n %s" % (e,)) - print '-' * 79 + print('-' * 79) exit(EXIT_ERROR) - print ('Reassigned %d (%.02f%%) partitions. ' - 'Balance is now %.02f. ' - 'Dispersion is now %.02f' % ( - parts, 100.0 * parts / builder.parts, - balance, - builder.dispersion)) + print('Reassigned %d (%.02f%%) partitions. ' + 'Balance is now %.02f. ' + 'Dispersion is now %.02f' % ( + parts, 100.0 * parts / builder.parts, + balance, + builder.dispersion)) status = EXIT_SUCCESS if builder.dispersion > 0: - print '-' * 79 + print('-' * 79) print( 'NOTE: Dispersion of %.06f indicates some parts are not\n' ' optimally dispersed.\n\n' @@ -843,21 +845,21 @@ swift-ring-builder rebalance [options] ' the overload or review the dispersion report.' 
% builder.dispersion) status = EXIT_WARNING - print '-' * 79 + print('-' * 79) elif balance > 5 and balance / 100.0 > builder.overload: - print '-' * 79 - print 'NOTE: Balance of %.02f indicates you should push this ' % \ - balance - print ' ring, wait at least %d hours, and rebalance/repush.' \ - % builder.min_part_hours - print '-' * 79 + print('-' * 79) + print('NOTE: Balance of %.02f indicates you should push this ' % + balance) + print(' ring, wait at least %d hours, and rebalance/repush.' + % builder.min_part_hours) + print('-' * 79) status = EXIT_WARNING ts = time() builder.get_ring().save( pathjoin(backup_dir, '%d.' % ts + basename(ring_file))) - builder.save(pathjoin(backup_dir, '%d.' % ts + basename(argv[1]))) + builder.save(pathjoin(backup_dir, '%d.' % ts + basename(builder_file))) builder.get_ring().save(ring_file) - builder.save(argv[1]) + builder.save(builder_file) exit(status) def dispersion(): @@ -892,7 +894,7 @@ swift-ring-builder dispersion [options] status = EXIT_SUCCESS if not builder._replica2part2dev: print('Specified builder file \"%s\" is not rebalanced yet. ' - 'Please rebalance first.' % argv[1]) + 'Please rebalance first.' 
% builder_file) exit(EXIT_ERROR) usage = Commands.dispersion.__doc__.strip() parser = optparse.OptionParser(usage) @@ -905,12 +907,12 @@ swift-ring-builder dispersion [options] search_filter = None report = dispersion_report(builder, search_filter=search_filter, verbose=options.verbose) - print 'Dispersion is %.06f, Balance is %.06f, Overload is %0.2f%%' % ( - builder.dispersion, builder.get_balance(), builder.overload * 100) + print('Dispersion is %.06f, Balance is %.06f, Overload is %0.2f%%' % ( + builder.dispersion, builder.get_balance(), builder.overload * 100)) if report['worst_tier']: status = EXIT_WARNING - print 'Worst tier is %.06f (%s)' % (report['max_dispersion'], - report['worst_tier']) + print('Worst tier is %.06f (%s)' % (report['max_dispersion'], + report['worst_tier'])) if report['graph']: replica_range = range(int(math.ceil(builder.replicas + 1))) part_count_width = '%%%ds' % max(len(str(builder.parts)), 5) @@ -929,13 +931,19 @@ swift-ring-builder dispersion [options] for tier_name, dispersion in report['graph']: replica_counts_repr = replica_counts_tmpl % tuple( dispersion['replicas']) - print ('%-' + str(tier_width) + 's ' + part_count_width + - ' %6.02f %6d %s') % (tier_name, - dispersion['placed_parts'], - dispersion['dispersion'], - dispersion['max_replicas'], - replica_counts_repr, - ) + template = ''.join([ + '%-', str(tier_width), 's ', + part_count_width, + ' %6.02f %6d %s', + ]) + args = ( + tier_name, + dispersion['placed_parts'], + dispersion['dispersion'], + dispersion['max_replicas'], + replica_counts_repr, + ) + print(template % args) exit(status) def validate(): @@ -957,11 +965,11 @@ swift-ring-builder write_ring ring_data = builder.get_ring() if not ring_data._replica2part2dev_id: if ring_data.devs: - print 'Warning: Writing a ring with no partition ' \ - 'assignments but with devices; did you forget to run ' \ - '"rebalance"?' 
+ print('Warning: Writing a ring with no partition ' + 'assignments but with devices; did you forget to run ' + '"rebalance"?') else: - print 'Warning: Writing an empty ring' + print('Warning: Writing an empty ring') ring_data.save( pathjoin(backup_dir, '%d.' % time() + basename(ring_file))) ring_data.save(ring_file) @@ -976,8 +984,8 @@ swift-ring-builder write_builder [min_part_hours] you can change it with set_min_part_hours. """ if exists(builder_file): - print 'Cowardly refusing to overwrite existing ' \ - 'Ring Builder file: %s' % builder_file + print('Cowardly refusing to overwrite existing ' + 'Ring Builder file: %s' % builder_file) exit(EXIT_ERROR) if len(argv) > 3: min_part_hours = int(argv[3]) @@ -1014,7 +1022,7 @@ swift-ring-builder write_builder [min_part_hours] def pretend_min_part_hours_passed(): builder.pretend_min_part_hours_passed() - builder.save(argv[1]) + builder.save(builder_file) exit(EXIT_SUCCESS) def set_min_part_hours(): @@ -1025,12 +1033,12 @@ swift-ring-builder set_min_part_hours to determine this more easily than scanning logs. """ if len(argv) < 4: - print Commands.set_min_part_hours.__doc__.strip() + print(Commands.set_min_part_hours.__doc__.strip()) exit(EXIT_ERROR) builder.change_min_part_hours(int(argv[3])) - print 'The minimum number of hours before a partition can be ' \ - 'reassigned is now set to %s' % argv[3] - builder.save(argv[1]) + print('The minimum number of hours before a partition can be ' + 'reassigned is now set to %s' % argv[3]) + builder.save(builder_file) exit(EXIT_SUCCESS) def set_replicas(): @@ -1044,25 +1052,25 @@ swift-ring-builder set_replicas A rebalance is needed to make the change take effect. """ if len(argv) < 4: - print Commands.set_replicas.__doc__.strip() + print(Commands.set_replicas.__doc__.strip()) exit(EXIT_ERROR) new_replicas = argv[3] try: new_replicas = float(new_replicas) except ValueError: - print Commands.set_replicas.__doc__.strip() - print "\"%s\" is not a valid number." 
% new_replicas + print(Commands.set_replicas.__doc__.strip()) + print("\"%s\" is not a valid number." % new_replicas) exit(EXIT_ERROR) if new_replicas < 1: - print "Replica count must be at least 1." + print("Replica count must be at least 1.") exit(EXIT_ERROR) builder.set_replicas(new_replicas) - print 'The replica count is now %.6f.' % builder.replicas - print 'The change will take effect after the next rebalance.' - builder.save(argv[1]) + print('The replica count is now %.6f.' % builder.replicas) + print('The change will take effect after the next rebalance.') + builder.save(builder_file) exit(EXIT_SUCCESS) def set_overload(): @@ -1073,7 +1081,7 @@ swift-ring-builder set_overload [%] A rebalance is needed to make the change take effect. """ if len(argv) < 4: - print Commands.set_overload.__doc__.strip() + print(Commands.set_overload.__doc__.strip()) exit(EXIT_ERROR) new_overload = argv[3] @@ -1085,27 +1093,27 @@ swift-ring-builder set_overload [%] try: new_overload = float(new_overload) except ValueError: - print Commands.set_overload.__doc__.strip() - print "%r is not a valid number." % new_overload + print(Commands.set_overload.__doc__.strip()) + print("%r is not a valid number." % new_overload) exit(EXIT_ERROR) if percent: new_overload *= 0.01 if new_overload < 0: - print "Overload must be non-negative." + print("Overload must be non-negative.") exit(EXIT_ERROR) if new_overload > 1 and not percent: - print "!?! Warning overload is greater than 100% !?!" + print("!?! Warning overload is greater than 100% !?!") status = EXIT_WARNING else: status = EXIT_SUCCESS builder.set_overload(new_overload) - print 'The overload factor is now %0.2f%% (%.6f)' % ( - builder.overload * 100, builder.overload) - print 'The change will take effect after the next rebalance.' 
- builder.save(argv[1]) + print('The overload factor is now %0.2f%% (%.6f)' % ( + builder.overload * 100, builder.overload)) + print('The change will take effect after the next rebalance.') + builder.save(builder_file) exit(status) @@ -1117,43 +1125,46 @@ def main(arguments=None): argv = sys_argv if len(argv) < 2: - print "swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s\n" % \ - globals() - print Commands.default.__doc__.strip() - print + print("swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s\n" % + globals()) + print(Commands.default.__doc__.strip()) + print() cmds = [c for c, f in Commands.__dict__.items() if f.__doc__ and c[0] != '_' and c != 'default'] cmds.sort() for cmd in cmds: - print Commands.__dict__[cmd].__doc__.strip() - print - print parse_search_value.__doc__.strip() - print + print(Commands.__dict__[cmd].__doc__.strip()) + print() + print(parse_search_value.__doc__.strip()) + print() for line in wrap(' '.join(cmds), 79, initial_indent='Quick list: ', subsequent_indent=' '): - print line + print(line) print('Exit codes: 0 = operation successful\n' ' 1 = operation completed with warnings\n' ' 2 = error') exit(EXIT_SUCCESS) builder_file, ring_file = parse_builder_ring_filename_args(argv) + if builder_file != argv[1]: + print('Note: using %s instead of %s as builder file' % ( + builder_file, argv[1])) try: builder = RingBuilder.load(builder_file) except exceptions.UnPicklingError as e: - print e + print(e) exit(EXIT_ERROR) except (exceptions.FileNotFoundError, exceptions.PermissionError) as e: if len(argv) < 3 or argv[2] not in('create', 'write_builder'): - print e + print(e) exit(EXIT_ERROR) except Exception as e: print('Problem occurred while reading builder file: %s. 
%s' % - (argv[1], e)) + (builder_file, e)) exit(EXIT_ERROR) - backup_dir = pathjoin(dirname(argv[1]), 'backups') + backup_dir = pathjoin(dirname(builder_file), 'backups') try: mkdir(backup_dir) except OSError as err: @@ -1166,10 +1177,10 @@ def main(arguments=None): command = argv[2] if argv[0].endswith('-safe'): try: - with lock_parent_directory(abspath(argv[1]), 15): + with lock_parent_directory(abspath(builder_file), 15): Commands.__dict__.get(command, Commands.unknown.im_func)() except exceptions.LockTimeout: - print "Ring/builder dir currently locked." + print("Ring/builder dir currently locked.") exit(2) else: Commands.__dict__.get(command, Commands.unknown.im_func)() diff --git a/swift/common/constraints.py b/swift/common/constraints.py index 647f875609..36f9d5eae8 100644 --- a/swift/common/constraints.py +++ b/swift/common/constraints.py @@ -13,11 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +import functools import os import urllib import time from urllib import unquote -from ConfigParser import ConfigParser, NoSectionError, NoOptionError + +from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError from swift.common import utils, exceptions from swift.common.swob import HTTPBadRequest, HTTPLengthRequired, \ @@ -405,28 +407,33 @@ def check_destination_header(req): '/') -def check_account_format(req, account): +def check_name_format(req, name, target_type): """ - Validate that the header contains valid account name. - We assume the caller ensures that - destination header is present in req.headers. + Validate that the header contains valid account or container name. 
:param req: HTTP request object - :returns: A properly encoded account name + :param name: header value to validate + :param target_type: which header is being validated (Account or Container) + :returns: A properly encoded account name or container name :raise: HTTPPreconditionFailed if account header is not well formatted. """ - if not account: + if not name: raise HTTPPreconditionFailed( request=req, - body='Account name cannot be empty') - if isinstance(account, unicode): - account = account.encode('utf-8') - if '/' in account: + body='%s name cannot be empty' % target_type) + if isinstance(name, unicode): + name = name.encode('utf-8') + if '/' in name: raise HTTPPreconditionFailed( request=req, - body='Account name cannot contain slashes') - return account + body='%s name cannot contain slashes' % target_type) + return name + +check_account_format = functools.partial(check_name_format, + target_type='Account') +check_container_format = functools.partial(check_name_format, + target_type='Container') def valid_api_version(version): diff --git a/swift/common/container_sync_realms.py b/swift/common/container_sync_realms.py index 083c5e1fd9..7b441da9de 100644 --- a/swift/common/container_sync_realms.py +++ b/swift/common/container_sync_realms.py @@ -13,13 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import ConfigParser import errno import hashlib import hmac import os import time +from six.moves import configparser + from swift import gettext_ as _ from swift.common.utils import get_valid_utf8_str @@ -61,9 +62,9 @@ class ContainerSyncRealms(object): if mtime != self.conf_path_mtime: self.conf_path_mtime = mtime try: - conf = ConfigParser.SafeConfigParser() + conf = configparser.SafeConfigParser() conf.read(self.conf_path) - except ConfigParser.ParsingError as err: + except configparser.ParsingError as err: self.logger.error( _('Could not load %r: %s'), self.conf_path, err) else: @@ -72,11 +73,11 @@ class ContainerSyncRealms(object): 'DEFAULT', 'mtime_check_interval') self.next_mtime_check = \ now + self.mtime_check_interval - except ConfigParser.NoOptionError: + except configparser.NoOptionError: self.mtime_check_interval = 300 self.next_mtime_check = \ now + self.mtime_check_interval - except (ConfigParser.ParsingError, ValueError) as err: + except (configparser.ParsingError, ValueError) as err: self.logger.error( _('Error in %r with mtime_check_interval: %s'), self.conf_path, err) diff --git a/swift/common/db.py b/swift/common/db.py index 308a1bb83e..c65943dbe9 100644 --- a/swift/common/db.py +++ b/swift/common/db.py @@ -23,7 +23,7 @@ from uuid import uuid4 import sys import time import errno -import cPickle as pickle +import six.moves.cPickle as pickle from swift import gettext_ as _ from tempfile import mkstemp diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py index 151a070c07..7a6e8d549f 100644 --- a/swift/common/db_replicator.py +++ b/swift/common/db_replicator.py @@ -187,7 +187,8 @@ class Replicator(Daemon): self.stats = {'attempted': 0, 'success': 0, 'failure': 0, 'ts_repl': 0, 'no_change': 0, 'hashmatch': 0, 'rsync': 0, 'diff': 0, 'remove': 0, 'empty': 0, 'remote_merge': 0, - 'start': time.time(), 'diff_capped': 0} + 'start': time.time(), 'diff_capped': 0, + 'failure_nodes': {}} def _report_stats(self): """Report the current 
stats to the logs.""" @@ -212,6 +213,13 @@ class Replicator(Daemon): ('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl', 'empty', 'diff_capped')])) + def _add_failure_stats(self, failure_devs_info): + for node, dev in failure_devs_info: + self.stats['failure'] += 1 + failure_devs = self.stats['failure_nodes'].setdefault(node, {}) + failure_devs.setdefault(dev, 0) + failure_devs[dev] += 1 + def _rsync_file(self, db_file, remote_file, whole_file=True, different_region=False): """ @@ -479,7 +487,10 @@ class Replicator(Daemon): quarantine_db(broker.db_file, broker.db_type) else: self.logger.exception(_('ERROR reading db %s'), object_file) - self.stats['failure'] += 1 + nodes = self.ring.get_part_nodes(int(partition)) + self._add_failure_stats([(failure_dev['replication_ip'], + failure_dev['device']) + for failure_dev in nodes]) self.logger.increment('failures') return # The db is considered deleted if the delete_timestamp value is greater @@ -494,6 +505,7 @@ class Replicator(Daemon): self.logger.timing_since('timing', start_time) return responses = [] + failure_devs_info = set() nodes = self.ring.get_part_nodes(int(partition)) local_dev = None for node in nodes: @@ -532,7 +544,8 @@ class Replicator(Daemon): self.logger.exception(_('ERROR syncing %(file)s with node' ' %(node)s'), {'file': object_file, 'node': node}) - self.stats['success' if success else 'failure'] += 1 + if not success: + failure_devs_info.add((node['replication_ip'], node['device'])) self.logger.increment('successes' if success else 'failures') responses.append(success) try: @@ -543,7 +556,17 @@ class Replicator(Daemon): if not shouldbehere and all(responses): # If the db shouldn't be on this node and has been successfully # synced to all of its peers, it can be removed. 
- self.delete_db(broker) + if not self.delete_db(broker): + failure_devs_info.update( + [(failure_dev['replication_ip'], failure_dev['device']) + for failure_dev in repl_nodes]) + + target_devs_info = set([(target_dev['replication_ip'], + target_dev['device']) + for target_dev in repl_nodes]) + self.stats['success'] += len(target_devs_info - failure_devs_info) + self._add_failure_stats(failure_devs_info) + self.logger.timing_since('timing', start_time) def delete_db(self, broker): @@ -558,9 +581,11 @@ class Replicator(Daemon): if err.errno not in (errno.ENOENT, errno.ENOTEMPTY): self.logger.exception( _('ERROR while trying to clean up %s') % suf_dir) + return False self.stats['remove'] += 1 device_name = self.extract_device(object_file) self.logger.increment('removes.' + device_name) + return True def extract_device(self, object_file): """ @@ -592,6 +617,10 @@ class Replicator(Daemon): node['replication_port']): if self.mount_check and not ismount( os.path.join(self.root, node['device'])): + self._add_failure_stats( + [(failure_dev['replication_ip'], + failure_dev['device']) + for failure_dev in self.ring.devs if failure_dev]) self.logger.warn( _('Skipping %(device)s as it is not mounted') % node) continue diff --git a/swift/common/direct_client.py b/swift/common/direct_client.py index 9d36757649..3058309b8b 100644 --- a/swift/common/direct_client.py +++ b/swift/common/direct_client.py @@ -20,10 +20,10 @@ through the proxy. 
import os import socket -from httplib import HTTPException from time import time from eventlet import sleep, Timeout +from six.moves.http_client import HTTPException from swift.common.bufferedhttp import http_connect from swift.common.exceptions import ClientException @@ -401,7 +401,7 @@ def direct_put_object(node, part, account, container, name, contents, headers['Content-Length'] = '0' if isinstance(contents, basestring): contents = [contents] - #Incase the caller want to insert an object with specific age + # Incase the caller want to insert an object with specific age add_ts = 'X-Timestamp' not in headers if content_length is None: @@ -543,8 +543,8 @@ def retry(func, *args, **kwargs): # Shouldn't actually get down here, but just in case. if args and 'ip' in args[0]: raise ClientException('Raise too many retries', - http_host=args[ - 0]['ip'], http_port=args[0]['port'], + http_host=args[0]['ip'], + http_port=args[0]['port'], http_device=args[0]['device']) else: raise ClientException('Raise too many retries') diff --git a/swift/common/http.py b/swift/common/http.py index 1eccf9b957..070404c3a3 100644 --- a/swift/common/http.py +++ b/swift/common/http.py @@ -67,7 +67,7 @@ def is_server_error(status): # List of HTTP status codes ############################################################################### -## 1xx Informational +# 1xx Informational ############################################################################### HTTP_CONTINUE = 100 @@ -77,7 +77,7 @@ HTTP_CHECKPOINT = 103 HTTP_REQUEST_URI_TOO_LONG = 122 ############################################################################### -## 2xx Success +# 2xx Success ############################################################################### HTTP_OK = 200 @@ -91,7 +91,7 @@ HTTP_MULTI_STATUS = 207 # WebDAV HTTP_IM_USED = 226 ############################################################################### -## 3xx Redirection +# 3xx Redirection 
############################################################################### HTTP_MULTIPLE_CHOICES = 300 @@ -105,7 +105,7 @@ HTTP_TEMPORARY_REDIRECT = 307 HTTP_RESUME_INCOMPLETE = 308 ############################################################################### -## 4xx Client Error +# 4xx Client Error ############################################################################### HTTP_BAD_REQUEST = 400 @@ -141,7 +141,7 @@ HTTP_BLOCKED_BY_WINDOWS_PARENTAL_CONTROLS = 450 HTTP_CLIENT_CLOSED_REQUEST = 499 ############################################################################### -## 5xx Server Error +# 5xx Server Error ############################################################################### HTTP_INTERNAL_SERVER_ERROR = 500 diff --git a/swift/common/manager.py b/swift/common/manager.py index e9aa8ea138..ca1bc3ca26 100644 --- a/swift/common/manager.py +++ b/swift/common/manager.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import print_function import functools import errno import os @@ -62,22 +63,22 @@ def setup_env(): resource.setrlimit(resource.RLIMIT_NOFILE, (MAX_DESCRIPTORS, MAX_DESCRIPTORS)) except ValueError: - print _("WARNING: Unable to modify file descriptor limit. " - "Running as non-root?") + print(_("WARNING: Unable to modify file descriptor limit. " + "Running as non-root?")) try: resource.setrlimit(resource.RLIMIT_DATA, (MAX_MEMORY, MAX_MEMORY)) except ValueError: - print _("WARNING: Unable to modify memory limit. " - "Running as non-root?") + print(_("WARNING: Unable to modify memory limit. " + "Running as non-root?")) try: resource.setrlimit(resource.RLIMIT_NPROC, (MAX_PROCS, MAX_PROCS)) except ValueError: - print _("WARNING: Unable to modify max process limit. " - "Running as non-root?") + print(_("WARNING: Unable to modify max process limit. 
" + "Running as non-root?")) # Set PYTHON_EGG_CACHE if it isn't already set os.environ.setdefault('PYTHON_EGG_CACHE', '/tmp') @@ -217,7 +218,7 @@ class Manager(object): try: status += server.interact(**kwargs) except KeyboardInterrupt: - print _('\nuser quit') + print(_('\nuser quit')) self.stop(**kwargs) break elif kwargs.get('wait', True): @@ -254,7 +255,7 @@ class Manager(object): for server in self.servers: signaled_pids = server.stop(**kwargs) if not signaled_pids: - print _('No %s running') % server + print(_('No %s running') % server) else: server_pids[server] = signaled_pids @@ -267,7 +268,7 @@ class Manager(object): for server, killed_pid in watch_server_pids(server_pids, interval=kill_wait, **kwargs): - print _("%s (%s) appears to have stopped") % (server, killed_pid) + print(_("%s (%s) appears to have stopped") % (server, killed_pid)) killed_pids.add(killed_pid) if not killed_pids.symmetric_difference(signaled_pids): # all processes have been stopped @@ -277,8 +278,8 @@ class Manager(object): for server, pids in server_pids.items(): if not killed_pids.issuperset(pids): # some pids of this server were not killed - print _('Waited %s seconds for %s to die; giving up') % ( - kill_wait, server) + print(_('Waited %s seconds for %s to die; giving up') % ( + kill_wait, server)) return 1 @command @@ -461,15 +462,15 @@ class Server(object): # maybe there's a config file(s) out there, but I couldn't find it! 
if not kwargs.get('quiet'): if number: - print _('Unable to locate config number %s for %s' % ( - number, self.server)) + print(_('Unable to locate config number %s for %s') + % (number, self.server)) else: - print _('Unable to locate config for %s' % (self.server)) + print(_('Unable to locate config for %s') % self.server) if kwargs.get('verbose') and not kwargs.get('quiet'): if found_conf_files: - print _('Found configs:') + print(_('Found configs:')) for i, conf_file in enumerate(found_conf_files): - print ' %d) %s' % (i + 1, conf_file) + print(' %d) %s' % (i + 1, conf_file)) return conf_files @@ -514,27 +515,27 @@ class Server(object): pids = {} for pid_file, pid in self.iter_pid_files(**kwargs): if not pid: # Catches None and 0 - print _('Removing pid file %s with invalid pid') % pid_file + print (_('Removing pid file %s with invalid pid') % pid_file) remove_file(pid_file) continue try: if sig != signal.SIG_DFL: - print _('Signal %s pid: %s signal: %s') % (self.server, - pid, sig) + print(_('Signal %s pid: %s signal: %s') % (self.server, + pid, sig)) safe_kill(pid, sig, 'swift-%s' % self.server) except InvalidPidFileException as e: if kwargs.get('verbose'): - print _('Removing pid file %s with wrong pid %d') \ - % (pid_file, pid) + print(_('Removing pid file %s with wrong pid %d') % ( + pid_file, pid)) remove_file(pid_file) except OSError as e: if e.errno == errno.ESRCH: # pid does not exist if kwargs.get('verbose'): - print _("Removing stale pid file %s") % pid_file + print(_("Removing stale pid file %s") % pid_file) remove_file(pid_file) elif e.errno == errno.EPERM: - print _("No permission to signal PID %d") % pid + print(_("No permission to signal PID %d") % pid) else: # process exists pids[pid] = pid_file @@ -579,14 +580,14 @@ class Server(object): kwargs['quiet'] = True conf_files = self.conf_files(**kwargs) if conf_files: - print _("%s #%d not running (%s)") % (self.server, number, - conf_files[0]) + print(_("%s #%d not running (%s)") % (self.server, 
number, + conf_files[0])) else: - print _("No %s running") % self.server + print(_("No %s running") % self.server) return 1 for pid, pid_file in pids.items(): conf_file = self.get_conf_file_name(pid_file) - print _("%s running (%s - %s)") % (self.server, pid, conf_file) + print(_("%s running (%s - %s)") % (self.server, pid, conf_file)) return 0 def spawn(self, conf_file, once=False, wait=True, daemon=True, **kwargs): @@ -638,7 +639,7 @@ class Server(object): # no-daemon anyway, but this is quieter proc.wait() if output: - print output + print(output) start = time.time() # wait for process to die (output may just be a warning) while time.time() - start < WARNING_WAIT: @@ -679,13 +680,14 @@ class Server(object): # any unstarted instances if conf_file in conf_files: already_started = True - print _("%s running (%s - %s)") % (self.server, pid, conf_file) + print(_("%s running (%s - %s)") % + (self.server, pid, conf_file)) elif not kwargs.get('number', 0): already_started = True - print _("%s running (%s - %s)") % (self.server, pid, pid_file) + print(_("%s running (%s - %s)") % (self.server, pid, pid_file)) if already_started: - print _("%s already started...") % self.server + print(_("%s already started...") % self.server) return {} if self.server not in START_ONCE_SERVERS: @@ -697,13 +699,13 @@ class Server(object): msg = _('Running %s once') % self.server else: msg = _('Starting %s') % self.server - print '%s...(%s)' % (msg, conf_file) + print('%s...(%s)' % (msg, conf_file)) try: pid = self.spawn(conf_file, **kwargs) except OSError as e: if e.errno == errno.ENOENT: - #TODO(clayg): should I check if self.cmd exists earlier? - print _("%s does not exist") % self.cmd + # TODO(clayg): should I check if self.cmd exists earlier? 
+ print(_("%s does not exist") % self.cmd) break else: raise diff --git a/swift/common/memcached.py b/swift/common/memcached.py index 526f9f666a..f3d0eae5d6 100644 --- a/swift/common/memcached.py +++ b/swift/common/memcached.py @@ -44,7 +44,7 @@ version is at: http://github.com/memcached/memcached/blob/1.4.2/doc/protocol.txt """ -import cPickle as pickle +import six.moves.cPickle as pickle import logging import time from bisect import bisect @@ -443,7 +443,7 @@ class MemcacheRing(object): with Timeout(self._io_timeout): sock.sendall(msg) # Wait for the set to complete - for _ in range(len(mapping)): + for line in range(len(mapping)): fp.readline() self._return_conn(server, fp, sock) return diff --git a/swift/common/middleware/dlo.py b/swift/common/middleware/dlo.py index 9330ccb8cb..882d21f305 100644 --- a/swift/common/middleware/dlo.py +++ b/swift/common/middleware/dlo.py @@ -14,7 +14,9 @@ # limitations under the License. import os -from ConfigParser import ConfigParser, NoSectionError, NoOptionError + +from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError + from hashlib import md5 from swift.common import constraints from swift.common.exceptions import ListingIterError, SegmentError diff --git a/swift/common/middleware/keystoneauth.py b/swift/common/middleware/keystoneauth.py index 6f70ede5f4..d57f132ff9 100644 --- a/swift/common/middleware/keystoneauth.py +++ b/swift/common/middleware/keystoneauth.py @@ -410,7 +410,7 @@ class KeystoneAuth(object): user_id, user_name = env_identity['user'] referrers, roles = swift_acl.parse_acl(getattr(req, 'acl', None)) - #allow OPTIONS requests to proceed as normal + # allow OPTIONS requests to proceed as normal if req.method == 'OPTIONS': return @@ -526,7 +526,7 @@ class KeystoneAuth(object): except ValueError: return HTTPNotFound(request=req) - #allow OPTIONS requests to proceed as normal + # allow OPTIONS requests to proceed as normal if req.method == 'OPTIONS': return diff --git 
a/swift/common/middleware/memcache.py b/swift/common/middleware/memcache.py index 1b66716b15..e846749cb0 100644 --- a/swift/common/middleware/memcache.py +++ b/swift/common/middleware/memcache.py @@ -14,7 +14,8 @@ # limitations under the License. import os -from ConfigParser import ConfigParser, NoSectionError, NoOptionError + +from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError from swift.common.memcached import (MemcacheRing, CONN_TIMEOUT, POOL_TIMEOUT, IO_TIMEOUT, TRY_COUNT) diff --git a/swift/common/middleware/recon.py b/swift/common/middleware/recon.py index 88d5243a4d..b0d1a1a526 100644 --- a/swift/common/middleware/recon.py +++ b/swift/common/middleware/recon.py @@ -15,6 +15,7 @@ import errno import os +import time from swift import gettext_ as _ from swift import __version__ as swiftver @@ -133,19 +134,19 @@ class ReconMiddleware(object): def get_replication_info(self, recon_type): """get replication info""" + replication_list = ['replication_time', + 'replication_stats', + 'replication_last'] if recon_type == 'account': - return self._from_recon_cache(['replication_time', - 'replication_stats', - 'replication_last'], + return self._from_recon_cache(replication_list, self.account_recon_cache) elif recon_type == 'container': - return self._from_recon_cache(['replication_time', - 'replication_stats', - 'replication_last'], + return self._from_recon_cache(replication_list, self.container_recon_cache) elif recon_type == 'object': - return self._from_recon_cache(['object_replication_time', - 'object_replication_last'], + replication_list += ['object_replication_time', + 'object_replication_last'] + return self._from_recon_cache(replication_list, self.object_recon_cache) else: return None @@ -328,6 +329,11 @@ class ReconMiddleware(object): raise return sockstat + def get_time(self): + """get current time""" + + return time.time() + def GET(self, req): root, rcheck, rtype = req.split_path(1, 3, True) all_rtypes = ['account', 'container', 
'object'] @@ -340,7 +346,7 @@ class ReconMiddleware(object): elif rcheck == 'replication' and rtype in all_rtypes: content = self.get_replication_info(rtype) elif rcheck == 'replication' and rtype is None: - #handle old style object replication requests + # handle old style object replication requests content = self.get_replication_info('object') elif rcheck == "devices": content = self.get_device_info() @@ -368,6 +374,8 @@ class ReconMiddleware(object): content = self.get_version() elif rcheck == "driveaudit": content = self.get_driveaudit_error() + elif rcheck == "time": + content = self.get_time() else: content = "Invalid path: %s" % req.path return Response(request=req, status="404 Not Found", diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py index 5bfb3036fe..853a51c661 100644 --- a/swift/common/middleware/slo.py +++ b/swift/common/middleware/slo.py @@ -148,10 +148,11 @@ metadata which can be used for stats purposes. from six.moves import range -from cStringIO import StringIO from datetime import datetime import mimetypes import re +import six +from six import BytesIO from hashlib import md5 from swift.common.exceptions import ListingIterError, SegmentError from swift.common.swob import Request, HTTPBadRequest, HTTPServerError, \ @@ -681,8 +682,10 @@ class StaticLargeObject(object): env['CONTENT_TYPE'] += ";swift_bytes=%d" % total_size env['HTTP_X_STATIC_LARGE_OBJECT'] = 'True' json_data = json.dumps(data_for_storage) + if six.PY3: + json_data = json_data.encode('utf-8') env['CONTENT_LENGTH'] = str(len(json_data)) - env['wsgi.input'] = StringIO(json_data) + env['wsgi.input'] = BytesIO(json_data) slo_put_context = SloPutContext(self, slo_etag) return slo_put_context.handle_slo_put(req, start_response) diff --git a/swift/common/middleware/tempauth.py b/swift/common/middleware/tempauth.py index dfde519f42..5e3f980e64 100644 --- a/swift/common/middleware/tempauth.py +++ b/swift/common/middleware/tempauth.py @@ -531,7 +531,7 @@ class 
TempAuth(object): return None if req.method == 'OPTIONS': - #allow OPTIONS requests to proceed as normal + # allow OPTIONS requests to proceed as normal self.logger.debug("Allow OPTIONS request.") return None @@ -674,15 +674,15 @@ class TempAuth(object): user = req.headers.get('x-auth-user') if not user or ':' not in user: self.logger.increment('token_denied') - return HTTPUnauthorized(request=req, headers= - {'Www-Authenticate': - 'Swift realm="%s"' % account}) + auth = 'Swift realm="%s"' % account + return HTTPUnauthorized(request=req, + headers={'Www-Authenticate': auth}) account2, user = user.split(':', 1) if account != account2: self.logger.increment('token_denied') - return HTTPUnauthorized(request=req, headers= - {'Www-Authenticate': - 'Swift realm="%s"' % account}) + auth = 'Swift realm="%s"' % account + return HTTPUnauthorized(request=req, + headers={'Www-Authenticate': auth}) key = req.headers.get('x-storage-pass') if not key: key = req.headers.get('x-auth-key') @@ -692,9 +692,9 @@ class TempAuth(object): user = req.headers.get('x-storage-user') if not user or ':' not in user: self.logger.increment('token_denied') - return HTTPUnauthorized(request=req, headers= - {'Www-Authenticate': - 'Swift realm="unknown"'}) + auth = 'Swift realm="unknown"' + return HTTPUnauthorized(request=req, + headers={'Www-Authenticate': auth}) account, user = user.split(':', 1) key = req.headers.get('x-auth-key') if not key: @@ -711,14 +711,14 @@ class TempAuth(object): account_user = account + ':' + user if account_user not in self.users: self.logger.increment('token_denied') - return HTTPUnauthorized(request=req, headers= - {'Www-Authenticate': - 'Swift realm="%s"' % account}) + auth = 'Swift realm="%s"' % account + return HTTPUnauthorized(request=req, + headers={'Www-Authenticate': auth}) if self.users[account_user]['key'] != key: self.logger.increment('token_denied') - return HTTPUnauthorized(request=req, headers= - {'Www-Authenticate': - 'Swift realm="unknown"'}) + auth = 
'Swift realm="unknown"' + return HTTPUnauthorized(request=req, + headers={'Www-Authenticate': auth}) account_id = self.users[account_user]['url'].rsplit('/', 1)[-1] # Get memcache client memcache_client = cache_from_env(req.environ) diff --git a/swift/common/middleware/tempurl.py b/swift/common/middleware/tempurl.py index cf3afe3de4..10278bf7b5 100644 --- a/swift/common/middleware/tempurl.py +++ b/swift/common/middleware/tempurl.py @@ -122,11 +122,13 @@ from urllib import urlencode from urlparse import parse_qs from swift.proxy.controllers.base import get_account_info, get_container_info -from swift.common.swob import HeaderKeyDict, HTTPUnauthorized +from swift.common.swob import HeaderKeyDict, HTTPUnauthorized, HTTPBadRequest from swift.common.utils import split_path, get_valid_utf8_str, \ register_swift_info, get_hmac, streq_const_time, quote +DISALLOWED_INCOMING_HEADERS = 'x-object-manifest' + #: Default headers to remove from incoming requests. Simply a whitespace #: delimited list of header names and names can optionally end with '*' to #: indicate a prefix match. DEFAULT_INCOMING_ALLOW_HEADERS is a list of @@ -150,6 +152,10 @@ DEFAULT_OUTGOING_REMOVE_HEADERS = 'x-object-meta-*' DEFAULT_OUTGOING_ALLOW_HEADERS = 'x-object-meta-public-*' +CONTAINER_SCOPE = 'container' +ACCOUNT_SCOPE = 'account' + + def get_tempurl_keys_from_metadata(meta): """ Extracts the tempurl keys from metadata. 
@@ -170,6 +176,38 @@ def disposition_format(filename): quote(filename, safe=' /'), quote(filename)) +def authorize_same_account(account_to_match): + + def auth_callback_same_account(req): + try: + _ver, acc, _rest = req.split_path(2, 3, True) + except ValueError: + return HTTPUnauthorized(request=req) + + if acc == account_to_match: + return None + else: + return HTTPUnauthorized(request=req) + + return auth_callback_same_account + + +def authorize_same_container(account_to_match, container_to_match): + + def auth_callback_same_container(req): + try: + _ver, acc, con, _rest = req.split_path(3, 4, True) + except ValueError: + return HTTPUnauthorized(request=req) + + if acc == account_to_match and con == container_to_match: + return None + else: + return HTTPUnauthorized(request=req) + + return auth_callback_same_container + + class TempURL(object): """ WSGI Middleware to grant temporary URLs specific access to Swift @@ -230,6 +268,10 @@ class TempURL(object): #: The methods allowed with Temp URLs. 
self.methods = methods + self.disallowed_headers = set( + 'HTTP_' + h.upper().replace('-', '_') + for h in DISALLOWED_INCOMING_HEADERS.split()) + headers = DEFAULT_INCOMING_REMOVE_HEADERS if 'incoming_remove_headers' in conf: headers = conf['incoming_remove_headers'] @@ -298,10 +340,10 @@ class TempURL(object): return self.app(env, start_response) if not temp_url_sig or not temp_url_expires: return self._invalid(env, start_response) - account = self._get_account(env) + account, container = self._get_account_and_container(env) if not account: return self._invalid(env, start_response) - keys = self._get_keys(env, account) + keys = self._get_keys(env) if not keys: return self._invalid(env, start_response) if env['REQUEST_METHOD'] == 'HEAD': @@ -316,15 +358,32 @@ class TempURL(object): else: hmac_vals = self._get_hmacs(env, temp_url_expires, keys) - # While it's true that any() will short-circuit, this doesn't affect - # the timing-attack resistance since the only way this will - # short-circuit is when a valid signature is passed in. - is_valid_hmac = any(streq_const_time(temp_url_sig, hmac) - for hmac in hmac_vals) + is_valid_hmac = False + hmac_scope = None + for hmac, scope in hmac_vals: + # While it's true that we short-circuit, this doesn't affect the + # timing-attack resistance since the only way this will + # short-circuit is when a valid signature is passed in. + if streq_const_time(temp_url_sig, hmac): + is_valid_hmac = True + hmac_scope = scope + break if not is_valid_hmac: return self._invalid(env, start_response) + # disallowed headers prevent accidently allowing upload of a pointer + # to data that the PUT tempurl would not otherwise allow access for. + # It should be safe to provide a GET tempurl for data that an + # untrusted client just uploaded with a PUT tempurl. 
+ resp = self._clean_disallowed_headers(env, start_response) + if resp: + return resp self._clean_incoming_headers(env) - env['swift.authorize'] = lambda req: None + + if hmac_scope == ACCOUNT_SCOPE: + env['swift.authorize'] = authorize_same_account(account) + else: + env['swift.authorize'] = authorize_same_container(account, + container) env['swift.authorize_override'] = True env['REMOTE_USER'] = '.wsgi.tempurl' qs = {'temp_url_sig': temp_url_sig, @@ -365,22 +424,23 @@ class TempURL(object): return self.app(env, _start_response) - def _get_account(self, env): + def _get_account_and_container(self, env): """ - Returns just the account for the request, if it's an object - request and one of the configured methods; otherwise, None is + Returns just the account and container for the request, if it's an + object request and one of the configured methods; otherwise, None is returned. :param env: The WSGI environment for the request. - :returns: Account str or None. + :returns: (Account str, container str) or (None, None). """ if env['REQUEST_METHOD'] in self.methods: try: ver, acc, cont, obj = split_path(env['PATH_INFO'], 4, 4, True) except ValueError: - return None + return (None, None) if ver == 'v1' and obj.strip('/'): - return acc + return (acc, cont) + return (None, None) def _get_temp_url_info(self, env): """ @@ -410,18 +470,23 @@ class TempURL(object): inline = True return temp_url_sig, temp_url_expires, filename, inline - def _get_keys(self, env, account): + def _get_keys(self, env): """ Returns the X-[Account|Container]-Meta-Temp-URL-Key[-2] header values - for the account or container, or an empty list if none are set. + for the account or container, or an empty list if none are set. Each + value comes as a 2-tuple (key, scope), where scope is either + CONTAINER_SCOPE or ACCOUNT_SCOPE. Returns 0-4 elements depending on how many keys are set in the account's or container's metadata. :param env: The WSGI environment for the request. 
- :param account: Account str. - :returns: [X-Account-Meta-Temp-URL-Key str value if set, - X-Account-Meta-Temp-URL-Key-2 str value if set] + :returns: [ + (X-Account-Meta-Temp-URL-Key str value, ACCOUNT_SCOPE) if set, + (X-Account-Meta-Temp-URL-Key-2 str value, ACCOUNT_SCOPE if set, + (X-Container-Meta-Temp-URL-Key str value, CONTAINER_SCOPE) if set, + (X-Container-Meta-Temp-URL-Key-2 str value, CONTAINER_SCOPE if set, + ] """ account_info = get_account_info(env, self.app, swift_source='TU') account_keys = get_tempurl_keys_from_metadata(account_info['meta']) @@ -430,25 +495,28 @@ class TempURL(object): container_keys = get_tempurl_keys_from_metadata( container_info.get('meta', [])) - return account_keys + container_keys + return ([(ak, ACCOUNT_SCOPE) for ak in account_keys] + + [(ck, CONTAINER_SCOPE) for ck in container_keys]) - def _get_hmacs(self, env, expires, keys, request_method=None): + def _get_hmacs(self, env, expires, scoped_keys, request_method=None): """ :param env: The WSGI environment for the request. :param expires: Unix timestamp as an int for when the URL expires. - :param keys: Key strings, from the X-Account-Meta-Temp-URL-Key[-2] of - the account. + :param scoped_keys: (key, scope) tuples like _get_keys() returns :param request_method: Optional override of the request in the WSGI env. For example, if a HEAD does not match, you may wish to override with GET to still allow the HEAD. 
+ + :returns: a list of (hmac, scope) 2-tuples """ if not request_method: request_method = env['REQUEST_METHOD'] - return [get_hmac( - request_method, env['PATH_INFO'], expires, key) for key in keys] + return [ + (get_hmac(request_method, env['PATH_INFO'], expires, key), scope) + for (key, scope) in scoped_keys] def _invalid(self, env, start_response): """ @@ -465,6 +533,22 @@ class TempURL(object): body = '401 Unauthorized: Temp URL invalid\n' return HTTPUnauthorized(body=body)(env, start_response) + def _clean_disallowed_headers(self, env, start_response): + """ + Validate the absense of disallowed headers for "unsafe" operations. + + :returns: None for safe operations or swob.HTTPBadResponse if the + request includes disallowed headers. + """ + if env['REQUEST_METHOD'] in ('GET', 'HEAD', 'OPTIONS'): + return + for h in env: + if h in self.disallowed_headers: + return HTTPBadRequest( + body='The header %r is not allowed in this tempurl' % + h[len('HTTP_'):].title().replace('_', '-'))( + env, start_response) + def _clean_incoming_headers(self, env): """ Removes any headers from the WSGI environment as per the diff --git a/swift/common/middleware/versioned_writes.py b/swift/common/middleware/versioned_writes.py new file mode 100644 index 0000000000..6a25c66461 --- /dev/null +++ b/swift/common/middleware/versioned_writes.py @@ -0,0 +1,496 @@ +# Copyright (c) 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Object versioning in swift is implemented by setting a flag on the container +to tell swift to version all objects in the container. The flag is the +``X-Versions-Location`` header on the container, and its value is the +container where the versions are stored. It is recommended to use a different +``X-Versions-Location`` container for each container that is being versioned. + +When data is ``PUT`` into a versioned container (a container with the +versioning flag turned on), the existing data in the file is redirected to a +new object and the data in the ``PUT`` request is saved as the data for the +versioned object. The new object name (for the previous version) is +``//``, where ``length`` +is the 3-character zero-padded hexadecimal length of the ```` and +```` is the timestamp of when the previous version was created. + +A ``GET`` to a versioned object will return the current version of the object +without having to do any request redirects or metadata lookups. + +A ``POST`` to a versioned object will update the object metadata as normal, +but will not create a new version of the object. In other words, new versions +are only created when the content of the object changes. + +A ``DELETE`` to a versioned object will only remove the current version of the +object. If you have 5 total versions of the object, you must delete the +object 5 times to completely remove the object. + +-------------------------------------------------- +How to Enable Object Versioning in a Swift Cluster +-------------------------------------------------- + +This middleware was written as an effort to refactor parts of the proxy server, +so this functionality was already available in previous releases and every +attempt was made to maintain backwards compatibility. To allow operators to +perform a seamless upgrade, it is not required to add the middleware to the +proxy pipeline and the flag ``allow_versions`` in the container server +configuration files are still valid. 
In future releases, ``allow_versions`` +will be deprecated in favor of adding this middleware to the pipeline to enable +or disable the feature. + +In case the middleware is added to the proxy pipeline, you must also +set ``allow_versioned_writes`` to ``True`` in the middleware options +to enable the information about this middleware to be returned in a /info +request. + +Upgrade considerations: If ``allow_versioned_writes`` is set in the filter +configuration, you can leave the ``allow_versions`` flag in the container +server configuration files untouched. If you decide to disable or remove the +``allow_versions`` flag, you must re-set any existing containers that had +the 'X-Versions-Location' flag configured so that it can now be tracked by the +versioned_writes middleware. + +----------------------- +Examples Using ``curl`` +----------------------- + +First, create a container with the ``X-Versions-Location`` header or add the +header to an existing container. Also make sure the container referenced by +the ``X-Versions-Location`` exists. 
In this example, the name of that +container is "versions":: + + curl -i -XPUT -H "X-Auth-Token: " \ +-H "X-Versions-Location: versions" http:///container + curl -i -XPUT -H "X-Auth-Token: " http:///versions + +Create an object (the first version):: + + curl -i -XPUT --data-binary 1 -H "X-Auth-Token: " \ +http:///container/myobject + +Now create a new version of that object:: + + curl -i -XPUT --data-binary 2 -H "X-Auth-Token: " \ +http:///container/myobject + +See a listing of the older versions of the object:: + + curl -i -H "X-Auth-Token: " \ +http:///versions?prefix=008myobject/ + +Now delete the current version of the object and see that the older version is +gone:: + + curl -i -XDELETE -H "X-Auth-Token: " \ +http:///container/myobject + curl -i -H "X-Auth-Token: " \ +http:///versions?prefix=008myobject/ + +--------------------------------------------------- +How to Disable Object Versioning in a Swift Cluster +--------------------------------------------------- + +If you want to disable all functionality, set ``allow_versioned_writes`` to +``False`` in the middleware options. 
+ +Disable versioning from a container (x is any value except empty):: + + curl -i -XPOST -H "X-Auth-Token: " \ +-H "X-Remove-Versions-Location: x" http:///container +""" + +import time +from urllib import quote, unquote +from swift.common.utils import get_logger, Timestamp, json, \ + register_swift_info, config_true_value +from swift.common.request_helpers import get_sys_meta_prefix +from swift.common.wsgi import WSGIContext, make_pre_authed_request +from swift.common.swob import Request, HTTPException +from swift.common.constraints import ( + check_account_format, check_container_format, check_destination_header) +from swift.proxy.controllers.base import get_container_info +from swift.common.http import ( + is_success, is_client_error, HTTP_NOT_FOUND) +from swift.common.swob import HTTPPreconditionFailed, HTTPServiceUnavailable, \ + HTTPServerError +from swift.common.exceptions import ( + ListingIterNotFound, ListingIterError) + + +class VersionedWritesContext(WSGIContext): + + def __init__(self, wsgi_app, logger): + WSGIContext.__init__(self, wsgi_app) + self.logger = logger + + def _listing_iter(self, account_name, lcontainer, lprefix, env): + for page in self._listing_pages_iter(account_name, + lcontainer, lprefix, env): + for item in page: + yield item + + def _listing_pages_iter(self, account_name, lcontainer, lprefix, env): + marker = '' + while True: + lreq = make_pre_authed_request( + env, method='GET', swift_source='VW', + path='/v1/%s/%s' % (account_name, lcontainer)) + lreq.environ['QUERY_STRING'] = \ + 'format=json&prefix=%s&marker=%s' % (quote(lprefix), + quote(marker)) + lresp = lreq.get_response(self.app) + if not is_success(lresp.status_int): + if lresp.status_int == HTTP_NOT_FOUND: + raise ListingIterNotFound() + elif is_client_error(lresp.status_int): + raise HTTPPreconditionFailed() + else: + raise ListingIterError() + + if not lresp.body: + break + + sublisting = json.loads(lresp.body) + if not sublisting: + break + marker = 
sublisting[-1]['name'].encode('utf-8') + yield sublisting + + def handle_obj_versions_put(self, req, object_versions, + object_name, policy_index): + ret = None + + # do a HEAD request to check object versions + _headers = {'X-Newest': 'True', + 'X-Backend-Storage-Policy-Index': policy_index, + 'x-auth-token': req.headers.get('x-auth-token')} + + # make a pre_auth request in case the user has write access + # to container, but not READ. This was allowed in previous version + # (i.e., before middleware) so keeping the same behavior here + head_req = make_pre_authed_request( + req.environ, path=req.path_info, + headers=_headers, method='HEAD', swift_source='VW') + hresp = head_req.get_response(self.app) + + is_dlo_manifest = 'X-Object-Manifest' in req.headers or \ + 'X-Object-Manifest' in hresp.headers + + # if there's an existing object, then copy it to + # X-Versions-Location + if is_success(hresp.status_int) and not is_dlo_manifest: + lcontainer = object_versions.split('/')[0] + prefix_len = '%03x' % len(object_name) + lprefix = prefix_len + object_name + '/' + ts_source = hresp.environ.get('swift_x_timestamp') + if ts_source is None: + ts_source = time.mktime(time.strptime( + hresp.headers['last-modified'], + '%a, %d %b %Y %H:%M:%S GMT')) + new_ts = Timestamp(ts_source).internal + vers_obj_name = lprefix + new_ts + copy_headers = { + 'Destination': '%s/%s' % (lcontainer, vers_obj_name), + 'x-auth-token': req.headers.get('x-auth-token')} + + # COPY implementation sets X-Newest to True when it internally + # does a GET on source object. So, we don't have to explicity + # set it in request headers here. 
+ copy_req = make_pre_authed_request( + req.environ, path=req.path_info, + headers=copy_headers, method='COPY', swift_source='VW') + copy_resp = copy_req.get_response(self.app) + + if is_success(copy_resp.status_int): + # success versioning previous existing object + # return None and handle original request + ret = None + else: + if is_client_error(copy_resp.status_int): + # missing container or bad permissions + ret = HTTPPreconditionFailed(request=req) + else: + # could not copy the data, bail + ret = HTTPServiceUnavailable(request=req) + + else: + if hresp.status_int == HTTP_NOT_FOUND or is_dlo_manifest: + # nothing to version + # return None and handle original request + ret = None + else: + # if not HTTP_NOT_FOUND, return error immediately + ret = hresp + + return ret + + def handle_obj_versions_delete(self, req, object_versions, + account_name, container_name, object_name): + lcontainer = object_versions.split('/')[0] + prefix_len = '%03x' % len(object_name) + lprefix = prefix_len + object_name + '/' + item_list = [] + try: + for _item in self._listing_iter(account_name, lcontainer, lprefix, + req.environ): + item_list.append(_item) + except ListingIterNotFound: + pass + except HTTPPreconditionFailed: + return HTTPPreconditionFailed(request=req) + except ListingIterError: + return HTTPServerError(request=req) + + if item_list: + # we're about to start making COPY requests - need to validate the + # write access to the versioned container + if 'swift.authorize' in req.environ: + container_info = get_container_info( + req.environ, self.app) + req.acl = container_info.get('write_acl') + aresp = req.environ['swift.authorize'](req) + if aresp: + return aresp + + while len(item_list) > 0: + previous_version = item_list.pop() + + # there are older versions so copy the previous version to the + # current object and delete the previous version + prev_obj_name = previous_version['name'].encode('utf-8') + + copy_path = '/v1/' + account_name + '/' + \ + lcontainer + '/' 
+ prev_obj_name + + copy_headers = {'X-Newest': 'True', + 'Destination': container_name + '/' + object_name, + 'x-auth-token': req.headers.get('x-auth-token')} + + copy_req = make_pre_authed_request( + req.environ, path=copy_path, + headers=copy_headers, method='COPY', swift_source='VW') + copy_resp = copy_req.get_response(self.app) + + # if the version isn't there, keep trying with previous version + if copy_resp.status_int == HTTP_NOT_FOUND: + continue + + if not is_success(copy_resp.status_int): + if is_client_error(copy_resp.status_int): + # some user error, maybe permissions + return HTTPPreconditionFailed(request=req) + else: + # could not copy the data, bail + return HTTPServiceUnavailable(request=req) + + # reset these because the COPY changed them + new_del_req = make_pre_authed_request( + req.environ, path=copy_path, method='DELETE', + swift_source='VW') + req = new_del_req + + # remove 'X-If-Delete-At', since it is not for the older copy + if 'X-If-Delete-At' in req.headers: + del req.headers['X-If-Delete-At'] + break + + # handle DELETE request here in case it was modified + return req.get_response(self.app) + + def handle_container_request(self, env, start_response): + app_resp = self._app_call(env) + if self._response_headers is None: + self._response_headers = [] + sysmeta_version_hdr = get_sys_meta_prefix('container') + \ + 'versions-location' + location = '' + for key, val in self._response_headers: + if key.lower() == sysmeta_version_hdr: + location = val + + if location: + self._response_headers.extend([('X-Versions-Location', location)]) + + start_response(self._response_status, + self._response_headers, + self._response_exc_info) + return app_resp + + +class VersionedWritesMiddleware(object): + + def __init__(self, app, conf): + self.app = app + self.conf = conf + self.logger = get_logger(conf, log_route='versioned_writes') + + def container_request(self, req, start_response, enabled): + sysmeta_version_hdr = get_sys_meta_prefix('container') + 
\ + 'versions-location' + + # set version location header as sysmeta + if 'X-Versions-Location' in req.headers: + val = req.headers.get('X-Versions-Location') + if val: + # diferently from previous version, we are actually + # returning an error if user tries to set versions location + # while feature is explicitly disabled. + if not config_true_value(enabled) and \ + req.method in ('PUT', 'POST'): + raise HTTPPreconditionFailed( + request=req, content_type='text/plain', + body='Versioned Writes is disabled') + + location = check_container_format(req, val) + req.headers[sysmeta_version_hdr] = location + + # reset original header to maintain sanity + # now only sysmeta is source of Versions Location + req.headers['X-Versions-Location'] = '' + + # if both headers are in the same request + # adding location takes precendence over removing + if 'X-Remove-Versions-Location' in req.headers: + del req.headers['X-Remove-Versions-Location'] + else: + # empty value is the same as X-Remove-Versions-Location + req.headers['X-Remove-Versions-Location'] = 'x' + + # handle removing versions container + val = req.headers.get('X-Remove-Versions-Location') + if val: + req.headers.update({sysmeta_version_hdr: ''}) + req.headers.update({'X-Versions-Location': ''}) + del req.headers['X-Remove-Versions-Location'] + + # send request and translate sysmeta headers from response + vw_ctx = VersionedWritesContext(self.app, self.logger) + return vw_ctx.handle_container_request(req.environ, start_response) + + def object_request(self, req, version, account, container, obj, + allow_versioned_writes): + account_name = unquote(account) + container_name = unquote(container) + object_name = unquote(obj) + container_info = None + resp = None + is_enabled = config_true_value(allow_versioned_writes) + if req.method in ('PUT', 'DELETE'): + container_info = get_container_info( + req.environ, self.app) + elif req.method == 'COPY' and 'Destination' in req.headers: + if 'Destination-Account' in 
req.headers: + account_name = req.headers.get('Destination-Account') + account_name = check_account_format(req, account_name) + container_name, object_name = check_destination_header(req) + req.environ['PATH_INFO'] = "/%s/%s/%s/%s" % ( + version, account_name, container_name, object_name) + container_info = get_container_info( + req.environ, self.app) + + if not container_info: + return self.app + + # To maintain backwards compatibility, container version + # location could be stored as sysmeta or not, need to check both. + # If stored as sysmeta, check if middleware is enabled. If sysmeta + # is not set, but versions property is set in container_info, then + # for backwards compatibility feature is enabled. + object_versions = container_info.get( + 'sysmeta', {}).get('versions-location') + if object_versions and isinstance(object_versions, unicode): + object_versions = object_versions.encode('utf-8') + elif not object_versions: + object_versions = container_info.get('versions') + # if allow_versioned_writes is not set in the configuration files + # but 'versions' is configured, enable feature to maintain + # backwards compatibility + if not allow_versioned_writes and object_versions: + is_enabled = True + + if is_enabled and object_versions: + object_versions = unquote(object_versions) + vw_ctx = VersionedWritesContext(self.app, self.logger) + if req.method in ('PUT', 'COPY'): + policy_idx = req.headers.get( + 'X-Backend-Storage-Policy-Index', + container_info['storage_policy']) + resp = vw_ctx.handle_obj_versions_put( + req, object_versions, object_name, policy_idx) + else: # handle DELETE + resp = vw_ctx.handle_obj_versions_delete( + req, object_versions, account_name, + container_name, object_name) + + if resp: + return resp + else: + return self.app + + def __call__(self, env, start_response): + # making a duplicate, because if this is a COPY request, we will + # modify the PATH_INFO to find out if the 'Destination' is in a + # versioned container + req = 
Request(env.copy()) + try: + (version, account, container, obj) = req.split_path(3, 4, True) + except ValueError: + return self.app(env, start_response) + + # In case allow_versioned_writes is set in the filter configuration, + # the middleware becomes the authority on whether object + # versioning is enabled or not. In case it is not set, then + # the option in the container configuration is still checked + # for backwards compatibility + + # For a container request, first just check if option is set, + # can be either true or false. + # If set, check if enabled when actually trying to set container + # header. If not set, let request be handled by container server + # for backwards compatibility. + # For an object request, also check if option is set (either T or F). + # If set, check if enabled when checking versions container in + # sysmeta property. If it is not set check 'versions' property in + # container_info + allow_versioned_writes = self.conf.get('allow_versioned_writes') + if allow_versioned_writes and container and not obj: + try: + return self.container_request(req, start_response, + allow_versioned_writes) + except HTTPException as error_response: + return error_response(env, start_response) + elif obj and req.method in ('PUT', 'COPY', 'DELETE'): + try: + return self.object_request( + req, version, account, container, obj, + allow_versioned_writes)(env, start_response) + except HTTPException as error_response: + return error_response(env, start_response) + else: + return self.app(env, start_response) + + +def filter_factory(global_conf, **local_conf): + conf = global_conf.copy() + conf.update(local_conf) + if config_true_value(conf.get('allow_versioned_writes')): + register_swift_info('versioned_writes') + + def obj_versions_filter(app): + return VersionedWritesMiddleware(app, conf) + + return obj_versions_filter diff --git a/swift/common/middleware/x_profile/html_viewer.py b/swift/common/middleware/x_profile/html_viewer.py index 
7fb1c5dcd3..1132c6aa59 100644 --- a/swift/common/middleware/x_profile/html_viewer.py +++ b/swift/common/middleware/x_profile/html_viewer.py @@ -423,7 +423,7 @@ class HTMLViewer(object): plt.yticks(y_pos, nfls) plt.xlabel(names[metric_selected]) plt.title('Profile Statistics (by %s)' % names[metric_selected]) - #plt.gcf().tight_layout(pad=1.2) + # plt.gcf().tight_layout(pad=1.2) with tempfile.TemporaryFile() as profile_img: plt.savefig(profile_img, format='png', dpi=300) profile_img.seek(0) diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py index e870364ce0..2feff95d3e 100644 --- a/swift/common/ring/builder.py +++ b/swift/common/ring/builder.py @@ -20,7 +20,7 @@ import itertools import logging import math import random -import cPickle as pickle +import six.moves.cPickle as pickle from copy import deepcopy from array import array diff --git a/swift/common/ring/ring.py b/swift/common/ring/ring.py index 861eccbf84..c62015fa3d 100644 --- a/swift/common/ring/ring.py +++ b/swift/common/ring/ring.py @@ -14,7 +14,7 @@ # limitations under the License. import array -import cPickle as pickle +import six.moves.cPickle as pickle import inspect from collections import defaultdict from gzip import GzipFile diff --git a/swift/common/storage_policy.py b/swift/common/storage_policy.py index 415aa55377..b3b460656c 100644 --- a/swift/common/storage_policy.py +++ b/swift/common/storage_policy.py @@ -11,12 +11,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ConfigParser import ConfigParser + import os import string import textwrap import six +from six.moves.configparser import ConfigParser + from swift.common.utils import ( config_true_value, SWIFT_CONF_FILE, whataremyips) from swift.common.ring import Ring, RingData diff --git a/swift/common/swob.py b/swift/common/swob.py index 36f871415e..fdcbaf3e9b 100644 --- a/swift/common/swob.py +++ b/swift/common/swob.py @@ -36,7 +36,6 @@ needs to change. 
""" from collections import defaultdict -from StringIO import StringIO import UserDict import time from functools import partial @@ -49,6 +48,9 @@ import random import functools import inspect +from six import BytesIO +from six import StringIO + from swift.common.utils import reiterate, split_path, Timestamp, pairs, \ close_if_possible from swift.common.exceptions import InvalidTimestamp @@ -129,10 +131,10 @@ class _UTC(tzinfo): UTC = _UTC() -class WsgiStringIO(StringIO): +class WsgiBytesIO(BytesIO): """ This class adds support for the additional wsgi.input methods defined on - eventlet.wsgi.Input to the StringIO class which would otherwise be a fine + eventlet.wsgi.Input to the BytesIO class which would otherwise be a fine stand-in for the file-like object in the WSGI environment. """ @@ -478,8 +480,8 @@ class Range(object): After initialization, "range.ranges" is populated with a list of (start, end) tuples denoting the requested ranges. - If there were any syntactically-invalid byte-range-spec values, - "range.ranges" will be an empty list, per the relevant RFC: + If there were any syntactically-invalid byte-range-spec values, the + constructor will raise a ValueError, per the relevant RFC: "The recipient of a byte-range-set that includes one or more syntactically invalid byte-range-spec values MUST ignore the header field that includes @@ -758,16 +760,16 @@ def _req_environ_property(environ_field): def _req_body_property(): """ Set and retrieve the Request.body parameter. It consumes wsgi.input and - returns the results. On assignment, uses a WsgiStringIO to create a new + returns the results. On assignment, uses a WsgiBytesIO to create a new wsgi.input. 
""" def getter(self): body = self.environ['wsgi.input'].read() - self.environ['wsgi.input'] = WsgiStringIO(body) + self.environ['wsgi.input'] = WsgiBytesIO(body) return body def setter(self, value): - self.environ['wsgi.input'] = WsgiStringIO(value) + self.environ['wsgi.input'] = WsgiBytesIO(value) self.environ['CONTENT_LENGTH'] = str(len(value)) return property(getter, setter, doc="Get and set the request body str") @@ -835,7 +837,7 @@ class Request(object): :param path: encoded, parsed, and unquoted into PATH_INFO :param environ: WSGI environ dictionary :param headers: HTTP headers - :param body: stuffed in a WsgiStringIO and hung on wsgi.input + :param body: stuffed in a WsgiBytesIO and hung on wsgi.input :param kwargs: any environ key with an property setter """ headers = headers or {} @@ -864,16 +866,16 @@ class Request(object): 'SERVER_PROTOCOL': 'HTTP/1.0', 'wsgi.version': (1, 0), 'wsgi.url_scheme': parsed_path.scheme or 'http', - 'wsgi.errors': StringIO(''), + 'wsgi.errors': StringIO(), 'wsgi.multithread': False, 'wsgi.multiprocess': False } env.update(environ) if body is not None: - env['wsgi.input'] = WsgiStringIO(body) + env['wsgi.input'] = WsgiBytesIO(body) env['CONTENT_LENGTH'] = str(len(body)) elif 'wsgi.input' not in env: - env['wsgi.input'] = WsgiStringIO('') + env['wsgi.input'] = WsgiBytesIO() req = Request(env) for key, val in headers.items(): req.headers[key] = val @@ -980,7 +982,7 @@ class Request(object): env.update({ 'REQUEST_METHOD': 'GET', 'CONTENT_LENGTH': '0', - 'wsgi.input': WsgiStringIO(''), + 'wsgi.input': WsgiBytesIO(), }) return Request(env) @@ -1127,6 +1129,7 @@ class Response(object): self.request = request self.body = body self.app_iter = app_iter + self.response_iter = None self.status = status self.boundary = "%.32x" % random.randint(0, 256 ** 16) if request: @@ -1322,6 +1325,17 @@ class Response(object): return [body] return [''] + def fix_conditional_response(self): + """ + You may call this once you have set the content_length 
to the whole + object length and body or app_iter to reset the content_length + properties on the request. + + It is ok to not call this method, the conditional response will be + maintained for you when you __call__ the response. + """ + self.response_iter = self._response_iter(self.app_iter, self._body) + def absolute_location(self): """ Attempt to construct an absolute location. @@ -1372,12 +1386,15 @@ if not self.request: self.request = Request(env) self.environ = env - app_iter = self._response_iter(self.app_iter, self._body) + + if not self.response_iter: + self.response_iter = self._response_iter(self.app_iter, self._body) + if 'location' in self.headers and \ not env.get('swift.leave_relative_location'): self.location = self.absolute_location() start_response(self.status, self.headers.items()) - return app_iter + return self.response_iter class HTTPException(Response, Exception): diff --git a/swift/common/utils.py b/swift/common/utils.py index 40a5b85232..0dbbda6c09 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -38,16 +38,14 @@ from urllib import quote as _quote from contextlib import contextmanager, closing import ctypes import ctypes.util -from ConfigParser import ConfigParser, NoSectionError, NoOptionError, \ - RawConfigParser from optparse import OptionParser -from Queue import Queue, Empty + from tempfile import mkstemp, NamedTemporaryFile try: import simplejson as json except ImportError: import json -import cPickle as pickle +import six.moves.cPickle as pickle import glob from urlparse import urlparse as stdlib_urlparse, ParseResult import itertools @@ -64,6 +62,9 @@ import netifaces import codecs utf8_decoder = codecs.getdecoder('utf-8') utf8_encoder = codecs.getencoder('utf-8') +from six.moves.configparser import ConfigParser, NoSectionError, \ + NoOptionError, RawConfigParser +from six.moves.queue import Queue, Empty from six.moves import range from swift import gettext_ as _ @@ -568,9 +569,9 @@ class
FallocateWrapper(object): self.func_name = 'posix_fallocate' self.fallocate = noop_libc_function return - ## fallocate is preferred because we need the on-disk size to match - ## the allocated size. Older versions of sqlite require that the - ## two sizes match. However, fallocate is Linux only. + # fallocate is preferred because we need the on-disk size to match + # the allocated size. Older versions of sqlite require that the + # two sizes match. However, fallocate is Linux only. for func in ('fallocate', 'posix_fallocate'): self.func_name = func self.fallocate = load_libc_function(func, log_error=False) @@ -1414,7 +1415,7 @@ class SwiftLogFormatter(logging.Formatter): if self.max_line_length < 7: msg = msg[:self.max_line_length] else: - approxhalf = (self.max_line_length - 5) / 2 + approxhalf = (self.max_line_length - 5) // 2 msg = msg[:approxhalf] + " ... " + msg[-approxhalf:] return msg @@ -2267,6 +2268,7 @@ class GreenAsyncPile(object): size = size_or_pool self._responses = eventlet.queue.LightQueue(size) self._inflight = 0 + self._pending = 0 def _run_func(self, func, args, kwargs): try: @@ -2278,6 +2280,7 @@ class GreenAsyncPile(object): """ Spawn a job in a green thread on the pile. 
""" + self._pending += 1 self._inflight += 1 self._pool.spawn(self._run_func, func, args, kwargs) @@ -2302,12 +2305,13 @@ class GreenAsyncPile(object): def next(self): try: - return self._responses.get_nowait() + rv = self._responses.get_nowait() except Empty: if self._inflight == 0: raise StopIteration() - else: - return self._responses.get() + rv = self._responses.get() + self._pending -= 1 + return rv class ModifiedParseResult(ParseResult): diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index 6e89d4fc3d..e7504355b6 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -24,7 +24,6 @@ import signal import time import mimetools from swift import gettext_ as _ -from StringIO import StringIO from textwrap import dedent import eventlet @@ -32,6 +31,8 @@ import eventlet.debug from eventlet import greenio, GreenPool, sleep, wsgi, listen, Timeout from paste.deploy import loadwsgi from eventlet.green import socket, ssl, os as green_os +from six import BytesIO +from six import StringIO from urllib import unquote from swift.common import utils, constraints @@ -460,10 +461,14 @@ class WorkersStrategy(object): def loop_timeout(self): """ - :returns: None; to block in :py:func:`green.os.wait` + We want to keep from busy-waiting, but we also need a non-None value so + the main loop gets a chance to tell whether it should keep running or + not (e.g. SIGHUP received). + + So we return 0.5. """ - return None + return 0.5 def bind_ports(self): """ @@ -1079,13 +1084,13 @@ def make_env(env, method=None, path=None, agent='Swift', query_string=None, :returns: Fresh WSGI environment. 
""" newenv = {} - for name in ('eventlet.posthooks', 'HTTP_USER_AGENT', 'HTTP_HOST', - 'PATH_INFO', 'QUERY_STRING', 'REMOTE_USER', 'REQUEST_METHOD', + for name in ('HTTP_USER_AGENT', 'HTTP_HOST', 'PATH_INFO', + 'QUERY_STRING', 'REMOTE_USER', 'REQUEST_METHOD', 'SCRIPT_NAME', 'SERVER_NAME', 'SERVER_PORT', 'HTTP_ORIGIN', 'HTTP_ACCESS_CONTROL_REQUEST_METHOD', 'SERVER_PROTOCOL', 'swift.cache', 'swift.source', 'swift.trans_id', 'swift.authorize_override', - 'swift.authorize'): + 'swift.authorize', 'HTTP_X_USER_ID', 'HTTP_X_PROJECT_ID'): if name in env: newenv[name] = env[name] if method: @@ -1102,7 +1107,7 @@ def make_env(env, method=None, path=None, agent='Swift', query_string=None, del newenv['HTTP_USER_AGENT'] if swift_source: newenv['swift.source'] = swift_source - newenv['wsgi.input'] = StringIO('') + newenv['wsgi.input'] = BytesIO() if 'SCRIPT_NAME' not in newenv: newenv['SCRIPT_NAME'] = '' return newenv diff --git a/swift/container/backend.py b/swift/container/backend.py index 7aad12dd2f..c878edfa88 100644 --- a/swift/container/backend.py +++ b/swift/container/backend.py @@ -19,7 +19,7 @@ Pluggable Back-ends for Container Server import os from uuid import uuid4 import time -import cPickle as pickle +import six.moves.cPickle as pickle from six.moves import range import sqlite3 diff --git a/swift/container/server.py b/swift/container/server.py index 12e078f7ce..6009e53888 100644 --- a/swift/container/server.py +++ b/swift/container/server.py @@ -185,7 +185,11 @@ class ContainerController(BaseStorageServer): return HTTPBadRequest(req=req) if account_partition: - updates = zip(account_hosts, account_devices) + # zip is lazy on py3, but we need a list, so force evaluation. + # On py2 it's an extra list copy, but the list is so small + # (one element per replica in account ring, usually 3) that it + # doesn't matter. 
+ updates = list(zip(account_hosts, account_devices)) else: updates = [] diff --git a/swift/container/updater.py b/swift/container/updater.py index 8e8aa42cc0..a8f84cb081 100644 --- a/swift/container/updater.py +++ b/swift/container/updater.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import print_function import logging import os import signal @@ -254,8 +255,8 @@ class ContainerUpdater(Daemon): self.account_suppressions[info['account']] = until = \ time.time() + self.account_suppression_time if self.new_account_suppressions: - print >>self.new_account_suppressions, \ - info['account'], until + print(info['account'], until, + file=self.new_account_suppressions) # Only track timing data for attempted updates: self.logger.timing_since('timing', start_time) else: diff --git a/swift/locale/swift-log-critical.pot b/swift/locale/swift-log-critical.pot index 265d8b7763..0e4cce3dcf 100644 --- a/swift/locale/swift-log-critical.pot +++ b/swift/locale/swift-log-critical.pot @@ -1,19 +1,19 @@ # Translations template for swift. -# Copyright (C) 2014 ORGANIZATION +# Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. -# FIRST AUTHOR , 2014. +# FIRST AUTHOR , 2015. 
# #, fuzzy msgid "" msgstr "" -"Project-Id-Version: swift 2.1.0.77.g0d0c16d\n" +"Project-Id-Version: swift 2.3.1.dev213\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-09-22 06:07+0000\n" +"POT-Creation-Date: 2015-07-29 06:35+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" diff --git a/swift/locale/swift-log-error.pot b/swift/locale/swift-log-error.pot index 265d8b7763..0e4cce3dcf 100644 --- a/swift/locale/swift-log-error.pot +++ b/swift/locale/swift-log-error.pot @@ -1,19 +1,19 @@ # Translations template for swift. -# Copyright (C) 2014 ORGANIZATION +# Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. -# FIRST AUTHOR , 2014. +# FIRST AUTHOR , 2015. # #, fuzzy msgid "" msgstr "" -"Project-Id-Version: swift 2.1.0.77.g0d0c16d\n" +"Project-Id-Version: swift 2.3.1.dev213\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-09-22 06:07+0000\n" +"POT-Creation-Date: 2015-07-29 06:35+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" diff --git a/swift/locale/swift-log-info.pot b/swift/locale/swift-log-info.pot index 265d8b7763..0e4cce3dcf 100644 --- a/swift/locale/swift-log-info.pot +++ b/swift/locale/swift-log-info.pot @@ -1,19 +1,19 @@ # Translations template for swift. -# Copyright (C) 2014 ORGANIZATION +# Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. -# FIRST AUTHOR , 2014. +# FIRST AUTHOR , 2015. 
# #, fuzzy msgid "" msgstr "" -"Project-Id-Version: swift 2.1.0.77.g0d0c16d\n" +"Project-Id-Version: swift 2.3.1.dev213\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-09-22 06:07+0000\n" +"POT-Creation-Date: 2015-07-29 06:35+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" diff --git a/swift/locale/swift-log-warning.pot b/swift/locale/swift-log-warning.pot index 265d8b7763..0e4cce3dcf 100644 --- a/swift/locale/swift-log-warning.pot +++ b/swift/locale/swift-log-warning.pot @@ -1,19 +1,19 @@ # Translations template for swift. -# Copyright (C) 2014 ORGANIZATION +# Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. -# FIRST AUTHOR , 2014. +# FIRST AUTHOR , 2015. # #, fuzzy msgid "" msgstr "" -"Project-Id-Version: swift 2.1.0.77.g0d0c16d\n" +"Project-Id-Version: swift 2.3.1.dev213\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-09-22 06:07+0000\n" +"POT-Creation-Date: 2015-07-29 06:35+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" diff --git a/swift/locale/swift.pot b/swift/locale/swift.pot index f7a41bba58..0fd410a370 100644 --- a/swift/locale/swift.pot +++ b/swift/locale/swift.pot @@ -6,16 +6,16 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: swift 2.3.1.dev133\n" +"Project-Id-Version: swift 2.4.1.dev19\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-07-09 06:14+0000\n" +"POT-Creation-Date: 2015-09-05 06:17+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: 
LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" #: swift/account/auditor.py:59 #, python-format @@ -53,108 +53,114 @@ msgid "" " %(key)s across policies (%(sum)s)" msgstr "" -#: swift/account/auditor.py:149 +#: swift/account/auditor.py:148 #, python-format msgid "Audit Failed for %s: %s" msgstr "" -#: swift/account/auditor.py:153 +#: swift/account/auditor.py:152 #, python-format msgid "ERROR Could not get account info %s" msgstr "" -#: swift/account/reaper.py:134 swift/common/utils.py:2146 -#: swift/obj/diskfile.py:480 swift/obj/updater.py:88 swift/obj/updater.py:131 +#: swift/account/reaper.py:138 swift/common/utils.py:2147 +#: swift/obj/diskfile.py:296 swift/obj/updater.py:88 swift/obj/updater.py:131 #, python-format msgid "Skipping %s as it is not mounted" msgstr "" -#: swift/account/reaper.py:138 +#: swift/account/reaper.py:142 msgid "Exception in top-level account reaper loop" msgstr "" -#: swift/account/reaper.py:141 +#: swift/account/reaper.py:145 #, python-format msgid "Devices pass completed: %.02fs" msgstr "" -#: swift/account/reaper.py:238 +#: swift/account/reaper.py:253 #, python-format msgid "Beginning pass on account %s" msgstr "" -#: swift/account/reaper.py:255 +#: swift/account/reaper.py:278 #, python-format msgid "Exception with containers for account %s" msgstr "" -#: swift/account/reaper.py:262 +#: swift/account/reaper.py:285 #, python-format msgid "Exception with account %s" msgstr "" -#: swift/account/reaper.py:263 +#: swift/account/reaper.py:286 #, python-format msgid "Incomplete pass on account %s" msgstr "" -#: swift/account/reaper.py:265 +#: swift/account/reaper.py:288 #, python-format msgid ", %s containers deleted" msgstr "" -#: swift/account/reaper.py:267 +#: swift/account/reaper.py:290 #, python-format msgid ", %s objects deleted" msgstr "" -#: swift/account/reaper.py:269 +#: swift/account/reaper.py:292 #, 
python-format msgid ", %s containers remaining" msgstr "" -#: swift/account/reaper.py:272 +#: swift/account/reaper.py:295 #, python-format msgid ", %s objects remaining" msgstr "" -#: swift/account/reaper.py:274 +#: swift/account/reaper.py:297 #, python-format msgid ", %s containers possibly remaining" msgstr "" -#: swift/account/reaper.py:277 +#: swift/account/reaper.py:300 #, python-format msgid ", %s objects possibly remaining" msgstr "" -#: swift/account/reaper.py:280 +#: swift/account/reaper.py:303 msgid ", return codes: " msgstr "" -#: swift/account/reaper.py:284 +#: swift/account/reaper.py:307 #, python-format msgid ", elapsed: %.02fs" msgstr "" -#: swift/account/reaper.py:290 +#: swift/account/reaper.py:313 #, python-format msgid "Account %s has not been reaped since %s" msgstr "" -#: swift/account/reaper.py:349 swift/account/reaper.py:399 -#: swift/account/reaper.py:469 swift/container/updater.py:306 +#: swift/account/reaper.py:372 swift/account/reaper.py:426 +#: swift/account/reaper.py:502 swift/container/updater.py:307 #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "" -#: swift/account/reaper.py:369 +#: swift/account/reaper.py:379 swift/account/reaper.py:435 +#: swift/account/reaper.py:513 +#, python-format +msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" +msgstr "" + +#: swift/account/reaper.py:396 #, python-format msgid "Exception with objects for container %(container)s for account %(account)s" msgstr "" -#: swift/account/server.py:275 swift/container/server.py:582 -#: swift/obj/server.py:914 +#: swift/account/server.py:275 swift/container/server.py:586 +#: swift/obj/server.py:944 #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "" @@ -164,13 +170,13 @@ msgstr "" msgid "Error encoding to UTF-8: %s" msgstr "" -#: swift/common/container_sync_realms.py:59 -#: swift/common/container_sync_realms.py:68 +#: swift/common/container_sync_realms.py:60 +#: swift/common/container_sync_realms.py:69 
#, python-format msgid "Could not load %r: %s" msgstr "" -#: swift/common/container_sync_realms.py:81 +#: swift/common/container_sync_realms.py:82 #, python-format msgid "Error in %r with mtime_check_interval: %s" msgstr "" @@ -194,74 +200,74 @@ msgstr "" msgid "ERROR reading HTTP response from %s" msgstr "" -#: swift/common/db_replicator.py:196 +#: swift/common/db_replicator.py:197 #, python-format msgid "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" -#: swift/common/db_replicator.py:202 +#: swift/common/db_replicator.py:203 #, python-format msgid "Removed %(remove)d dbs" msgstr "" -#: swift/common/db_replicator.py:203 +#: swift/common/db_replicator.py:204 #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "" -#: swift/common/db_replicator.py:243 +#: swift/common/db_replicator.py:251 #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "" -#: swift/common/db_replicator.py:312 +#: swift/common/db_replicator.py:320 #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "" -#: swift/common/db_replicator.py:478 swift/common/db_replicator.py:721 +#: swift/common/db_replicator.py:486 swift/common/db_replicator.py:750 #, python-format msgid "Quarantining DB %s" msgstr "" -#: swift/common/db_replicator.py:481 +#: swift/common/db_replicator.py:489 #, python-format msgid "ERROR reading db %s" msgstr "" -#: swift/common/db_replicator.py:530 +#: swift/common/db_replicator.py:542 #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "" -#: swift/common/db_replicator.py:532 +#: swift/common/db_replicator.py:544 #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "" -#: swift/common/db_replicator.py:560 +#: swift/common/db_replicator.py:583 #, python-format msgid "ERROR while trying to clean up %s" msgstr "" -#: swift/common/db_replicator.py:586 +#: swift/common/db_replicator.py:611 msgid "ERROR Failed to get my own IPs?" 
msgstr "" -#: swift/common/db_replicator.py:596 +#: swift/common/db_replicator.py:625 #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "" -#: swift/common/db_replicator.py:605 +#: swift/common/db_replicator.py:634 msgid "Beginning replication run" msgstr "" -#: swift/common/db_replicator.py:610 +#: swift/common/db_replicator.py:639 msgid "Replication run OVER" msgstr "" -#: swift/common/db_replicator.py:623 +#: swift/common/db_replicator.py:652 msgid "ERROR trying to replicate" msgstr "" @@ -270,105 +276,105 @@ msgstr "" msgid "Unexpected response: %s" msgstr "" -#: swift/common/manager.py:65 +#: swift/common/manager.py:66 msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" -#: swift/common/manager.py:72 +#: swift/common/manager.py:73 msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" -#: swift/common/manager.py:79 +#: swift/common/manager.py:80 msgid "WARNING: Unable to modify max process limit. Running as non-root?" 
msgstr "" -#: swift/common/manager.py:220 +#: swift/common/manager.py:221 msgid "" "\n" "user quit" msgstr "" -#: swift/common/manager.py:257 swift/common/manager.py:585 +#: swift/common/manager.py:258 swift/common/manager.py:586 #, python-format msgid "No %s running" msgstr "" -#: swift/common/manager.py:270 +#: swift/common/manager.py:271 #, python-format msgid "%s (%s) appears to have stopped" msgstr "" -#: swift/common/manager.py:280 +#: swift/common/manager.py:281 #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "" -#: swift/common/manager.py:464 +#: swift/common/manager.py:465 #, python-format msgid "Unable to locate config number %s for %s" msgstr "" -#: swift/common/manager.py:467 +#: swift/common/manager.py:468 #, python-format msgid "Unable to locate config for %s" msgstr "" -#: swift/common/manager.py:470 +#: swift/common/manager.py:471 msgid "Found configs:" msgstr "" -#: swift/common/manager.py:517 +#: swift/common/manager.py:518 #, python-format msgid "Removing pid file %s with invalid pid" msgstr "" -#: swift/common/manager.py:522 +#: swift/common/manager.py:523 #, python-format msgid "Signal %s pid: %s signal: %s" msgstr "" -#: swift/common/manager.py:527 +#: swift/common/manager.py:528 #, python-format msgid "Removing pid file %s with wrong pid %d" msgstr "" -#: swift/common/manager.py:534 +#: swift/common/manager.py:535 #, python-format msgid "Removing stale pid file %s" msgstr "" -#: swift/common/manager.py:537 +#: swift/common/manager.py:538 #, python-format msgid "No permission to signal PID %d" msgstr "" -#: swift/common/manager.py:582 +#: swift/common/manager.py:583 #, python-format msgid "%s #%d not running (%s)" msgstr "" -#: swift/common/manager.py:589 swift/common/manager.py:682 -#: swift/common/manager.py:685 +#: swift/common/manager.py:590 swift/common/manager.py:683 +#: swift/common/manager.py:687 #, python-format msgid "%s running (%s - %s)" msgstr "" -#: swift/common/manager.py:688 +#: 
swift/common/manager.py:690 #, python-format msgid "%s already started..." msgstr "" -#: swift/common/manager.py:697 +#: swift/common/manager.py:699 #, python-format msgid "Running %s once" msgstr "" -#: swift/common/manager.py:699 +#: swift/common/manager.py:701 #, python-format msgid "Starting %s" msgstr "" -#: swift/common/manager.py:706 +#: swift/common/manager.py:708 #, python-format msgid "%s does not exist" msgstr "" @@ -397,110 +403,110 @@ msgstr "" msgid "ERROR: An error occurred while retrieving segments" msgstr "" -#: swift/common/utils.py:390 +#: swift/common/utils.py:391 #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "" -#: swift/common/utils.py:580 +#: swift/common/utils.py:581 msgid "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" -#: swift/common/utils.py:664 +#: swift/common/utils.py:665 #, python-format msgid "Unable to perform fsync() on directory %s: %s" msgstr "" -#: swift/common/utils.py:1076 +#: swift/common/utils.py:1077 #, python-format msgid "%s: Connection reset by peer" msgstr "" -#: swift/common/utils.py:1078 swift/common/utils.py:1081 +#: swift/common/utils.py:1079 swift/common/utils.py:1082 #, python-format msgid "%s: %s" msgstr "" -#: swift/common/utils.py:1316 +#: swift/common/utils.py:1317 msgid "Connection refused" msgstr "" -#: swift/common/utils.py:1318 +#: swift/common/utils.py:1319 msgid "Host unreachable" msgstr "" -#: swift/common/utils.py:1320 +#: swift/common/utils.py:1321 msgid "Connection timeout" msgstr "" -#: swift/common/utils.py:1623 +#: swift/common/utils.py:1624 msgid "UNCAUGHT EXCEPTION" msgstr "" -#: swift/common/utils.py:1678 +#: swift/common/utils.py:1679 msgid "Error: missing config path argument" msgstr "" -#: swift/common/utils.py:1683 +#: swift/common/utils.py:1684 #, python-format msgid "Error: unable to locate %s" msgstr "" -#: swift/common/utils.py:2007 +#: swift/common/utils.py:2008 #, python-format msgid "Unable to read config from 
%s" msgstr "" -#: swift/common/utils.py:2013 +#: swift/common/utils.py:2014 #, python-format msgid "Unable to find %s config section in %s" msgstr "" -#: swift/common/utils.py:2372 +#: swift/common/utils.py:2376 #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "" -#: swift/common/utils.py:2377 +#: swift/common/utils.py:2381 #, python-format msgid "No realm key for %r" msgstr "" -#: swift/common/utils.py:2381 +#: swift/common/utils.py:2385 #, python-format msgid "No cluster endpoint for %r %r" msgstr "" -#: swift/common/utils.py:2390 +#: swift/common/utils.py:2394 #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" -#: swift/common/utils.py:2394 +#: swift/common/utils.py:2398 msgid "Path required in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2397 +#: swift/common/utils.py:2401 msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2402 +#: swift/common/utils.py:2406 #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2594 +#: swift/common/utils.py:2598 msgid "Exception dumping recon cache" msgstr "" -#: swift/common/wsgi.py:198 +#: swift/common/wsgi.py:199 #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "" -#: swift/common/wsgi.py:208 +#: swift/common/wsgi.py:209 msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external " "SSL termination for a production deployment." 
@@ -541,27 +547,27 @@ msgstr "" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" -#: swift/common/middleware/recon.py:80 +#: swift/common/middleware/recon.py:81 msgid "Error reading recon cache file" msgstr "" -#: swift/common/middleware/recon.py:82 +#: swift/common/middleware/recon.py:83 msgid "Error parsing recon cache file" msgstr "" -#: swift/common/middleware/recon.py:84 +#: swift/common/middleware/recon.py:85 msgid "Error retrieving recon data" msgstr "" -#: swift/common/middleware/recon.py:158 +#: swift/common/middleware/recon.py:159 msgid "Error listing devices" msgstr "" -#: swift/common/middleware/recon.py:254 +#: swift/common/middleware/recon.py:255 msgid "Error reading ringfile" msgstr "" -#: swift/common/middleware/recon.py:268 +#: swift/common/middleware/recon.py:269 msgid "Error reading swift.conf" msgstr "" @@ -654,14 +660,14 @@ msgid "" "request: \"%s\" vs \"%s\"" msgstr "" -#: swift/container/server.py:221 +#: swift/container/server.py:225 #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" -#: swift/container/server.py:230 +#: swift/container/server.py:234 #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " @@ -727,60 +733,60 @@ msgstr "" msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "" -#: swift/container/updater.py:77 +#: swift/container/updater.py:78 #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "" -#: swift/container/updater.py:91 swift/obj/reconstructor.py:797 -#: swift/obj/replicator.py:498 swift/obj/replicator.py:586 +#: swift/container/updater.py:92 swift/obj/reconstructor.py:812 +#: swift/obj/replicator.py:580 swift/obj/replicator.py:692 #, python-format msgid "%s is not mounted" msgstr "" -#: swift/container/updater.py:110 +#: swift/container/updater.py:111 #, python-format msgid "ERROR with loading suppressions from %s: " msgstr 
"" -#: swift/container/updater.py:120 +#: swift/container/updater.py:121 msgid "Begin container update sweep" msgstr "" -#: swift/container/updater.py:154 +#: swift/container/updater.py:155 #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" -#: swift/container/updater.py:168 +#: swift/container/updater.py:169 #, python-format msgid "Container update sweep completed: %.02fs" msgstr "" -#: swift/container/updater.py:180 +#: swift/container/updater.py:181 msgid "Begin container update single threaded sweep" msgstr "" -#: swift/container/updater.py:188 +#: swift/container/updater.py:189 #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" -#: swift/container/updater.py:243 +#: swift/container/updater.py:244 #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "" -#: swift/container/updater.py:252 +#: swift/container/updater.py:253 #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "" -#: swift/container/updater.py:294 +#: swift/container/updater.py:295 #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " @@ -845,82 +851,55 @@ msgstr "" msgid "ERROR auditing: %s" msgstr "" -#: swift/obj/diskfile.py:327 swift/obj/diskfile.py:2339 -#, python-format -msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" -msgstr "" - -#: swift/obj/diskfile.py:418 swift/obj/diskfile.py:2407 -msgid "Error hashing suffix" -msgstr "" - -#: swift/obj/diskfile.py:490 swift/obj/updater.py:162 +#: swift/obj/diskfile.py:306 swift/obj/updater.py:162 #, python-format msgid "Directory %r does not map to a valid policy (%s)" msgstr "" -#: swift/obj/diskfile.py:741 +#: swift/obj/diskfile.py:619 +#, python-format +msgid 
"Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" +msgstr "" + +#: swift/obj/diskfile.py:702 +msgid "Error hashing suffix" +msgstr "" + +#: swift/obj/diskfile.py:823 #, python-format msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" -#: swift/obj/diskfile.py:941 +#: swift/obj/diskfile.py:1037 #, python-format msgid "Problem cleaning up %s" msgstr "" -#: swift/obj/diskfile.py:1259 +#: swift/obj/diskfile.py:1344 #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" -#: swift/obj/diskfile.py:1549 +#: swift/obj/diskfile.py:1618 #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata " "%(meta)s" msgstr "" -#: swift/obj/diskfile.py:1802 swift/obj/diskfile.py:1813 +#: swift/obj/diskfile.py:2054 #, python-format -msgid "" -"%s \n" -"Problem fsyncing dirafter writing .durable: %s" +msgid "No space left on device for %s (%s)" msgstr "" -#: swift/obj/diskfile.py:1808 +#: swift/obj/diskfile.py:2063 #, python-format -msgid "" -"%s \n" -"No space left on devicefor updates to: %s" +msgid "Problem cleaning up %s (%s)" msgstr "" -#: swift/obj/diskfile.py:1824 +#: swift/obj/diskfile.py:2066 #, python-format -msgid "" -"%s \n" -"Problem cleaning up %s" -msgstr "" - -#: swift/obj/diskfile.py:1827 -#, python-format -msgid "" -"%s \n" -"Problem fsyncing durable state file: %s" -msgstr "" - -#: swift/obj/diskfile.py:1832 -#, python-format -msgid "" -"%s \n" -"No space left on device for %s" -msgstr "" - -#: swift/obj/diskfile.py:1836 -#, python-format -msgid "" -"%s \n" -"Problem writing durable state file: %s" +msgid "Problem writing durable state file %s (%s)" msgstr "" #: swift/obj/expirer.py:79 @@ -952,7 +931,7 @@ msgstr "" msgid "Exception while deleting object %s %s %s" msgstr "" -#: swift/obj/reconstructor.py:208 swift/obj/reconstructor.py:478 +#: swift/obj/reconstructor.py:208 swift/obj/reconstructor.py:492 #, python-format msgid 
"Invalid response %(resp)s from %(full_path)s" msgstr "" @@ -967,179 +946,180 @@ msgstr "" msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" -#: swift/obj/reconstructor.py:344 +#: swift/obj/reconstructor.py:348 #, python-format msgid "" -"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions reconstructed" -" in %(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of " +"%(device)d/%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in " +"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" -#: swift/obj/reconstructor.py:357 swift/obj/replicator.py:430 +#: swift/obj/reconstructor.py:369 swift/obj/replicator.py:504 #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% " "synced" msgstr "" -#: swift/obj/reconstructor.py:364 swift/obj/replicator.py:437 +#: swift/obj/reconstructor.py:376 swift/obj/replicator.py:511 #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" -#: swift/obj/reconstructor.py:372 +#: swift/obj/reconstructor.py:384 #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "" -#: swift/obj/reconstructor.py:401 swift/obj/replicator.py:474 +#: swift/obj/reconstructor.py:413 swift/obj/replicator.py:548 msgid "Lockup detected.. killing live coros." msgstr "" -#: swift/obj/reconstructor.py:448 +#: swift/obj/reconstructor.py:460 #, python-format msgid "Trying to sync suffixes with %s" msgstr "" -#: swift/obj/reconstructor.py:473 +#: swift/obj/reconstructor.py:485 #, python-format msgid "%s responded as unmounted" msgstr "" -#: swift/obj/reconstructor.py:860 swift/obj/replicator.py:306 +#: swift/obj/reconstructor.py:883 swift/obj/replicator.py:357 #, python-format msgid "Removing partition: %s" msgstr "" -#: swift/obj/reconstructor.py:876 +#: swift/obj/reconstructor.py:899 msgid "Ring change detected. 
Aborting current reconstruction pass." msgstr "" -#: swift/obj/reconstructor.py:895 +#: swift/obj/reconstructor.py:918 msgid "Exception in top-levelreconstruction loop" msgstr "" -#: swift/obj/reconstructor.py:905 +#: swift/obj/reconstructor.py:928 msgid "Running object reconstructor in script mode." msgstr "" -#: swift/obj/reconstructor.py:914 +#: swift/obj/reconstructor.py:937 #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "" -#: swift/obj/reconstructor.py:921 +#: swift/obj/reconstructor.py:944 msgid "Starting object reconstructor in daemon mode." msgstr "" -#: swift/obj/reconstructor.py:925 +#: swift/obj/reconstructor.py:948 msgid "Starting object reconstruction pass." msgstr "" -#: swift/obj/reconstructor.py:930 +#: swift/obj/reconstructor.py:953 #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "" -#: swift/obj/replicator.py:145 +#: swift/obj/replicator.py:173 #, python-format msgid "Killing long-running rsync: %s" msgstr "" -#: swift/obj/replicator.py:159 +#: swift/obj/replicator.py:187 #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "" -#: swift/obj/replicator.py:166 swift/obj/replicator.py:170 +#: swift/obj/replicator.py:194 swift/obj/replicator.py:198 #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "" -#: swift/obj/replicator.py:292 +#: swift/obj/replicator.py:327 #, python-format msgid "Removing %s objects" msgstr "" -#: swift/obj/replicator.py:300 +#: swift/obj/replicator.py:346 msgid "Error syncing handoff partition" msgstr "" -#: swift/obj/replicator.py:362 +#: swift/obj/replicator.py:422 #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "" -#: swift/obj/replicator.py:367 +#: swift/obj/replicator.py:429 #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "" -#: swift/obj/replicator.py:402 +#: swift/obj/replicator.py:473 #, python-format msgid "Error syncing with node: %s" msgstr 
"" -#: swift/obj/replicator.py:406 +#: swift/obj/replicator.py:478 msgid "Error syncing partition" msgstr "" -#: swift/obj/replicator.py:419 +#: swift/obj/replicator.py:493 #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" -#: swift/obj/replicator.py:445 +#: swift/obj/replicator.py:519 #, python-format msgid "Nothing replicated for %s seconds." msgstr "" -#: swift/obj/replicator.py:589 +#: swift/obj/replicator.py:695 msgid "Ring change detected. Aborting current replication pass." msgstr "" -#: swift/obj/replicator.py:610 +#: swift/obj/replicator.py:723 msgid "Exception in top-level replication loop" msgstr "" -#: swift/obj/replicator.py:619 +#: swift/obj/replicator.py:733 msgid "Running object replicator in script mode." msgstr "" -#: swift/obj/replicator.py:637 +#: swift/obj/replicator.py:751 #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "" -#: swift/obj/replicator.py:644 +#: swift/obj/replicator.py:762 msgid "Starting object replicator in daemon mode." msgstr "" -#: swift/obj/replicator.py:648 +#: swift/obj/replicator.py:766 msgid "Starting object replication pass." msgstr "" -#: swift/obj/replicator.py:653 +#: swift/obj/replicator.py:771 #, python-format msgid "Object replication complete. 
(%.02f minutes)" msgstr "" -#: swift/obj/server.py:231 +#: swift/obj/server.py:240 #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d" " response from %(ip)s:%(port)s/%(dev)s" msgstr "" -#: swift/obj/server.py:238 +#: swift/obj/server.py:247 #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for " "async update later)" msgstr "" -#: swift/obj/server.py:273 +#: swift/obj/server.py:282 #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " @@ -1193,21 +1173,21 @@ msgstr "" msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "" -#: swift/proxy/server.py:405 +#: swift/proxy/server.py:414 msgid "ERROR Unhandled exception in request" msgstr "" -#: swift/proxy/server.py:460 +#: swift/proxy/server.py:469 #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "" -#: swift/proxy/server.py:477 swift/proxy/server.py:495 +#: swift/proxy/server.py:486 swift/proxy/server.py:504 #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "" -#: swift/proxy/server.py:571 +#: swift/proxy/server.py:527 #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "" @@ -1216,129 +1196,129 @@ msgstr "" msgid "Account" msgstr "" -#: swift/proxy/controllers/base.py:797 swift/proxy/controllers/base.py:836 -#: swift/proxy/controllers/base.py:928 swift/proxy/controllers/obj.py:364 -#: swift/proxy/controllers/obj.py:584 swift/proxy/controllers/obj.py:996 -#: swift/proxy/controllers/obj.py:1043 swift/proxy/controllers/obj.py:1057 -#: swift/proxy/controllers/obj.py:1864 swift/proxy/controllers/obj.py:2101 -#: swift/proxy/controllers/obj.py:2229 swift/proxy/controllers/obj.py:2414 +#: swift/proxy/controllers/base.py:803 swift/proxy/controllers/base.py:842 +#: swift/proxy/controllers/base.py:935 swift/proxy/controllers/obj.py:320 +#: swift/proxy/controllers/obj.py:847 
swift/proxy/controllers/obj.py:894 +#: swift/proxy/controllers/obj.py:908 swift/proxy/controllers/obj.py:1717 +#: swift/proxy/controllers/obj.py:1954 swift/proxy/controllers/obj.py:2079 +#: swift/proxy/controllers/obj.py:2264 msgid "Object" msgstr "" -#: swift/proxy/controllers/base.py:798 swift/proxy/controllers/base.py:837 +#: swift/proxy/controllers/base.py:804 swift/proxy/controllers/base.py:843 msgid "Trying to read during GET (retrying)" msgstr "" -#: swift/proxy/controllers/base.py:929 +#: swift/proxy/controllers/base.py:936 msgid "Trying to read during GET" msgstr "" -#: swift/proxy/controllers/base.py:933 +#: swift/proxy/controllers/base.py:940 #, python-format msgid "Client did not read from proxy within %ss" msgstr "" -#: swift/proxy/controllers/base.py:938 +#: swift/proxy/controllers/base.py:945 msgid "Client disconnected on read" msgstr "" -#: swift/proxy/controllers/base.py:940 +#: swift/proxy/controllers/base.py:947 msgid "Trying to send to client" msgstr "" -#: swift/proxy/controllers/base.py:991 swift/proxy/controllers/base.py:1303 +#: swift/proxy/controllers/base.py:998 swift/proxy/controllers/base.py:1410 #, python-format msgid "Trying to %(method)s %(path)s" msgstr "" -#: swift/proxy/controllers/base.py:1030 swift/proxy/controllers/base.py:1291 -#: swift/proxy/controllers/obj.py:387 swift/proxy/controllers/obj.py:1034 -#: swift/proxy/controllers/obj.py:2221 swift/proxy/controllers/obj.py:2459 +#: swift/proxy/controllers/base.py:1037 swift/proxy/controllers/base.py:1398 +#: swift/proxy/controllers/obj.py:343 swift/proxy/controllers/obj.py:885 +#: swift/proxy/controllers/obj.py:2071 swift/proxy/controllers/obj.py:2309 msgid "ERROR Insufficient Storage" msgstr "" -#: swift/proxy/controllers/base.py:1033 +#: swift/proxy/controllers/base.py:1040 #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "" -#: swift/proxy/controllers/base.py:1294 +#: swift/proxy/controllers/base.py:1401 #, python-format msgid "ERROR %(status)d 
Trying to %(method)s %(path)sFrom Container Server" msgstr "" -#: swift/proxy/controllers/base.py:1424 +#: swift/proxy/controllers/base.py:1531 #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "" -#: swift/proxy/controllers/container.py:98 swift/proxy/controllers/obj.py:163 +#: swift/proxy/controllers/container.py:98 msgid "Container" msgstr "" -#: swift/proxy/controllers/obj.py:365 +#: swift/proxy/controllers/obj.py:321 #, python-format msgid "Trying to get final status of PUT to %s" msgstr "" -#: swift/proxy/controllers/obj.py:391 swift/proxy/controllers/obj.py:2464 +#: swift/proxy/controllers/obj.py:347 swift/proxy/controllers/obj.py:2314 #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "" -#: swift/proxy/controllers/obj.py:657 +#: swift/proxy/controllers/obj.py:548 #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "" -#: swift/proxy/controllers/obj.py:666 +#: swift/proxy/controllers/obj.py:561 #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" -#: swift/proxy/controllers/obj.py:1038 swift/proxy/controllers/obj.py:2224 +#: swift/proxy/controllers/obj.py:889 swift/proxy/controllers/obj.py:2074 #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "" -#: swift/proxy/controllers/obj.py:1044 swift/proxy/controllers/obj.py:2230 +#: swift/proxy/controllers/obj.py:895 swift/proxy/controllers/obj.py:2080 #, python-format msgid "Expect: 100-continue on %s" msgstr "" -#: swift/proxy/controllers/obj.py:1058 swift/proxy/controllers/obj.py:1865 +#: swift/proxy/controllers/obj.py:909 swift/proxy/controllers/obj.py:1718 #, python-format msgid "Trying to write to %s" msgstr "" -#: swift/proxy/controllers/obj.py:1109 swift/proxy/controllers/obj.py:2369 +#: swift/proxy/controllers/obj.py:960 swift/proxy/controllers/obj.py:2219 #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "" -#: 
swift/proxy/controllers/obj.py:1116 swift/proxy/controllers/obj.py:2376 +#: swift/proxy/controllers/obj.py:967 swift/proxy/controllers/obj.py:2226 msgid "ERROR Exception causing client disconnect" msgstr "" -#: swift/proxy/controllers/obj.py:1121 swift/proxy/controllers/obj.py:2381 +#: swift/proxy/controllers/obj.py:972 swift/proxy/controllers/obj.py:2231 msgid "Client disconnected without sending enough data" msgstr "" -#: swift/proxy/controllers/obj.py:1167 +#: swift/proxy/controllers/obj.py:1018 #, python-format msgid "Object servers returned %s mismatched etags" msgstr "" -#: swift/proxy/controllers/obj.py:1171 swift/proxy/controllers/obj.py:2544 +#: swift/proxy/controllers/obj.py:1022 swift/proxy/controllers/obj.py:2393 msgid "Object PUT" msgstr "" -#: swift/proxy/controllers/obj.py:2356 +#: swift/proxy/controllers/obj.py:2206 #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "" -#: swift/proxy/controllers/obj.py:2415 +#: swift/proxy/controllers/obj.py:2265 #, python-format msgid "Trying to get %s status of PUT to %s" msgstr "" diff --git a/swift/locale/tr_TR/LC_MESSAGES/swift.po b/swift/locale/tr_TR/LC_MESSAGES/swift.po new file mode 100644 index 0000000000..674b6d7a02 --- /dev/null +++ b/swift/locale/tr_TR/LC_MESSAGES/swift.po @@ -0,0 +1,1118 @@ +# Turkish (Turkey) translations for swift. +# Copyright (C) 2015 ORGANIZATION +# This file is distributed under the same license as the swift project. 
+# +# Translators: +# İşbaran Akçayır , 2015 +msgid "" +msgstr "" +"Project-Id-Version: Swift\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2015-09-05 06:17+0000\n" +"PO-Revision-Date: 2015-09-04 07:42+0000\n" +"Last-Translator: İşbaran Akçayır \n" +"Language-Team: Turkish (Turkey) (http://www.transifex.com/openstack/swift/" +"language/tr_TR/)\n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.0\n" + +msgid "" +"\n" +"user quit" +msgstr "" +"\n" +"kullanıcı çıktı" + +#, python-format +msgid " - %s" +msgstr " - %s" + +#, python-format +msgid " - parallel, %s" +msgstr " - paralel, %s" + +#, python-format +msgid "" +"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" +msgstr "" +"%(checked)d sonek kontrol edildi - %(hashed).2f%% özetlenen, %(synced).2f%% " +"eşzamanlanan" + +#, python-format +msgid "%(ip)s/%(device)s responded as unmounted" +msgstr "%(ip)s/%(device)s bağlı değil olarak yanıt verdi" + +#, python-format +msgid "%(msg)s %(ip)s:%(port)s/%(device)s" +msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" + +#, python-format +msgid "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" +"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " +"(%(rate).2f/sec, %(remaining)s remaining)" +msgstr "" +"%(device)d/%(dtotal)d (%(dpercentage).2f%%) aygıtın %(reconstructed)d/" +"%(total)d (%(percentage).2f%%) bölümü %(time).2fs (%(rate).2f/sn, " +"%(remaining)s kalan) içinde yeniden oluşturuldu" + +#, python-format +msgid "" +"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " +"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" +msgstr "" +"%(replicated)d/%(total)d (%(percentage).2f%%) bölüm %(time).2fs (%(rate).2f/" +"sn, %(remaining)s kalan) içinde çoğaltıldı" + +#, python-format +msgid "%(success)s successes, %(failure)s failures" +msgstr 
"%(success)s başarı, %(failure)s başarısızlık" + +#, python-format +msgid "%(type)s returning 503 for %(statuses)s" +msgstr "%(type)s %(statuses)s için 503 döndürüyor" + +#, python-format +msgid "%s #%d not running (%s)" +msgstr "%s #%d çalışmıyor (%s)" + +#, python-format +msgid "%s (%s) appears to have stopped" +msgstr "%s (%s) durmuş gibi görünüyor" + +#, python-format +msgid "%s already started..." +msgstr "%s zaten başlatıldı..." + +#, python-format +msgid "%s does not exist" +msgstr "%s mevcut değil" + +#, python-format +msgid "%s is not mounted" +msgstr "%s bağlı değil" + +#, python-format +msgid "%s responded as unmounted" +msgstr "%s bağlı değil olarak yanıt verdi" + +#, python-format +msgid "%s running (%s - %s)" +msgstr "%s çalışıyor (%s - %s)" + +#, python-format +msgid "%s: %s" +msgstr "%s: %s" + +#, python-format +msgid "%s: Connection reset by peer" +msgstr "%s: Bağlantı eş tarafından sıfırlandı" + +#, python-format +msgid ", %s containers deleted" +msgstr ", %s kap silindi" + +#, python-format +msgid ", %s containers possibly remaining" +msgstr ", %s kap kaldı muhtemelen" + +#, python-format +msgid ", %s containers remaining" +msgstr ", %s kap kaldı" + +#, python-format +msgid ", %s objects deleted" +msgstr ", %s nesne silindi" + +#, python-format +msgid ", %s objects possibly remaining" +msgstr ", %s nesne kaldı muhtemelen" + +#, python-format +msgid ", %s objects remaining" +msgstr ", %s nesne kaldı" + +#, python-format +msgid ", elapsed: %.02fs" +msgstr ", geçen süre: %.02fs" + +msgid ", return codes: " +msgstr ", dönen kodlar: " + +msgid "Account" +msgstr "Hesap" + +#, python-format +msgid "Account %s has not been reaped since %s" +msgstr "Hesap %s %s'den beri biçilmedi" + +#, python-format +msgid "Account audit \"once\" mode completed: %.02fs" +msgstr "Hesap denetimi \"bir kere\" kipi tamamlandı: %.02fs" + +#, python-format +msgid "Account audit pass completed: %.02fs" +msgstr "Hesap denetimi geçişi tamamlandı: %.02fs" + +#, python-format 
+msgid "" +"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" +msgstr "%(count)d db %(time).5f saniyede çoğaltılmaya çalışıldı (%(rate).5f/s)" + +#, python-format +msgid "Audit Failed for %s: %s" +msgstr "Denetim %s için başarısız: %s" + +#, python-format +msgid "Bad rsync return code: %(ret)d <- %(args)s" +msgstr "Kötü rsync dönüş kodu: %(ret)d <- %(args)s" + +msgid "Begin account audit \"once\" mode" +msgstr "Hesap denetimi \"bir kere\" kipini başlat" + +msgid "Begin account audit pass." +msgstr "Hesap denetimi başlatma geçildi." + +msgid "Begin container audit \"once\" mode" +msgstr "Kap denetimine \"bir kere\" kipinde başla" + +msgid "Begin container audit pass." +msgstr "Kap denetimi geçişini başlat." + +msgid "Begin container sync \"once\" mode" +msgstr "Kap eşzamanlamayı \"bir kere\" kipinde başlat" + +msgid "Begin container update single threaded sweep" +msgstr "Kap güncelleme tek iş iplikli süpürmeye başla" + +msgid "Begin container update sweep" +msgstr "Kap güncelleme süpürmesine başla" + +#, python-format +msgid "Begin object audit \"%s\" mode (%s%s)" +msgstr "Nesne denetimini \"%s\" kipinde başlat (%s%s)" + +msgid "Begin object update single threaded sweep" +msgstr "Nesne güncelleme tek iş iplikli süpürmeye başla" + +msgid "Begin object update sweep" +msgstr "Nesne güncelleme süpürmesine başla" + +#, python-format +msgid "Beginning pass on account %s" +msgstr "%s hesabı üzerinde geçiş başlatılıyor" + +msgid "Beginning replication run" +msgstr "Çoğaltmanın çalıştırılmasına başlanıyor" + +msgid "Broker error trying to rollback locked connection" +msgstr "Kilitli bağlantı geri alınmaya çalışılırken vekil hatası" + +#, python-format +msgid "Can not access the file %s." +msgstr "%s dosyasına erişilemiyor." + +#, python-format +msgid "Can not load profile data from %s." +msgstr "%s'den profil verisi yüklenemiyor." 
+ +#, python-format +msgid "Client did not read from proxy within %ss" +msgstr "İstemci %ss içinde vekilden okumadı" + +msgid "Client disconnected on read" +msgstr "İstemci okuma sırasında bağlantıyı kesti" + +msgid "Client disconnected without sending enough data" +msgstr "İstemci yeterli veri göndermeden bağlantıyı kesti" + +#, python-format +msgid "" +"Client path %(client)s does not match path stored in object metadata %(meta)s" +msgstr "" +"İstemci yolu %(client)s nesne metadata'sında kayıtlı yol ile eşleşmiyor " +"%(meta)s" + +msgid "" +"Configuration option internal_client_conf_path not defined. Using default " +"configuration, See internal-client.conf-sample for options" +msgstr "" +"Yapılandırma seçeneği internal_client_conf_path belirtilmemiş. Varsayılan " +"yapılandırma kullanılıyor, seçenekleri çin internal-client.conf-sample'a " +"bakın" + +msgid "Connection refused" +msgstr "Bağlantı reddedildi" + +msgid "Connection timeout" +msgstr "Bağlantı zaman aşımına uğradı" + +msgid "Container" +msgstr "Kap" + +#, python-format +msgid "Container audit \"once\" mode completed: %.02fs" +msgstr "Kap denetimi \"bir kere\" kipinde tamamlandı: %.02fs" + +#, python-format +msgid "Container audit pass completed: %.02fs" +msgstr "Kap denetim geçişi tamamlandı: %.02fs" + +#, python-format +msgid "Container sync \"once\" mode completed: %.02fs" +msgstr "Kap eşzamanlama \"bir kere\" kipinde tamamlandı: %.02fs" + +#, python-format +msgid "" +"Container update single threaded sweep completed: %(elapsed).02fs, " +"%(success)s successes, %(fail)s failures, %(no_change)s with no changes" +msgstr "" +"Kap güncelleme tek iş iplikli süpürme tamamlandı: %(elapsed).02fs, " +"%(success)s başarılı, %(fail)s başarısız, %(no_change)s değişiklik yok" + +#, python-format +msgid "Container update sweep completed: %.02fs" +msgstr "Kap güncelleme süpürme tamamlandı: %.02fs" + +#, python-format +msgid "" +"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " 
+"successes, %(fail)s failures, %(no_change)s with no changes" +msgstr "" +"%(path)s in kap güncelleme süpürmesi tamamlandı: %(elapsed).02fs, " +"%(success)s başarılı, %(fail)s başarısız, %(no_change)s değişiklik yok" + +#, python-format +msgid "Could not bind to %s:%s after trying for %s seconds" +msgstr "%s:%s'e bağlanılamadı, %s saniye beklendi" + +#, python-format +msgid "Could not load %r: %s" +msgstr "%r yüklenemedi: %s" + +#, python-format +msgid "Data download error: %s" +msgstr "Veri indirme hatası: %s" + +#, python-format +msgid "Devices pass completed: %.02fs" +msgstr "Aygıtlar geçişi tamamlandı: %.02fs" + +#, python-format +msgid "Directory %r does not map to a valid policy (%s)" +msgstr "Dizin %r geçerli bir ilkeye eşleştirilmemiş (%s)" + +#, python-format +msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" +msgstr "HATA %(db_file)s: %(validate_sync_to_err)s" + +#, python-format +msgid "ERROR %(status)d %(body)s From %(type)s Server" +msgstr "HATA %(status)d %(body)s %(type)s Sunucudan" + +#, python-format +msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" +msgstr "HATA %(status)d %(body)s Nesne Sunucu re'den: %(path)s" + +#, python-format +msgid "ERROR %(status)d Expect: 100-continue From Object Server" +msgstr "HATA %(status)d Beklenen: 100-Nesne Sunucusundan devam et" + +#, python-format +msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" +msgstr "HATA %(status)d Kap Sunucusundan %(method)s %(path)s denenirken" + +#, python-format +msgid "" +"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " +"later): Response %(status)s %(reason)s" +msgstr "" +"HATA %(ip)s:%(port)s/%(device)s ile hesap güncelleme başarısız (sonra tekrar " +"denenecek): Yanıt %(status)s %(reason)s" + +#, python-format +msgid "" +"ERROR Account update failed: different numbers of hosts and devices in " +"request: \"%s\" vs \"%s\"" +msgstr "" +"HATA Hesap güncelleme başarısız: istekte farklı sayıda istemci ve aygıt " 
+"var: \"%s\" \"%s\"" + +#, python-format +msgid "ERROR Bad response %(status)s from %(host)s" +msgstr "HATA %(host)s dan kötü yanıt %(status)s" + +#, python-format +msgid "ERROR Client read timeout (%ss)" +msgstr "HATA İstemci okuma zaman aşımına uğradı (%ss)" + +#, python-format +msgid "" +"ERROR Container update failed (saving for async update later): %(status)d " +"response from %(ip)s:%(port)s/%(dev)s" +msgstr "" +"HATA Kap güncelleme başarısız (daha sonraki async güncellemesi için " +"kaydediliyor): %(ip)s:%(port)s/%(dev)s den %(status)d yanıtı" + +#, python-format +msgid "" +"ERROR Container update failed: different numbers of hosts and devices in " +"request: \"%s\" vs \"%s\"" +msgstr "" +"HATA Kap güncelleme başarısız: istekte farklı sayıda istemci ve aygıt var: " +"\"%s\" e karşı \"%s\"" + +#, python-format +msgid "ERROR Could not get account info %s" +msgstr "HATA hesap bilgisi %s alınamadı" + +#, python-format +msgid "ERROR Could not get container info %s" +msgstr "HATA %s kap bilgisi alınamadı" + +#, python-format +msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" +msgstr "HATA %(data_file)s disk dosyası kapatma başarısız: %(exc)s : %(stack)s" + +msgid "ERROR Exception causing client disconnect" +msgstr "HATA İstisna istemci bağlantısının kesilmesine neden oluyor" + +msgid "ERROR Failed to get my own IPs?" +msgstr "Kendi IP'lerimi alırken HATA?" 
+ +msgid "ERROR Insufficient Storage" +msgstr "HATA Yetersiz Depolama" + +#, python-format +msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" +msgstr "" +"HATA Nesne %(obj)s denetimde başarısız oldu ve karantinaya alındı: %(err)s" + +#, python-format +msgid "ERROR Pickle problem, quarantining %s" +msgstr "HATA Picke problemi, %s karantinaya alınıyor" + +#, python-format +msgid "ERROR Remote drive not mounted %s" +msgstr "HATA Uzak sürücü bağlı değil %s" + +#, python-format +msgid "ERROR Syncing %(db_file)s %(row)s" +msgstr "HATA %(db_file)s %(row)s eşzamanlamada" + +#, python-format +msgid "ERROR Syncing %s" +msgstr "HATA %s Eşzamanlama" + +#, python-format +msgid "ERROR Trying to audit %s" +msgstr "HATA %s denetimi denemesinde" + +msgid "ERROR Unhandled exception in request" +msgstr "HATA İstekte ele alınmayan istisna var" + +#, python-format +msgid "ERROR __call__ error with %(method)s %(path)s " +msgstr "ERROR __call__ hatası %(method)s %(path)s " + +#, python-format +msgid "" +"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " +"later)" +msgstr "" +"HATA %(ip)s:%(port)s/%(device)s ile hesap güncelleme başarısız (sonra " +"yeniden denenecek)" + +#, python-format +msgid "" +"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " +"later): " +msgstr "" +"HATA hesap güncelleme başarısız %(ip)s:%(port)s/%(device)s (sonra tekrar " +"denenecek):" + +#, python-format +msgid "ERROR async pending file with unexpected name %s" +msgstr "HATA beklenmeyen isimli async bekleyen dosya %s" + +msgid "ERROR auditing" +msgstr "denetlemede HATA" + +#, python-format +msgid "ERROR auditing: %s" +msgstr "HATA denetim: %s" + +#, python-format +msgid "" +"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " +"update later)" +msgstr "" +"HATA kap güncelleme %(ip)s:%(port)s/%(dev)s ile başarısız oldu (sonraki " +"async güncellemesi için kaydediliyor)" + +#, python-format +msgid "ERROR reading HTTP 
response from %s" +msgstr "%s'den HTTP yanıtı okumada HATA" + +#, python-format +msgid "ERROR reading db %s" +msgstr "%s veri tabanı okumada HATA" + +#, python-format +msgid "ERROR rsync failed with %(code)s: %(args)s" +msgstr "HATA rsync %(code)s ile başarısız oldu: %(args)s" + +#, python-format +msgid "ERROR syncing %(file)s with node %(node)s" +msgstr "%(node)s düğümlü %(file)s eş zamanlamada HATA" + +msgid "ERROR trying to replicate" +msgstr "Çoğaltmaya çalışmada HATA" + +#, python-format +msgid "ERROR while trying to clean up %s" +msgstr "%s temizlenmeye çalışırken HATA" + +#, python-format +msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" +msgstr "HATA %(type)s sunucusu %(ip)s:%(port)s/%(device)s re: %(info)s" + +#, python-format +msgid "ERROR with loading suppressions from %s: " +msgstr "HATA %s den baskılamaların yüklenmesinde: " + +#, python-format +msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" +msgstr "HATA uzuk sunucuda %(ip)s:%(port)s/%(device)s" + +#, python-format +msgid "ERROR: Failed to get paths to drive partitions: %s" +msgstr "HATA: Sürücü bölümlerine olan yollar alınamadı: %s" + +msgid "ERROR: An error occurred while retrieving segments" +msgstr "HATA: Dilimler alınırken bir hata oluştu" + +#, python-format +msgid "ERROR: Unable to access %(path)s: %(error)s" +msgstr "HATA: %(path)s e erişilemiyor: %(error)s" + +#, python-format +msgid "ERROR: Unable to run auditing: %s" +msgstr "HATA: Denetim çalıştırılamıyor: %s" + +#, python-format +msgid "Error %(action)s to memcached: %(server)s" +msgstr "Memcached'e hata %(action)s: %(server)s" + +#, python-format +msgid "Error encoding to UTF-8: %s" +msgstr "UTF-8 ile kodlama hatası: %s" + +msgid "Error hashing suffix" +msgstr "Sonek özetini çıkarmada hata" + +#, python-format +msgid "Error in %r with mtime_check_interval: %s" +msgstr "mtime_check_interval ile %r de hata: %s" + +#, python-format +msgid "Error limiting server %s" +msgstr "%s sunucusu 
sınırlandırılırken hata" + +msgid "Error listing devices" +msgstr "Aygıtları listelemede hata" + +#, python-format +msgid "Error on render profiling results: %s" +msgstr "Profilleme sonuçlarının gerçeklenmesinde hata: %s" + +msgid "Error parsing recon cache file" +msgstr "Recon zula dosyasını ayrıştırmada hata" + +msgid "Error reading recon cache file" +msgstr "Recon zula dosyası okumada hata" + +msgid "Error reading ringfile" +msgstr "Halka dosyası okunurken hata" + +msgid "Error reading swift.conf" +msgstr "swift.conf okunurken hata" + +msgid "Error retrieving recon data" +msgstr "Recon verisini almada hata" + +msgid "Error syncing handoff partition" +msgstr "Devir bölümünü eş zamanlamada hata" + +msgid "Error syncing partition" +msgstr "Bölüm eşzamanlamada hata" + +#, python-format +msgid "Error syncing with node: %s" +msgstr "Düğüm ile eş zamanlamada hata: %s" + +#, python-format +msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" +msgstr "" +"Yeniden inşa denenirken hata %(path)s policy#%(policy)d frag#%(frag_index)s" + +msgid "Error: An error occurred" +msgstr "Hata: Bir hata oluştu" + +msgid "Error: missing config path argument" +msgstr "Hata: yapılandırma yolu değişkeni eksik" + +#, python-format +msgid "Error: unable to locate %s" +msgstr "Hata: %s bulunamıyor" + +msgid "Exception dumping recon cache" +msgstr "Yeniden bağlanma zulasının dökümünde istisna" + +msgid "Exception in top-level account reaper loop" +msgstr "Üst seviye hesap biçme döngüsünde istisna" + +msgid "Exception in top-level replication loop" +msgstr "Üst seviye çoğaltma döngüsünde istisna" + +msgid "Exception in top-levelreconstruction loop" +msgstr "Üst seviye yeniden oluşturma döngüsünde istisna" + +#, python-format +msgid "Exception while deleting container %s %s" +msgstr "%s %s kabı silinirken istisna" + +#, python-format +msgid "Exception while deleting object %s %s %s" +msgstr "%s %s %s nesnesi silinirken istisna" + +#, python-format +msgid "Exception 
with %(ip)s:%(port)s/%(device)s" +msgstr "%(ip)s:%(port)s/%(device)s ile istisna" + +#, python-format +msgid "Exception with account %s" +msgstr "%s hesabında istisna" + +#, python-format +msgid "Exception with containers for account %s" +msgstr "%s hesabı için kaplarla ilgili istisna" + +#, python-format +msgid "" +"Exception with objects for container %(container)s for account %(account)s" +msgstr "%(account)s hesabı için %(container)s kabı için nesneler için istisna" + +#, python-format +msgid "Expect: 100-continue on %s" +msgstr "Beklenen: 100-%s üzerinden devam et" + +#, python-format +msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" +msgstr "%(given_domain)s den %(found_domain)s e CNAME zinciri takip ediliyor" + +msgid "Found configs:" +msgstr "Yapılandırmalar bulundu:" + +msgid "Host unreachable" +msgstr "İstemci erişilebilir değil" + +#, python-format +msgid "Incomplete pass on account %s" +msgstr "%s hesabından tamamlanmamış geçiş" + +#, python-format +msgid "Invalid X-Container-Sync-To format %r" +msgstr "Geçersix X-Container-Sync-To biçimi %r" + +#, python-format +msgid "Invalid host %r in X-Container-Sync-To" +msgstr "X-Container-Sync-To'da geçersiz istemci %r" + +#, python-format +msgid "Invalid pending entry %(file)s: %(entry)s" +msgstr "Geçersiz bekleyen girdi %(file)s: %(entry)s" + +#, python-format +msgid "Invalid response %(resp)s from %(full_path)s" +msgstr "%(full_path)s den geçersiz yanıt %(resp)s" + +#, python-format +msgid "Invalid response %(resp)s from %(ip)s" +msgstr "%(ip)s den geçersiz yanıt %(resp)s" + +#, python-format +msgid "" +"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " +"\"https\"." +msgstr "" +"X-Container-Sync-To'da geçersiz şema %r, \"//\", \"http\", veya \"https\" " +"olmalı." + +#, python-format +msgid "Killing long-running rsync: %s" +msgstr "Uzun süre çalışan rsync öldürülüyor: %s" + +msgid "Lockup detected.. killing live coros." +msgstr "Kilitleme algılandı.. 
canlı co-rutinler öldürülüyor." + +#, python-format +msgid "Mapped %(given_domain)s to %(found_domain)s" +msgstr "%(given_domain)s %(found_domain)s eşleştirildi" + +#, python-format +msgid "No %s running" +msgstr "Çalışan %s yok" + +#, python-format +msgid "No cluster endpoint for %r %r" +msgstr "%r %r için küme uç noktası yok" + +#, python-format +msgid "No permission to signal PID %d" +msgstr "%d PID'ine sinyalleme izni yok" + +#, python-format +msgid "No policy with index %s" +msgstr "%s indisine sahip ilke yok" + +#, python-format +msgid "No realm key for %r" +msgstr "%r için realm anahtarı yok" + +#, python-format +msgid "No space left on device for %s (%s)" +msgstr "Aygıtta %s için boş alan kalmadı (%s)" + +#, python-format +msgid "Node error limited %(ip)s:%(port)s (%(device)s)" +msgstr "Düğüm hatası sınırlandı %(ip)s:%(port)s (%(device)s)" + +#, python-format +msgid "Not enough object servers ack'ed (got %d)" +msgstr "Yeterince nesne sunucu ack'lenmedi (%d alındı)" + +#, python-format +msgid "" +"Not found %(sync_from)r => %(sync_to)r - object " +"%(obj_name)r" +msgstr "" +"Bulunamadı %(sync_from)r => %(sync_to)r - nesne %(obj_name)r" + +#, python-format +msgid "Nothing reconstructed for %s seconds." +msgstr "%s saniye boyunca hiçbir şey yeniden oluşturulmadı." + +#, python-format +msgid "Nothing replicated for %s seconds." +msgstr "%s saniyedir hiçbir şey çoğaltılmadı." + +msgid "Object" +msgstr "Nesne" + +msgid "Object PUT" +msgstr "Nesne PUT" + +#, python-format +msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" +msgstr "Nesne PUT 409 için 202 döndürüyor: %(req_timestamp)s <= %(timestamps)r" + +#, python-format +msgid "Object PUT returning 412, %(statuses)r" +msgstr "Nesne PUT 412 döndürüyor, %(statuses)r" + +#, python-format +msgid "" +"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. 
Total " +"quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " +"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " +"%(audit_rate).2f" +msgstr "" +"Nesne denetimi (%(type)s) \"%(mode)s\" kipinde tamamlandı: %(elapsed).02fs. " +"Toplam karantina: %(quars)d, Toplam hata: %(errors)d, Toplam dosya/sn: " +"%(frate).2f, Toplam bayt/sn: %(brate).2f, Denetleme zamanı: %(audit).2f, " +"Oran: %(audit_rate).2f" + +#, python-format +msgid "" +"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " +"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: " +"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " +"%(audit_rate).2f" +msgstr "" +"Nesne denedimi (%(type)s). %(start_time)s den beri: Yerel olarak: %(passes)d " +"geçti, %(quars)d karantinaya alındı, %(errors)d hata dosya/sn: %(frate).2f , " +"bayt/sn: %(brate).2f, Toplam süre: %(total).2f, Denetleme süresi: " +"%(audit).2f, Oran: %(audit_rate).2f" + +#, python-format +msgid "Object audit stats: %s" +msgstr "Nesne denetim istatistikleri: %s" + +#, python-format +msgid "Object reconstruction complete (once). (%.02f minutes)" +msgstr "Nesne yeniden oluşturma tamamlandı (bir kere). (%.02f dakika)" + +#, python-format +msgid "Object reconstruction complete. (%.02f minutes)" +msgstr "Nesne yeniden oluşturma tamamlandı. (%.02f dakika)" + +#, python-format +msgid "Object replication complete (once). (%.02f minutes)" +msgstr "Nesne çoğaltma tamamlandı (bir kere). (%.02f dakika)" + +#, python-format +msgid "Object replication complete. (%.02f minutes)" +msgstr "Nesne çoğaltma tamamlandı. 
(%.02f dakika)" + +#, python-format +msgid "Object servers returned %s mismatched etags" +msgstr "Nesne sunucuları %s eşleşmeyen etag döndürdü" + +#, python-format +msgid "" +"Object update single threaded sweep completed: %(elapsed).02fs, %(success)s " +"successes, %(fail)s failures" +msgstr "" +"Nesne güncelleme tek iş iplikli süpürme tamamlandı: %(elapsed).02fs, " +"%(success)s başarılı, %(fail)s başarısız" + +#, python-format +msgid "Object update sweep completed: %.02fs" +msgstr "Nesne güncelleme süpürmesi tamamlandı: %.02fs" + +#, python-format +msgid "" +"Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s " +"successes, %(fail)s failures" +msgstr "" +"%(device)s ın nesne güncelleme süpürmesi tamamlandı: %(elapsed).02fs, " +"%(success)s başarılı, %(fail)s başarısız" + +msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" +msgstr "X-Container-Sync-To'da parametre, sorgular, ve parçalara izin verilmez" + +#, python-format +msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" +msgstr "" +"Bölüm zamanları: azami %(max).4fs, asgari %(min).4fs, ortalama %(med).4fs" + +#, python-format +msgid "Pass beginning; %s possible containers; %s possible objects" +msgstr "Geçiş başlıyor; %s olası kap; %s olası nesne" + +#, python-format +msgid "Pass completed in %ds; %d objects expired" +msgstr "Geçiş %ds de tamamlandı; %d nesnenin süresi doldu" + +#, python-format +msgid "Pass so far %ds; %d objects expired" +msgstr "Şimdiye kadarki geçiş %ds; %d nesnenin süresi doldu" + +msgid "Path required in X-Container-Sync-To" +msgstr "X-Container-Sync-To'de yol gerekli" + +#, python-format +msgid "Problem cleaning up %s" +msgstr "%s temizliğinde problem" + +#, python-format +msgid "Problem cleaning up %s (%s)" +msgstr "%s temizlemede problem (%s)" + +#, python-format +msgid "Problem writing durable state file %s (%s)" +msgstr "Dayanıklı durum dosyas %s ile ilgili problem (%s)" + +#, python-format +msgid "Profiling Error: 
%s" +msgstr "Profilleme Hatası: %s" + +#, python-format +msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" +msgstr "%(hsh_path)s %(quar_path)s karantinasına alındı çünkü bir dizin değil" + +#, python-format +msgid "" +"Quarantined %(object_path)s to %(quar_path)s because it is not a directory" +msgstr "" +"Bir dizin olmadığından %(object_path)s %(quar_path)s e karantinaya alındı" + +#, python-format +msgid "Quarantined %s to %s due to %s database" +msgstr "%s %s'e karantinaya alındı %s veri tabanı sebebiyle" + +#, python-format +msgid "Quarantining DB %s" +msgstr "DB %s karantinaya alınıyor" + +#, python-format +msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" +msgstr "" +"Oran sınırı uyku kaydı: %(account)s/%(container)s/%(object)s için %(sleep)s" + +#, python-format +msgid "Removed %(remove)d dbs" +msgstr "%(remove)d db silindi" + +#, python-format +msgid "Removing %s objects" +msgstr "%s nesne kaldırılıyor" + +#, python-format +msgid "Removing partition: %s" +msgstr "Bölüm kaldırılıyor: %s" + +#, python-format +msgid "Removing pid file %s with invalid pid" +msgstr "Geçersiz pid'e sahip pid dosyası %s siliniyor" + +#, python-format +msgid "Removing pid file %s with wrong pid %d" +msgstr "%s pid dosyası %d yanlış pid'ine sahip siliniyor" + +#, python-format +msgid "Removing stale pid file %s" +msgstr "Askıdaki pid dosyası siliniyor %s" + +msgid "Replication run OVER" +msgstr "Çoğaltma çalışması BİTTİ" + +#, python-format +msgid "Returning 497 because of blacklisting: %s" +msgstr "Kara listeleme yüzünden 497 döndürülüyor: %s" + +#, python-format +msgid "" +"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " +"Sleep) %(e)s" +msgstr "" +"%(acc)s/%(cont)s/%(obj)s ye %(meth)s için 498 döndürülüyor. Oran sınırı " +"(Azami uyku) %(e)s" + +msgid "Ring change detected. Aborting current reconstruction pass." +msgstr "" +"Zincir değişikliği algılandı. 
Mevcut yeniden oluşturma geçişi iptal ediliyor." + +msgid "Ring change detected. Aborting current replication pass." +msgstr "Zincir değişimi algılandı. Mevcut çoğaltma geçişi iptal ediliyor." + +#, python-format +msgid "Running %s once" +msgstr "%s bir kere çalıştırılıyor" + +msgid "Running object reconstructor in script mode." +msgstr "Nesne yeniden oluşturma betik kipinde çalıştırılıyor." + +msgid "Running object replicator in script mode." +msgstr "Nesne çoğaltıcı betik kipinde çalıştırılıyor." + +#, python-format +msgid "Signal %s pid: %s signal: %s" +msgstr "Sinyal %s pid: %s sinyal: %s" + +#, python-format +msgid "" +"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " +"skipped, %(fail)s failed" +msgstr "" +"%(time)s den beri: %(sync)s eşzamanlandı [%(delete)s silme, %(put)s koyma], " +"%(skip)s atlama, %(fail)s başarısız" + +#, python-format +msgid "" +"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " +"audit" +msgstr "" +"%(time)s den beri: Hesap denetimleri: %(passed)s denetimi geçti, %(failed)s " +"denetimi geçemedi" + +#, python-format +msgid "" +"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " +"audit" +msgstr "" +"%(time)s den beri: Kap denetimleri: %(pass)s denetimi geçti, %(fail)s " +"denetimde başarısız" + +#, python-format +msgid "Skipping %(device)s as it is not mounted" +msgstr "Bağlı olmadığından %(device)s atlanıyor" + +#, python-format +msgid "Skipping %s as it is not mounted" +msgstr "Bağlı olmadığından %s atlanıyor" + +#, python-format +msgid "Starting %s" +msgstr "%s başlatılıyor" + +msgid "Starting object reconstruction pass." +msgstr "Nesne yeniden oluşturma geçişi başlatılıyor." + +msgid "Starting object reconstructor in daemon mode." +msgstr "Nesne yeniden oluşturma artalan işlemi kipinde başlatılıyor." + +msgid "Starting object replication pass." +msgstr "Nesne çoğaltma geçişi başlatılıyor." + +msgid "Starting object replicator in daemon mode." 
+msgstr "Nesne çoğaltıcı artalan işlemi kipinde başlatılıyor." + +#, python-format +msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" +msgstr "%(dst)s (%(time).03f) de %(src)s başarılı rsync'i" + +msgid "The file type are forbidden to access!" +msgstr "Dosya türüne erişim yasaklanmış!" + +#, python-format +msgid "" +"The total %(key)s for the container (%(total)s) does not match the sum of " +"%(key)s across policies (%(sum)s)" +msgstr "" +"(%(total)s) kabı için %(key)s toplamı ilkeler arasındaki %(key)s toplamıyla " +"eşleşmiyor (%(sum)s)" + +#, python-format +msgid "Timeout %(action)s to memcached: %(server)s" +msgstr "Memcached'e zaman aşımı %(action)s: %(server)s" + +#, python-format +msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" +msgstr "%(ip)s:%(port)s/%(device)s ile zaman aşımı istisnası" + +#, python-format +msgid "Trying to %(method)s %(path)s" +msgstr "%(method)s %(path)s deneniyor" + +#, python-format +msgid "Trying to GET %(full_path)s" +msgstr "%(full_path)s GET deneniyor" + +#, python-format +msgid "Trying to get %s status of PUT to %s" +msgstr "%s'e PUT'un %s durumu alınmaya çalışılıyor" + +#, python-format +msgid "Trying to get final status of PUT to %s" +msgstr "%s'e PUT için son durum alınmaya çalışılıyor" + +msgid "Trying to read during GET" +msgstr "GET sırasında okuma deneniyor" + +msgid "Trying to read during GET (retrying)" +msgstr "GET sırasında okuma deneniyor (yeniden deneniyor)" + +msgid "Trying to send to client" +msgstr "İstemciye gönderilmeye çalışılıyor" + +#, python-format +msgid "Trying to sync suffixes with %s" +msgstr "%s e sahip son ekler eşzamanlanmaya çalışılıyor" + +#, python-format +msgid "Trying to write to %s" +msgstr "%s'e yazmaya çalışılıyor" + +msgid "UNCAUGHT EXCEPTION" +msgstr "YAKALANMAYAN İSTİSNA" + +#, python-format +msgid "Unable to find %s config section in %s" +msgstr "%s yapılandırma kısmı %s'de bulunamıyor" + +#, python-format +msgid "Unable to load internal client from config: %r (%s)" 
+msgstr "Yapılandırmadan dahili istemci yüklenemedi: %r (%s)" + +#, python-format +msgid "Unable to locate %s in libc. Leaving as a no-op." +msgstr "%s libc'de bulunamadı. No-op olarak çıkılıyor." + +#, python-format +msgid "Unable to locate config for %s" +msgstr "%s için yapılandırma bulunamıyor" + +#, python-format +msgid "Unable to locate config number %s for %s" +msgstr "Yapılandırma sayısı %s %s için bulunamıyor" + +msgid "" +"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." +msgstr "" +"fallocate, posix_fallocate libc'de bulunamadı. No-op olarak çıkılıyor." + +#, python-format +msgid "Unable to perform fsync() on directory %s: %s" +msgstr "%s dizininde fsynıc() yapılamıyor: %s" + +#, python-format +msgid "Unable to read config from %s" +msgstr "%s'den yapılandırma okunamıyor" + +#, python-format +msgid "Unauth %(sync_from)r => %(sync_to)r" +msgstr "%(sync_from)r => %(sync_to)r yetki al" + +#, python-format +msgid "Unexpected response: %s" +msgstr "Beklenmeyen yanıt: %s" + +msgid "Unhandled exception" +msgstr "Yakalanmamış istisna" + +#, python-format +msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" +msgstr "GET sırasında bilinmeyen istisna: %(account)r %(container)r %(object)r" + +#, python-format +msgid "Update report failed for %(container)s %(dbfile)s" +msgstr "%(container)s %(dbfile)s için güncelleme raporu başarısız" + +#, python-format +msgid "Update report sent for %(container)s %(dbfile)s" +msgstr "%(container)s %(dbfile)s için güncelleme raporu gönderildi" + +msgid "" +"WARNING: SSL should only be enabled for testing purposes. Use external SSL " +"termination for a production deployment." +msgstr "" +"UYARI: SSL yalnızca test amaçlı etkinleştirilmelidir. Üretim için kurulumda " +"harici SSL sonlandırma kullanın." + +msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" +msgstr "UYARI: Dosya göstericisi sınırı değiştirilemiyor. Root değil misiniz?" 
+ +msgid "WARNING: Unable to modify max process limit. Running as non-root?" +msgstr "UYARI: Azami süreç limiti değiştirilemiyor. Root değil misiniz?" + +msgid "WARNING: Unable to modify memory limit. Running as non-root?" +msgstr "UYARI: Hafıza sınırı değiştirilemiyor. Root değil misiniz?" + +#, python-format +msgid "Waited %s seconds for %s to die; giving up" +msgstr "%s saniye %s'in ölmesi için beklendi; vaz geçiliyor" + +msgid "Warning: Cannot ratelimit without a memcached client" +msgstr "Uyarı: Memcached istemcisi olmadan oran sınırlama yapılamaz" + +#, python-format +msgid "method %s is not allowed." +msgstr "%s metoduna izin verilmez." + +msgid "no log file found" +msgstr "kayıt dosyası bulunamadı" + +msgid "odfpy not installed." +msgstr "odfpy kurulu değil." + +#, python-format +msgid "plotting results failed due to %s" +msgstr "çizdirme sonuçlaru %s sebebiyle başarısız" + +msgid "python-matplotlib not installed." +msgstr "python-matplotlib kurulu değil." diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po index 7f02e6febd..823a9a1a8c 100644 --- a/swift/locale/zh_CN/LC_MESSAGES/swift.po +++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po @@ -8,16 +8,16 @@ msgid "" msgstr "" "Project-Id-Version: Swift\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-07-09 06:14+0000\n" -"PO-Revision-Date: 2015-07-09 05:58+0000\n" +"POT-Creation-Date: 2015-08-04 06:29+0000\n" +"PO-Revision-Date: 2015-07-28 00:33+0000\n" "Last-Translator: openstackjenkins \n" -"Language-Team: Chinese (China) (http://www.transifex.com/p/swift/language/" -"zh_CN/)\n" +"Language-Team: Chinese (China) (http://www.transifex.com/openstack/swift/" +"language/zh_CN/)\n" "Plural-Forms: nplurals=1; plural=0\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" +"Generated-By: Babel 2.0\n" msgid "" "\n" diff --git a/swift/obj/auditor.py b/swift/obj/auditor.py index 
4c78d07af3..4875bb2520 100644 --- a/swift/obj/auditor.py +++ b/swift/obj/auditor.py @@ -79,7 +79,7 @@ class AuditorWorker(object): else: description = _(' - %s') % device_dir_str self.logger.info(_('Begin object audit "%s" mode (%s%s)') % - (mode, self.auditor_type, description)) + (mode, self.auditor_type, description)) begin = reported = time.time() self.total_bytes_processed = 0 self.total_files_processed = 0 @@ -331,7 +331,7 @@ class ObjectAuditor(Daemon): try: self.audit_loop(parent, zbo_fps, **kwargs) except (Exception, Timeout) as err: - self.logger.exception(_('ERROR auditing: %s' % err)) + self.logger.exception(_('ERROR auditing: %s'), err) self._sleep() def run_once(self, *args, **kwargs): @@ -352,4 +352,4 @@ class ObjectAuditor(Daemon): self.audit_loop(parent, zbo_fps, override_devices=override_devices, **kwargs) except (Exception, Timeout) as err: - self.logger.exception(_('ERROR auditing: %s' % err)) + self.logger.exception(_('ERROR auditing: %s'), err) diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index 2519e1dd5d..d3937cf9a5 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -30,7 +30,7 @@ The remaining methods in this module are considered implementation specific and are also not considered part of the backend API. 
""" -import cPickle as pickle +import six.moves.cPickle as pickle import errno import fcntl import os @@ -175,11 +175,11 @@ def extract_policy(obj_path): objects-5/179/485dc017205a81df3af616d917c90179/1401811134.873649.data - :param obj_path: device-relative path of an object + :param obj_path: device-relative path of an object, or the full path :returns: a :class:`~swift.common.storage_policy.BaseStoragePolicy` or None """ try: - obj_portion = obj_path[obj_path.index(DATADIR_BASE):] + obj_portion = obj_path[obj_path.rindex(DATADIR_BASE):] obj_dirname = obj_portion[:obj_portion.index('/')] except Exception: return None @@ -221,128 +221,6 @@ def quarantine_renamer(device_path, corrupted_file_path): return to_dir -def get_ondisk_files(files, datadir): - """ - Given a simple list of files names, determine the files to use. - - :params files: simple set of files as a python list - :params datadir: directory name files are from for convenience - :returns: a tuple of data, meta and ts (tombstone) files, in one of - two states: - - * ts_file is not None, data_file is None, meta_file is None - - object is considered deleted - - * data_file is not None, ts_file is None - - object exists, and optionally has fast-POST metadata - """ - files.sort(reverse=True) - data_file = meta_file = ts_file = None - for afile in files: - assert ts_file is None, "On-disk file search loop" \ - " continuing after tombstone, %s, encountered" % ts_file - assert data_file is None, "On-disk file search loop" \ - " continuing after data file, %s, encountered" % data_file - if afile.endswith('.ts'): - meta_file = None - ts_file = join(datadir, afile) - break - if afile.endswith('.meta') and not meta_file: - meta_file = join(datadir, afile) - # NOTE: this does not exit this loop, since a fast-POST - # operation just updates metadata, writing one or more - # .meta files, the data file will have an older timestamp, - # so we keep looking. 
- continue - if afile.endswith('.data'): - data_file = join(datadir, afile) - break - assert ((data_file is None and meta_file is None and ts_file is None) - or (ts_file is not None and data_file is None - and meta_file is None) - or (data_file is not None and ts_file is None)), \ - "On-disk file search algorithm contract is broken: data_file:" \ - " %s, meta_file: %s, ts_file: %s" % (data_file, meta_file, ts_file) - return data_file, meta_file, ts_file - - -def hash_cleanup_listdir(hsh_path, reclaim_age=ONE_WEEK): - """ - List contents of a hash directory and clean up any old files. - - :param hsh_path: object hash path - :param reclaim_age: age in seconds at which to remove tombstones - :returns: list of files remaining in the directory, reverse sorted - """ - files = listdir(hsh_path) - if len(files) == 1: - if files[0].endswith('.ts'): - # remove tombstones older than reclaim_age - ts = files[0].rsplit('.', 1)[0] - if (time.time() - float(Timestamp(ts))) > reclaim_age: - remove_file(join(hsh_path, files[0])) - files.remove(files[0]) - elif files: - files.sort(reverse=True) - data_file, meta_file, ts_file = get_ondisk_files(files, '') - newest_file = data_file or ts_file - for filename in list(files): - if ((filename < newest_file) - or (meta_file - and filename.endswith('.meta') - and filename < meta_file)): - remove_file(join(hsh_path, filename)) - files.remove(filename) - return files - - -def hash_suffix(path, reclaim_age): - """ - Performs reclamation and returns an md5 of all (remaining) files. 
- - :param reclaim_age: age in seconds at which to remove tombstones - :raises PathNotDir: if given path is not a valid directory - :raises OSError: for non-ENOTDIR errors - """ - md5 = hashlib.md5() - try: - path_contents = sorted(os.listdir(path)) - except OSError as err: - if err.errno in (errno.ENOTDIR, errno.ENOENT): - raise PathNotDir() - raise - for hsh in path_contents: - hsh_path = join(path, hsh) - try: - files = hash_cleanup_listdir(hsh_path, reclaim_age) - except OSError as err: - if err.errno == errno.ENOTDIR: - partition_path = dirname(path) - objects_path = dirname(partition_path) - device_path = dirname(objects_path) - quar_path = quarantine_renamer(device_path, hsh_path) - logging.exception( - _('Quarantined %(hsh_path)s to %(quar_path)s because ' - 'it is not a directory'), {'hsh_path': hsh_path, - 'quar_path': quar_path}) - continue - raise - if not files: - try: - os.rmdir(hsh_path) - except OSError: - pass - for filename in files: - md5.update(filename) - try: - os.rmdir(path) - except OSError: - pass - return md5.hexdigest() - - def invalidate_hash(suffix_dir): """ Invalidates the hash for a suffix_dir in the partition's hashes file. @@ -368,68 +246,6 @@ def invalidate_hash(suffix_dir): write_pickle(hashes, hashes_file, partition_dir, PICKLE_PROTOCOL) -def get_hashes(partition_dir, recalculate=None, do_listdir=False, - reclaim_age=ONE_WEEK): - """ - Get a list of hashes for the suffix dir. do_listdir causes it to mistrust - the hash cache for suffix existence at the (unexpectedly high) cost of a - listdir. reclaim_age is just passed on to hash_suffix. 
- - :param partition_dir: absolute path of partition to get hashes for - :param recalculate: list of suffixes which should be recalculated when got - :param do_listdir: force existence check for all hashes in the partition - :param reclaim_age: age at which to remove tombstones - - :returns: tuple of (number of suffix dirs hashed, dictionary of hashes) - """ - - hashed = 0 - hashes_file = join(partition_dir, HASH_FILE) - modified = False - force_rewrite = False - hashes = {} - mtime = -1 - - if recalculate is None: - recalculate = [] - - try: - with open(hashes_file, 'rb') as fp: - hashes = pickle.load(fp) - mtime = getmtime(hashes_file) - except Exception: - do_listdir = True - force_rewrite = True - if do_listdir: - for suff in os.listdir(partition_dir): - if len(suff) == 3: - hashes.setdefault(suff, None) - modified = True - hashes.update((suffix, None) for suffix in recalculate) - for suffix, hash_ in hashes.items(): - if not hash_: - suffix_dir = join(partition_dir, suffix) - try: - hashes[suffix] = hash_suffix(suffix_dir, reclaim_age) - hashed += 1 - except PathNotDir: - del hashes[suffix] - except OSError: - logging.exception(_('Error hashing suffix')) - modified = True - if modified: - with lock_path(partition_dir): - if force_rewrite or not exists(hashes_file) or \ - getmtime(hashes_file) == mtime: - write_pickle( - hashes, hashes_file, partition_dir, PICKLE_PROTOCOL) - return hashed, hashes - return get_hashes(partition_dir, recalculate, do_listdir, - reclaim_age) - else: - return hashed, hashes - - class AuditLocation(object): """ Represents an object location to be audited. 
@@ -553,8 +369,7 @@ class DiskFileRouter(object): return self.policy_to_manager[policy] -@DiskFileRouter.register(REPL_POLICY) -class DiskFileManager(object): +class BaseDiskFileManager(object): """ Management class for devices, providing common place for shared parameters and methods not provided by the DiskFile class (which primarily services @@ -577,13 +392,9 @@ class DiskFileManager(object): :param logger: caller provided logger """ - diskfile_cls = None # DiskFile will be set after that class is defined + diskfile_cls = None # must be set by subclasses - # module level functions dropped to implementation specific - hash_cleanup_listdir = strip_self(hash_cleanup_listdir) - _get_hashes = strip_self(get_hashes) invalidate_hash = strip_self(invalidate_hash) - get_ondisk_files = strip_self(get_ondisk_files) quarantine_renamer = strip_self(quarantine_renamer) def __init__(self, conf, logger): @@ -631,6 +442,277 @@ class DiskFileManager(object): max_pipe_size = int(f.read()) self.pipe_size = min(max_pipe_size, self.disk_chunk_size) + def parse_on_disk_filename(self, filename): + """ + Parse an on disk file name. + + :param filename: the data file name including extension + :returns: a dict, with keys for timestamp, and ext:: + + * timestamp is a :class:`~swift.common.utils.Timestamp` + * ext is a string, the file extension including the leading dot or + the empty string if the filename has no extension. + + Subclases may add further keys to the returned dict. + + :raises DiskFileError: if any part of the filename is not able to be + validated. + """ + raise NotImplementedError + + def _gather_on_disk_file(self, filename, ext, context, frag_index=None, + **kwargs): + """ + Called by gather_ondisk_files() for each file in an object + datadir in reverse sorted order. If a file is considered part of a + valid on-disk file set it will be added to the context dict, keyed by + its extension. 
If a file is considered to be obsolete it will be added + to a list stored under the key 'obsolete' in the context dict. + + :param filename: name of file to be accepted or not + :param ext: extension part of filename + :param context: a context dict that may have been populated by previous + calls to this method + :returns: True if a valid file set has been found, False otherwise + """ + raise NotImplementedError + + def _verify_on_disk_files(self, accepted_files, **kwargs): + """ + Verify that the final combination of on disk files complies with the + diskfile contract. + + :param accepted_files: files that have been found and accepted + :returns: True if the file combination is compliant, False otherwise + """ + raise NotImplementedError + + def gather_ondisk_files(self, files, include_obsolete=False, + verify=False, **kwargs): + """ + Given a simple list of files names, iterate over them to determine the + files that constitute a valid object, and optionally determine the + files that are obsolete and could be deleted. Note that some files may + fall into neither category. + + :param files: a list of file names. + :param include_obsolete: By default the iteration will stop when a + valid file set has been found. Setting this + argument to True will cause the iteration to + continue in order to find all obsolete files. + :param verify: if True verify that the ondisk file contract has not + been violated, otherwise do not verify. + :returns: a dict that may contain: valid on disk files keyed by their + filename extension; a list of obsolete files stored under the + key 'obsolete'. 
+ """ + files.sort(reverse=True) + results = {} + for afile in files: + ts_file = results.get('.ts') + data_file = results.get('.data') + if not include_obsolete: + assert ts_file is None, "On-disk file search loop" \ + " continuing after tombstone, %s, encountered" % ts_file + assert data_file is None, "On-disk file search loop" \ + " continuing after data file, %s, encountered" % data_file + + ext = splitext(afile)[1] + if self._gather_on_disk_file( + afile, ext, results, **kwargs): + if not include_obsolete: + break + + if verify: + assert self._verify_on_disk_files( + results, **kwargs), \ + "On-disk file search algorithm contract is broken: %s" \ + % results.values() + return results + + def get_ondisk_files(self, files, datadir, **kwargs): + """ + Given a simple list of files names, determine the files to use. + + :param files: simple set of files as a python list + :param datadir: directory name files are from for convenience + :returns: a tuple of data, meta, and tombstone + """ + # maintain compatibility with 'legacy' get_ondisk_files return value + accepted_files = self.gather_ondisk_files(files, verify=True, **kwargs) + result = [(join(datadir, accepted_files.get(ext)) + if accepted_files.get(ext) else None) + for ext in ('.data', '.meta', '.ts')] + return tuple(result) + + def cleanup_ondisk_files(self, hsh_path, reclaim_age=ONE_WEEK, **kwargs): + """ + Clean up on-disk files that are obsolete and gather the set of valid + on-disk files for an object. + + :param hsh_path: object hash path + :param reclaim_age: age in seconds at which to remove tombstones + :param frag_index: if set, search for a specific fragment index .data + file, otherwise accept the first valid .data file + :returns: a dict that may contain: valid on disk files keyed by their + filename extension; a list of obsolete files stored under the + key 'obsolete'; a list of files remaining in the directory, + reverse sorted, stored under the key 'files'. 
+ """ + def is_reclaimable(filename): + timestamp = self.parse_on_disk_filename(filename)['timestamp'] + return (time.time() - float(timestamp)) > reclaim_age + + files = listdir(hsh_path) + files.sort(reverse=True) + results = self.gather_ondisk_files(files, include_obsolete=True, + **kwargs) + # TODO ref to durables here + if '.durable' in results and not results.get('fragments'): + # a .durable with no .data is deleted as soon as it is found + results.setdefault('obsolete', []).append(results.pop('.durable')) + if '.ts' in results and is_reclaimable(results['.ts']): + results.setdefault('obsolete', []).append(results.pop('.ts')) + for filename in results.get('fragments_without_durable', []): + # stray fragments are not deleted until reclaim-age + if is_reclaimable(filename): + results.setdefault('obsolete', []).append(filename) + for filename in results.get('obsolete', []): + remove_file(join(hsh_path, filename)) + files.remove(filename) + results['files'] = files + return results + + def hash_cleanup_listdir(self, hsh_path, reclaim_age=ONE_WEEK): + """ + List contents of a hash directory and clean up any old files. + For EC policy, delete files older than a .durable or .ts file. 
+ + :param hsh_path: object hash path + :param reclaim_age: age in seconds at which to remove tombstones + :returns: list of files remaining in the directory, reverse sorted + """ + # maintain compatibility with 'legacy' hash_cleanup_listdir + # return value + return self.cleanup_ondisk_files( + hsh_path, reclaim_age=reclaim_age)['files'] + + def _hash_suffix_dir(self, path, mapper, reclaim_age): + hashes = defaultdict(hashlib.md5) + try: + path_contents = sorted(os.listdir(path)) + except OSError as err: + if err.errno in (errno.ENOTDIR, errno.ENOENT): + raise PathNotDir() + raise + for hsh in path_contents: + hsh_path = join(path, hsh) + try: + files = self.hash_cleanup_listdir(hsh_path, reclaim_age) + except OSError as err: + if err.errno == errno.ENOTDIR: + partition_path = dirname(path) + objects_path = dirname(partition_path) + device_path = dirname(objects_path) + quar_path = quarantine_renamer(device_path, hsh_path) + logging.exception( + _('Quarantined %(hsh_path)s to %(quar_path)s because ' + 'it is not a directory'), {'hsh_path': hsh_path, + 'quar_path': quar_path}) + continue + raise + if not files: + try: + os.rmdir(hsh_path) + except OSError: + pass + for filename in files: + key, value = mapper(filename) + hashes[key].update(value) + try: + os.rmdir(path) + except OSError as e: + if e.errno == errno.ENOENT: + raise PathNotDir() + else: + # if we remove it, pretend like it wasn't there to begin with so + # that the suffix key gets removed + raise PathNotDir() + return hashes + + def _hash_suffix(self, path, reclaim_age): + """ + Performs reclamation and returns an md5 of all (remaining) files. + + :param reclaim_age: age in seconds at which to remove tombstones + :raises PathNotDir: if given path is not a valid directory + :raises OSError: for non-ENOTDIR errors + """ + raise NotImplementedError + + def _get_hashes(self, partition_path, recalculate=None, do_listdir=False, + reclaim_age=None): + """ + Get a list of hashes for the suffix dir. 
do_listdir causes it to + mistrust the hash cache for suffix existence at the (unexpectedly high) + cost of a listdir. reclaim_age is just passed on to hash_suffix. + + :param partition_path: absolute path of partition to get hashes for + :param recalculate: list of suffixes which should be recalculated when + got + :param do_listdir: force existence check for all hashes in the + partition + :param reclaim_age: age at which to remove tombstones + + :returns: tuple of (number of suffix dirs hashed, dictionary of hashes) + """ + reclaim_age = reclaim_age or self.reclaim_age + hashed = 0 + hashes_file = join(partition_path, HASH_FILE) + modified = False + force_rewrite = False + hashes = {} + mtime = -1 + + if recalculate is None: + recalculate = [] + + try: + with open(hashes_file, 'rb') as fp: + hashes = pickle.load(fp) + mtime = getmtime(hashes_file) + except Exception: + do_listdir = True + force_rewrite = True + if do_listdir: + for suff in os.listdir(partition_path): + if len(suff) == 3: + hashes.setdefault(suff, None) + modified = True + hashes.update((suffix, None) for suffix in recalculate) + for suffix, hash_ in hashes.items(): + if not hash_: + suffix_dir = join(partition_path, suffix) + try: + hashes[suffix] = self._hash_suffix(suffix_dir, reclaim_age) + hashed += 1 + except PathNotDir: + del hashes[suffix] + except OSError: + logging.exception(_('Error hashing suffix')) + modified = True + if modified: + with lock_path(partition_path): + if force_rewrite or not exists(hashes_file) or \ + getmtime(hashes_file) == mtime: + write_pickle( + hashes, hashes_file, partition_path, PICKLE_PROTOCOL) + return hashed, hashes + return self._get_hashes(partition_path, recalculate, do_listdir, + reclaim_age) + else: + return hashed, hashes + def construct_dev_path(self, device): """ Construct the path to a device without checking if it is mounted. 
@@ -801,7 +883,8 @@ class DiskFileManager(object): continue yield (os.path.join(partition_path, suffix), suffix) - def yield_hashes(self, device, partition, policy, suffixes=None, **kwargs): + def yield_hashes(self, device, partition, policy, + suffixes=None, **kwargs): """ Yields tuples of (full_path, hash_only, timestamp) for object information stored for the given device, partition, and @@ -825,14 +908,27 @@ class DiskFileManager(object): for suffix_path, suffix in suffixes: for object_hash in self._listdir(suffix_path): object_path = os.path.join(suffix_path, object_hash) - for name in self.hash_cleanup_listdir( - object_path, self.reclaim_age): - ts, ext = name.rsplit('.', 1) - yield (object_path, object_hash, ts) - break + newest_valid_file = None + try: + results = self.cleanup_ondisk_files( + object_path, self.reclaim_age, **kwargs) + newest_valid_file = (results.get('.meta') + or results.get('.data') + or results.get('.ts')) + if newest_valid_file: + timestamp = self.parse_on_disk_filename( + newest_valid_file)['timestamp'] + yield (object_path, object_hash, timestamp.internal) + except AssertionError as err: + self.logger.debug('Invalid file set in %s (%s)' % ( + object_path, err)) + except DiskFileError as err: + self.logger.debug( + 'Invalid diskfile filename %r in %r (%s)' % ( + newest_valid_file, object_path, err)) -class DiskFileWriter(object): +class BaseDiskFileWriter(object): """ Encapsulation of the write context for servicing PUT REST API requests. Serves as the context manager object for the @@ -944,21 +1040,10 @@ class DiskFileWriter(object): """ Finalize writing the file on disk. - For this implementation, this method is responsible for renaming the - temporary file to the final name and directory location. This method - should be called after the final call to - :func:`swift.obj.diskfile.DiskFileWriter.write`. 
- :param metadata: dictionary of metadata to be associated with the object """ - timestamp = Timestamp(metadata['X-Timestamp']).internal - metadata['name'] = self._name - target_path = join(self._datadir, timestamp + self._extension) - cleanup = True - - self._threadpool.force_run_in_thread( - self._finalize_put, metadata, target_path, cleanup) + raise NotImplementedError def commit(self, timestamp): """ @@ -971,7 +1056,7 @@ class DiskFileWriter(object): pass -class DiskFileReader(object): +class BaseDiskFileReader(object): """ Encapsulation of the WSGI read context for servicing GET REST API requests. Serves as the context manager object for the @@ -1266,7 +1351,7 @@ class DiskFileReader(object): fp.close() -class DiskFile(object): +class BaseDiskFile(object): """ Manage object files. @@ -1279,6 +1364,10 @@ class DiskFile(object): The arguments to the constructor are considered implementation specific. The API does not define the constructor arguments. + The following path format is used for data file locations: + ///// + . + :param mgr: associated DiskFileManager instance :param device_path: path to the target device or drive :param threadpool: thread pool to use for blocking operations @@ -1291,9 +1380,8 @@ class DiskFile(object): :param use_splice: if true, use zero-copy splice() to send data :param pipe_size: size of pipe buffer used in zero-copy operations """ - - reader_cls = DiskFileReader - writer_cls = DiskFileWriter + reader_cls = None # must be set by subclasses + writer_cls = None # must be set by subclasses def __init__(self, mgr, device_path, threadpool, partition, account=None, container=None, obj=None, _datadir=None, @@ -1466,26 +1554,7 @@ class DiskFile(object): object exists, and optionally has fast-POST metadata """ - try: - files = os.listdir(self._datadir) - except OSError as err: - if err.errno == errno.ENOTDIR: - # If there's a file here instead of a directory, quarantine - # it; something's gone wrong somewhere. 
- raise self._quarantine( - # hack: quarantine_renamer actually renames the directory - # enclosing the filename you give it, but here we just - # want this one file and not its parent. - os.path.join(self._datadir, "made-up-filename"), - "Expected directory, found file at %s" % self._datadir) - elif err.errno != errno.ENOENT: - raise DiskFileError( - "Error listing directory %s: %s" % (self._datadir, err)) - # The data directory does not exist, so the object cannot exist. - fileset = (None, None, None) - else: - fileset = self.manager.get_ondisk_files(files, self._datadir) - return fileset + raise NotImplementedError def _construct_exception_from_ts_file(self, ts_file): """ @@ -1781,15 +1850,195 @@ class DiskFile(object): deleter._extension = '.ts' deleter.put({'X-Timestamp': timestamp.internal}) -# TODO: move DiskFileManager definition down here -DiskFileManager.diskfile_cls = DiskFile - -class ECDiskFileReader(DiskFileReader): +class DiskFileReader(BaseDiskFileReader): pass -class ECDiskFileWriter(DiskFileWriter): +class DiskFileWriter(BaseDiskFileWriter): + def put(self, metadata): + """ + Finalize writing the file on disk. + + For this implementation, this method is responsible for renaming the + temporary file to the final name and directory location. This method + should be called after the final call to + :func:`swift.obj.diskfile.DiskFileWriter.write`. + + :param metadata: dictionary of metadata to be associated with the + object + """ + timestamp = Timestamp(metadata['X-Timestamp']).internal + metadata['name'] = self._name + target_path = join(self._datadir, timestamp + self._extension) + cleanup = True + + self._threadpool.force_run_in_thread( + self._finalize_put, metadata, target_path, cleanup) + + +class DiskFile(BaseDiskFile): + reader_cls = DiskFileReader + writer_cls = DiskFileWriter + + def _get_ondisk_file(self): + """ + Do the work to figure out if the data directory exists, and if so, + determine the on-disk files to use. 
+ + :returns: a tuple of data, meta and ts (tombstone) files, in one of + three states: + + * all three are None + + data directory does not exist, or there are no files in + that directory + + * ts_file is not None, data_file is None, meta_file is None + + object is considered deleted + + * data_file is not None, ts_file is None + + object exists, and optionally has fast-POST metadata + """ + try: + files = os.listdir(self._datadir) + except OSError as err: + if err.errno == errno.ENOTDIR: + # If there's a file here instead of a directory, quarantine + # it; something's gone wrong somewhere. + raise self._quarantine( + # hack: quarantine_renamer actually renames the directory + # enclosing the filename you give it, but here we just + # want this one file and not its parent. + os.path.join(self._datadir, "made-up-filename"), + "Expected directory, found file at %s" % self._datadir) + elif err.errno != errno.ENOENT: + raise DiskFileError( + "Error listing directory %s: %s" % (self._datadir, err)) + # The data directory does not exist, so the object cannot exist. + fileset = (None, None, None) + else: + fileset = self.manager.get_ondisk_files(files, self._datadir) + return fileset + + +@DiskFileRouter.register(REPL_POLICY) +class DiskFileManager(BaseDiskFileManager): + diskfile_cls = DiskFile + + def parse_on_disk_filename(self, filename): + """ + Returns the timestamp extracted .data file name. + + :param filename: the data file name including extension + :returns: a dict, with keys for timestamp, and ext:: + + * timestamp is a :class:`~swift.common.utils.Timestamp` + * ext is a string, the file extension including the leading dot or + the empty string if the filename has no extenstion. + + :raises DiskFileError: if any part of the filename is not able to be + validated. 
+ """ + filename, ext = splitext(filename) + return { + 'timestamp': Timestamp(filename), + 'ext': ext, + } + + def _gather_on_disk_file(self, filename, ext, context, frag_index=None, + **kwargs): + """ + Called by gather_ondisk_files() for each file in an object + datadir in reverse sorted order. If a file is considered part of a + valid on-disk file set it will be added to the context dict, keyed by + its extension. If a file is considered to be obsolete it will be added + to a list stored under the key 'obsolete' in the context dict. + + :param filename: name of file to be accepted or not + :param ext: extension part of filename + :param context: a context dict that may have been populated by previous + calls to this method + :returns: True if a valid file set has been found, False otherwise + """ + + # if first file with given extension then add filename to context + # dict and return True + accept_first = lambda: context.setdefault(ext, filename) == filename + # add the filename to the list of obsolete files in context dict + discard = lambda: context.setdefault('obsolete', []).append(filename) + # set a flag in the context dict indicating that a valid fileset has + # been found + set_valid_fileset = lambda: context.setdefault('found_valid', True) + # return True if the valid fileset flag is set in the context dict + have_valid_fileset = lambda: context.get('found_valid') + + if ext == '.data': + if have_valid_fileset(): + # valid fileset means we must have a newer + # .data or .ts, so discard the older .data file + discard() + else: + accept_first() + set_valid_fileset() + elif ext == '.ts': + if have_valid_fileset() or not accept_first(): + # newer .data or .ts already found so discard this + discard() + # if not have_valid_fileset(): + # # remove any .meta that may have been previously found + # context['.meta'] = None + set_valid_fileset() + elif ext == '.meta': + if have_valid_fileset() or not accept_first(): + # newer .data, .durable or .ts already found 
so discard this + discard() + else: + # ignore unexpected files + pass + return have_valid_fileset() + + def _verify_on_disk_files(self, accepted_files, **kwargs): + """ + Verify that the final combination of on disk files complies with the + diskfile contract. + + :param accepted_files: files that have been found and accepted + :returns: True if the file combination is compliant, False otherwise + """ + # mimic legacy behavior - .meta is ignored when .ts is found + if accepted_files.get('.ts'): + accepted_files['.meta'] = None + + data_file, meta_file, ts_file, durable_file = tuple( + [accepted_files.get(ext) + for ext in ('.data', '.meta', '.ts', '.durable')]) + + return ((data_file is None and meta_file is None and ts_file is None) + or (ts_file is not None and data_file is None + and meta_file is None) + or (data_file is not None and ts_file is None)) + + def _hash_suffix(self, path, reclaim_age): + """ + Performs reclamation and returns an md5 of all (remaining) files. + + :param reclaim_age: age in seconds at which to remove tombstones + :raises PathNotDir: if given path is not a valid directory + :raises OSError: for non-ENOTDIR errors + """ + mapper = lambda filename: (None, filename) + hashes = self._hash_suffix_dir(path, mapper, reclaim_age) + return hashes[None].hexdigest() + + +class ECDiskFileReader(BaseDiskFileReader): + pass + + +class ECDiskFileWriter(BaseDiskFileWriter): def _finalize_durable(self, durable_file_path): exc = None @@ -1862,7 +2111,7 @@ class ECDiskFileWriter(DiskFileWriter): self._finalize_put, metadata, target_path, cleanup) -class ECDiskFile(DiskFile): +class ECDiskFile(BaseDiskFile): reader_cls = ECDiskFileReader writer_cls = ECDiskFileWriter @@ -1927,7 +2176,7 @@ class ECDiskFile(DiskFile): @DiskFileRouter.register(EC_POLICY) -class ECDiskFileManager(DiskFileManager): +class ECDiskFileManager(BaseDiskFileManager): diskfile_cls = ECDiskFile def validate_fragment_index(self, frag_index): @@ -2134,267 +2383,25 @@ class 
ECDiskFileManager(DiskFileManager): or (durable_file is not None and meta_file is None and ts_file is None)) - def gather_ondisk_files(self, files, include_obsolete=False, - frag_index=None, verify=False, **kwargs): - """ - Given a simple list of files names, iterate over them to determine the - files that constitute a valid object, and optionally determine the - files that are obsolete and could be deleted. Note that some files may - fall into neither category. - - :param files: a list of file names. - :param include_obsolete: By default the iteration will stop when a - valid file set has been found. Setting this - argument to True will cause the iteration to - continue in order to find all obsolete files. - :param frag_index: if set, search for a specific fragment index .data - file, otherwise accept the first valid .data file. - :returns: a dict that may contain: valid on disk files keyed by their - filename extension; a list of obsolete files stored under the - key 'obsolete'. - """ - # This visitor pattern enables future refactoring of other disk - # manager implementations to re-use this method and override - # _gather_ondisk_file and _verify_ondisk_files to apply implementation - # specific selection and verification of on-disk files. 
- files.sort(reverse=True) - results = {} - for afile in files: - ts_file = results.get('.ts') - data_file = results.get('.data') - if not include_obsolete: - assert ts_file is None, "On-disk file search loop" \ - " continuing after tombstone, %s, encountered" % ts_file - assert data_file is None, "On-disk file search loop" \ - " continuing after data file, %s, encountered" % data_file - - ext = splitext(afile)[1] - if self._gather_on_disk_file( - afile, ext, results, frag_index=frag_index, **kwargs): - if not include_obsolete: - break - - if verify: - assert self._verify_on_disk_files( - results, frag_index=frag_index, **kwargs), \ - "On-disk file search algorithm contract is broken: %s" \ - % results.values() - return results - - def get_ondisk_files(self, files, datadir, **kwargs): - """ - Given a simple list of files names, determine the files to use. - - :param files: simple set of files as a python list - :param datadir: directory name files are from for convenience - :returns: a tuple of data, meta, and tombstone - """ - # maintain compatibility with 'legacy' get_ondisk_files return value - accepted_files = self.gather_ondisk_files(files, verify=True, **kwargs) - result = [(join(datadir, accepted_files.get(ext)) - if accepted_files.get(ext) else None) - for ext in ('.data', '.meta', '.ts')] - return tuple(result) - - def cleanup_ondisk_files(self, hsh_path, reclaim_age=ONE_WEEK, - frag_index=None): - """ - Clean up on-disk files that are obsolete and gather the set of valid - on-disk files for an object. 
- - :param hsh_path: object hash path - :param reclaim_age: age in seconds at which to remove tombstones - :param frag_index: if set, search for a specific fragment index .data - file, otherwise accept the first valid .data file - :returns: a dict that may contain: valid on disk files keyed by their - filename extension; a list of obsolete files stored under the - key 'obsolete'; a list of files remaining in the directory, - reverse sorted, stored under the key 'files'. - """ - def is_reclaimable(filename): - timestamp = self.parse_on_disk_filename(filename)['timestamp'] - return (time.time() - float(timestamp)) > reclaim_age - - files = listdir(hsh_path) - files.sort(reverse=True) - results = self.gather_ondisk_files(files, include_obsolete=True, - frag_index=frag_index) - if '.durable' in results and not results.get('fragments'): - # a .durable with no .data is deleted as soon as it is found - results.setdefault('obsolete', []).append(results.pop('.durable')) - if '.ts' in results and is_reclaimable(results['.ts']): - results.setdefault('obsolete', []).append(results.pop('.ts')) - for filename in results.get('fragments_without_durable', []): - # stray fragments are not deleted until reclaim-age - if is_reclaimable(filename): - results.setdefault('obsolete', []).append(filename) - for filename in results.get('obsolete', []): - remove_file(join(hsh_path, filename)) - files.remove(filename) - results['files'] = files - return results - - def hash_cleanup_listdir(self, hsh_path, reclaim_age=ONE_WEEK): - """ - List contents of a hash directory and clean up any old files. - For EC policy, delete files older than a .durable or .ts file. 
- - :param hsh_path: object hash path - :param reclaim_age: age in seconds at which to remove tombstones - :returns: list of files remaining in the directory, reverse sorted - """ - # maintain compatibility with 'legacy' hash_cleanup_listdir - # return value - return self.cleanup_ondisk_files( - hsh_path, reclaim_age=reclaim_age)['files'] - - def yield_hashes(self, device, partition, policy, - suffixes=None, frag_index=None): - """ - This is the same as the replicated yield_hashes except when frag_index - is provided data files for fragment indexes not matching the given - frag_index are skipped. - """ - dev_path = self.get_dev_path(device) - if not dev_path: - raise DiskFileDeviceUnavailable() - if suffixes is None: - suffixes = self.yield_suffixes(device, partition, policy) - else: - partition_path = os.path.join(dev_path, - get_data_dir(policy), - str(partition)) - suffixes = ( - (os.path.join(partition_path, suffix), suffix) - for suffix in suffixes) - for suffix_path, suffix in suffixes: - for object_hash in self._listdir(suffix_path): - object_path = os.path.join(suffix_path, object_hash) - newest_valid_file = None - try: - results = self.cleanup_ondisk_files( - object_path, self.reclaim_age, frag_index=frag_index) - newest_valid_file = (results.get('.meta') - or results.get('.data') - or results.get('.ts')) - if newest_valid_file: - timestamp = self.parse_on_disk_filename( - newest_valid_file)['timestamp'] - yield (object_path, object_hash, timestamp.internal) - except AssertionError as err: - self.logger.debug('Invalid file set in %s (%s)' % ( - object_path, err)) - except DiskFileError as err: - self.logger.debug( - 'Invalid diskfile filename %r in %r (%s)' % ( - newest_valid_file, object_path, err)) - def _hash_suffix(self, path, reclaim_age): """ - The only difference between this method and the module level function - hash_suffix is the way that files are updated on the returned hash. 
+ The only difference between this method and the replication policy + function is the way that files are updated on the returned hash. Instead of all filenames hashed into a single hasher, each file name will fall into a bucket either by fragment index for datafiles, or None (indicating a durable, metadata or tombstone). """ # hash_per_fi instead of single hash for whole suffix - hash_per_fi = defaultdict(hashlib.md5) - try: - path_contents = sorted(os.listdir(path)) - except OSError as err: - if err.errno in (errno.ENOTDIR, errno.ENOENT): - raise PathNotDir() - raise - for hsh in path_contents: - hsh_path = join(path, hsh) - try: - files = self.hash_cleanup_listdir(hsh_path, reclaim_age) - except OSError as err: - if err.errno == errno.ENOTDIR: - partition_path = dirname(path) - objects_path = dirname(partition_path) - device_path = dirname(objects_path) - quar_path = quarantine_renamer(device_path, hsh_path) - logging.exception( - _('Quarantined %(hsh_path)s to %(quar_path)s because ' - 'it is not a directory'), {'hsh_path': hsh_path, - 'quar_path': quar_path}) - continue - raise - if not files: - try: - os.rmdir(hsh_path) - except OSError: - pass - # we just deleted this hsh_path, why are we waiting - # until the next suffix hash to raise PathNotDir so that - # this suffix will get del'd from the suffix hashes? 
- for filename in files: + # here we flatten out the hashers hexdigest into a dictionary instead + # of just returning the one hexdigest for the whole suffix + def mapper(filename): info = self.parse_on_disk_filename(filename) fi = info['frag_index'] if fi is None: - hash_per_fi[fi].update(filename) + return None, filename else: - hash_per_fi[fi].update(info['timestamp'].internal) - try: - os.rmdir(path) - except OSError: - pass - # here we flatten out the hashers hexdigest into a dictionary instead - # of just returning the one hexdigest for the whole suffix + return fi, info['timestamp'].internal + + hash_per_fi = self._hash_suffix_dir(path, mapper, reclaim_age) return dict((fi, md5.hexdigest()) for fi, md5 in hash_per_fi.items()) - - def _get_hashes(self, partition_path, recalculate=None, do_listdir=False, - reclaim_age=None): - """ - The only difference with this method and the module level function - get_hashes is the call to hash_suffix routes to a method _hash_suffix - on this instance. 
- """ - reclaim_age = reclaim_age or self.reclaim_age - hashed = 0 - hashes_file = join(partition_path, HASH_FILE) - modified = False - force_rewrite = False - hashes = {} - mtime = -1 - - if recalculate is None: - recalculate = [] - - try: - with open(hashes_file, 'rb') as fp: - hashes = pickle.load(fp) - mtime = getmtime(hashes_file) - except Exception: - do_listdir = True - force_rewrite = True - if do_listdir: - for suff in os.listdir(partition_path): - if len(suff) == 3: - hashes.setdefault(suff, None) - modified = True - hashes.update((suffix, None) for suffix in recalculate) - for suffix, hash_ in hashes.items(): - if not hash_: - suffix_dir = join(partition_path, suffix) - try: - hashes[suffix] = self._hash_suffix(suffix_dir, reclaim_age) - hashed += 1 - except PathNotDir: - del hashes[suffix] - except OSError: - logging.exception(_('Error hashing suffix')) - modified = True - if modified: - with lock_path(partition_path): - if force_rewrite or not exists(hashes_file) or \ - getmtime(hashes_file) == mtime: - write_pickle( - hashes, hashes_file, partition_path, PICKLE_PROTOCOL) - return hashed, hashes - return self._get_hashes(partition_path, recalculate, do_listdir, - reclaim_age) - else: - return hashed, hashes diff --git a/swift/obj/mem_diskfile.py b/swift/obj/mem_diskfile.py index be5fbf1349..97d209cd15 100644 --- a/swift/obj/mem_diskfile.py +++ b/swift/obj/mem_diskfile.py @@ -15,12 +15,12 @@ """ In-Memory Disk File Interface for Swift Object Server""" -import cStringIO import time import hashlib from contextlib import contextmanager from eventlet import Timeout +from six import moves from swift.common.utils import Timestamp from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \ @@ -385,7 +385,7 @@ class DiskFile(object): disk :raises DiskFileNoSpace: if a size is specified and allocation fails """ - fp = cStringIO.StringIO() + fp = moves.cStringIO() try: yield DiskFileWriter(self._filesystem, self._name, fp) finally: diff --git 
a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py index 6f6d6bda81..cfe6239c2b 100644 --- a/swift/obj/reconstructor.py +++ b/swift/obj/reconstructor.py @@ -19,7 +19,7 @@ import random import time import itertools from collections import defaultdict -import cPickle as pickle +import six.moves.cPickle as pickle import shutil from eventlet import (GreenPile, GreenPool, Timeout, sleep, hubs, tpool, @@ -319,11 +319,11 @@ class ObjectReconstructor(Daemon): except (Exception, Timeout): self.logger.exception( _("Error trying to rebuild %(path)s " - "policy#%(policy)d frag#%(frag_index)s"), { - 'path': path, - 'policy': policy, - 'frag_index': frag_index, - }) + "policy#%(policy)d frag#%(frag_index)s"), + {'path': path, + 'policy': policy, + 'frag_index': frag_index, + }) break if not all(fragment_payload): break @@ -337,22 +337,34 @@ class ObjectReconstructor(Daemon): """ Logs various stats for the currently running reconstruction pass. """ - if self.reconstruction_count: + if (self.device_count and self.part_count and + self.reconstruction_device_count): elapsed = (time.time() - self.start) or 0.000001 - rate = self.reconstruction_count / elapsed + rate = self.reconstruction_part_count / elapsed + total_part_count = (self.part_count * + self.device_count / + self.reconstruction_device_count) self.logger.info( _("%(reconstructed)d/%(total)d (%(percentage).2f%%)" - " partitions reconstructed in %(time).2fs (%(rate).2f/sec, " - "%(remaining)s remaining)"), - {'reconstructed': self.reconstruction_count, - 'total': self.job_count, + " partitions of %(device)d/%(dtotal)d " + "(%(dpercentage).2f%%) devices" + " reconstructed in %(time).2fs " + "(%(rate).2f/sec, %(remaining)s remaining)"), + {'reconstructed': self.reconstruction_part_count, + 'total': self.part_count, 'percentage': - self.reconstruction_count * 100.0 / self.job_count, + self.reconstruction_part_count * 100.0 / self.part_count, + 'device': self.reconstruction_device_count, + 'dtotal': self.device_count, + 
'dpercentage': + self.reconstruction_device_count * 100.0 / self.device_count, 'time': time.time() - self.start, 'rate': rate, - 'remaining': '%d%s' % compute_eta(self.start, - self.reconstruction_count, - self.job_count)}) - if self.suffix_count: + 'remaining': '%d%s' % + compute_eta(self.start, + self.reconstruction_part_count, + total_part_count)}) + + if self.suffix_count and self.partition_times: self.logger.info( _("%(checked)d suffixes checked - " "%(hashed).2f%% hashed, %(synced).2f%% synced"), @@ -474,14 +486,11 @@ class ObjectReconstructor(Daemon): self._full_path(node, job['partition'], '', job['policy'])) elif resp.status != HTTP_OK: + full_path = self._full_path(node, job['partition'], '', + job['policy']) self.logger.error( - _("Invalid response %(resp)s " - "from %(full_path)s"), { - 'resp': resp.status, - 'full_path': self._full_path( - node, job['partition'], '', - job['policy']) - }) + _("Invalid response %(resp)s from %(full_path)s"), + {'resp': resp.status, 'full_path': full_path}) else: remote_suffixes = pickle.loads(resp.read()) except (Exception, Timeout): @@ -781,16 +790,22 @@ class ObjectReconstructor(Daemon): self._diskfile_mgr = self._df_router[policy] self.load_object_ring(policy) data_dir = get_data_dir(policy) - local_devices = itertools.ifilter( + local_devices = list(itertools.ifilter( lambda dev: dev and is_local_device( ips, self.port, dev['replication_ip'], dev['replication_port']), - policy.object_ring.devs) + policy.object_ring.devs)) + + if override_devices: + self.device_count = len(override_devices) + else: + self.device_count = len(local_devices) for local_dev in local_devices: if override_devices and (local_dev['device'] not in override_devices): continue + self.reconstruction_device_count += 1 dev_path = self._df_router[policy].get_dev_path( local_dev['device']) if not dev_path: @@ -814,6 +829,8 @@ class ObjectReconstructor(Daemon): self.logger.exception( 'Unable to list partitions in %r' % obj_path) continue + + 
self.part_count += len(partitions) for partition in partitions: part_path = join(obj_path, partition) if not (partition.isdigit() and @@ -821,6 +838,7 @@ class ObjectReconstructor(Daemon): self.logger.warning( 'Unexpected entity in data dir: %r' % part_path) remove_file(part_path) + self.reconstruction_part_count += 1 continue partition = int(partition) if override_partitions and (partition not in @@ -833,6 +851,7 @@ class ObjectReconstructor(Daemon): 'part_path': part_path, } yield part_info + self.reconstruction_part_count += 1 def build_reconstruction_jobs(self, part_info): """ @@ -850,10 +869,14 @@ class ObjectReconstructor(Daemon): def _reset_stats(self): self.start = time.time() self.job_count = 0 + self.part_count = 0 + self.device_count = 0 self.suffix_count = 0 self.suffix_sync = 0 self.suffix_hash = 0 self.reconstruction_count = 0 + self.reconstruction_part_count = 0 + self.reconstruction_device_count = 0 self.last_reconstruction_count = -1 def delete_partition(self, path): diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py index 5156ad7d8b..842a2a859c 100644 --- a/swift/obj/replicator.py +++ b/swift/obj/replicator.py @@ -20,7 +20,7 @@ import random import shutil import time import itertools -import cPickle as pickle +import six.moves.cPickle as pickle from swift import gettext_ as _ import eventlet @@ -37,8 +37,7 @@ from swift.common.bufferedhttp import http_connect from swift.common.daemon import Daemon from swift.common.http import HTTP_OK, HTTP_INSUFFICIENT_STORAGE from swift.obj import ssync_sender -from swift.obj.diskfile import (DiskFileManager, get_hashes, get_data_dir, - get_tmp_dir) +from swift.obj.diskfile import DiskFileManager, get_data_dir, get_tmp_dir from swift.common.storage_policy import POLICIES, REPL_POLICY @@ -54,13 +53,13 @@ class ObjectReplicator(Daemon): caller to do this in a loop. 
""" - def __init__(self, conf): + def __init__(self, conf, logger=None): """ :param conf: configuration object obtained from ConfigParser :param logger: logging object """ self.conf = conf - self.logger = get_logger(conf, log_route='object-replicator') + self.logger = logger or get_logger(conf, log_route='object-replicator') self.devices_dir = conf.get('devices', '/srv/node') self.mount_check = config_true_value(conf.get('mount_check', 'true')) self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no')) @@ -91,7 +90,7 @@ class ObjectReplicator(Daemon): self.node_timeout = float(conf.get('node_timeout', 10)) self.sync_method = getattr(self, conf.get('sync_method') or 'rsync') self.network_chunk_size = int(conf.get('network_chunk_size', 65536)) - self.headers = { + self.default_headers = { 'Content-Length': '0', 'user-agent': 'object-replicator %s' % os.getpid()} self.rsync_error_log_line_length = \ @@ -100,8 +99,37 @@ class ObjectReplicator(Daemon): False)) self.handoff_delete = config_auto_int_value( conf.get('handoff_delete', 'auto'), 0) + if any((self.handoff_delete, self.handoffs_first)): + self.logger.warn('Handoff only mode is not intended for normal ' + 'operation, please disable handoffs_first and ' + 'handoff_delete before the next ' + 'normal rebalance') self._diskfile_mgr = DiskFileManager(conf, self.logger) + def _zero_stats(self): + """Zero out the stats.""" + self.stats = {'attempted': 0, 'success': 0, 'failure': 0, + 'hashmatch': 0, 'rsync': 0, 'remove': 0, + 'start': time.time(), 'failure_nodes': {}} + + def _add_failure_stats(self, failure_devs_info): + for node, dev in failure_devs_info: + self.stats['failure'] += 1 + failure_devs = self.stats['failure_nodes'].setdefault(node, {}) + failure_devs.setdefault(dev, 0) + failure_devs[dev] += 1 + + def _get_my_replication_ips(self): + my_replication_ips = set() + ips = whataremyips() + for policy in POLICIES: + self.load_object_ring(policy) + for local_dev in [dev for dev in 
policy.object_ring.devs + if dev and dev['replication_ip'] in ips and + dev['replication_port'] == self.port]: + my_replication_ips.add(local_dev['replication_ip']) + return list(my_replication_ips) + # Just exists for doc anchor point def sync(self, node, job, suffixes, *args, **kwargs): """ @@ -243,7 +271,9 @@ class ObjectReplicator(Daemon): if len(suff) == 3 and isdir(join(path, suff))] self.replication_count += 1 self.logger.increment('partition.delete.count.%s' % (job['device'],)) - self.headers['X-Backend-Storage-Policy-Index'] = int(job['policy']) + headers = dict(self.default_headers) + headers['X-Backend-Storage-Policy-Index'] = int(job['policy']) + failure_devs_info = set() begin = time.time() try: responses = [] @@ -252,6 +282,7 @@ class ObjectReplicator(Daemon): delete_objs = None if suffixes: for node in job['nodes']: + self.stats['rsync'] += 1 kwargs = {} if node['region'] in synced_remote_regions and \ self.conf.get('sync_method', 'rsync') == 'ssync': @@ -267,11 +298,14 @@ class ObjectReplicator(Daemon): node['replication_ip'], node['replication_port'], node['device'], job['partition'], 'REPLICATE', - '/' + '-'.join(suffixes), headers=self.headers) + '/' + '-'.join(suffixes), headers=headers) conn.getresponse().read() if node['region'] != job['region']: synced_remote_regions[node['region']] = \ candidates.keys() + else: + failure_devs_info.add((node['replication_ip'], + node['device'])) responses.append(success) for region, cand_objs in synced_remote_regions.items(): if delete_objs is None: @@ -287,11 +321,23 @@ class ObjectReplicator(Daemon): delete_handoff = len(responses) == len(job['nodes']) and \ all(responses) if delete_handoff: + self.stats['remove'] += 1 if (self.conf.get('sync_method', 'rsync') == 'ssync' and delete_objs is not None): self.logger.info(_("Removing %s objects"), len(delete_objs)) - self.delete_handoff_objs(job, delete_objs) + _junk, error_paths = self.delete_handoff_objs( + job, delete_objs) + # if replication works for a 
hand-off device and it failed, + # the remote devices which are target of the replication + # from the hand-off device will be marked. Because cleanup + # after replication failed means replicator needs to + # replicate again with the same info. + if error_paths: + failure_devs_info.update( + [(failure_dev['replication_ip'], + failure_dev['device']) + for failure_dev in job['nodes']]) else: self.delete_partition(job['path']) elif not suffixes: @@ -299,14 +345,21 @@ class ObjectReplicator(Daemon): except (Exception, Timeout): self.logger.exception(_("Error syncing handoff partition")) finally: + target_devs_info = set([(target_dev['replication_ip'], + target_dev['device']) + for target_dev in job['nodes']]) + self.stats['success'] += len(target_devs_info - failure_devs_info) + self._add_failure_stats(failure_devs_info) self.partition_times.append(time.time() - begin) self.logger.timing_since('partition.delete.timing', begin) def delete_partition(self, path): self.logger.info(_("Removing partition: %s"), path) - tpool.execute(shutil.rmtree, path, ignore_errors=True) + tpool.execute(shutil.rmtree, path) def delete_handoff_objs(self, job, delete_objs): + success_paths = [] + error_paths = [] for object_hash in delete_objs: object_path = storage_directory(job['obj_path'], job['partition'], object_hash) @@ -314,11 +367,14 @@ class ObjectReplicator(Daemon): suffix_dir = dirname(object_path) try: os.rmdir(suffix_dir) + success_paths.append(object_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ENOTEMPTY): + error_paths.append(object_path) self.logger.exception( "Unexpected error trying to cleanup suffix dir:%r", suffix_dir) + return success_paths, error_paths def update(self, job): """ @@ -328,11 +384,14 @@ class ObjectReplicator(Daemon): """ self.replication_count += 1 self.logger.increment('partition.update.count.%s' % (job['device'],)) - self.headers['X-Backend-Storage-Policy-Index'] = int(job['policy']) + headers = dict(self.default_headers) + 
headers['X-Backend-Storage-Policy-Index'] = int(job['policy']) + target_devs_info = set() + failure_devs_info = set() begin = time.time() try: hashed, local_hash = tpool_reraise( - get_hashes, job['path'], + self._diskfile_mgr._get_hashes, job['path'], do_listdir=(self.replication_count % 10) == 0, reclaim_age=self.reclaim_age) self.suffix_hash += hashed @@ -347,6 +406,7 @@ class ObjectReplicator(Daemon): while attempts_left > 0: # If this throws StopIteration it will be caught way below node = next(nodes) + target_devs_info.add((node['replication_ip'], node['device'])) attempts_left -= 1 # if we have already synced to this remote region, # don't sync again on this replication pass @@ -357,17 +417,21 @@ class ObjectReplicator(Daemon): resp = http_connect( node['replication_ip'], node['replication_port'], node['device'], job['partition'], 'REPLICATE', - '', headers=self.headers).getresponse() + '', headers=headers).getresponse() if resp.status == HTTP_INSUFFICIENT_STORAGE: self.logger.error(_('%(ip)s/%(device)s responded' ' as unmounted'), node) attempts_left += 1 + failure_devs_info.add((node['replication_ip'], + node['device'])) continue if resp.status != HTTP_OK: self.logger.error(_("Invalid response %(resp)s " "from %(ip)s"), {'resp': resp.status, 'ip': node['replication_ip']}) + failure_devs_info.add((node['replication_ip'], + node['device'])) continue remote_hash = pickle.loads(resp.read()) del resp @@ -375,9 +439,10 @@ class ObjectReplicator(Daemon): local_hash[suffix] != remote_hash.get(suffix, -1)] if not suffixes: + self.stats['hashmatch'] += 1 continue hashed, recalc_hash = tpool_reraise( - get_hashes, + self._diskfile_mgr._get_hashes, job['path'], recalculate=suffixes, reclaim_age=self.reclaim_age) self.logger.update_stats('suffix.hashes', hashed) @@ -385,26 +450,35 @@ class ObjectReplicator(Daemon): suffixes = [suffix for suffix in local_hash if local_hash[suffix] != remote_hash.get(suffix, -1)] + self.stats['rsync'] += 1 success, _junk = 
self.sync(node, job, suffixes) with Timeout(self.http_timeout): conn = http_connect( node['replication_ip'], node['replication_port'], node['device'], job['partition'], 'REPLICATE', '/' + '-'.join(suffixes), - headers=self.headers) + headers=headers) conn.getresponse().read() + if not success: + failure_devs_info.add((node['replication_ip'], + node['device'])) # add only remote region when replicate succeeded if success and node['region'] != job['region']: synced_remote_regions.add(node['region']) self.suffix_sync += len(suffixes) self.logger.update_stats('suffix.syncs', len(suffixes)) except (Exception, Timeout): + failure_devs_info.add((node['replication_ip'], + node['device'])) self.logger.exception(_("Error syncing with node: %s") % node) self.suffix_count += len(local_hash) except (Exception, Timeout): + failure_devs_info.update(target_devs_info) self.logger.exception(_("Error syncing partition")) finally: + self.stats['success'] += len(target_devs_info - failure_devs_info) + self._add_failure_stats(failure_devs_info) self.partition_times.append(time.time() - begin) self.logger.timing_since('partition.update.timing', begin) @@ -482,6 +556,9 @@ class ObjectReplicator(Daemon): using replication style storage policy """ jobs = [] + self.all_devs_info.update( + [(dev['replication_ip'], dev['device']) + for dev in policy.object_ring.devs if dev]) data_dir = get_data_dir(policy) for local_dev in [dev for dev in policy.object_ring.devs if (dev @@ -495,6 +572,11 @@ class ObjectReplicator(Daemon): obj_path = join(dev_path, data_dir) tmp_path = join(dev_path, get_tmp_dir(policy)) if self.mount_check and not ismount(dev_path): + self._add_failure_stats( + [(failure_dev['replication_ip'], + failure_dev['device']) + for failure_dev in policy.object_ring.devs + if failure_dev]) self.logger.warn(_('%s is not mounted'), local_dev['device']) continue unlink_older_than(tmp_path, time.time() - self.reclaim_age) @@ -509,6 +591,7 @@ class ObjectReplicator(Daemon): and partition 
not in override_partitions): continue + part_nodes = None try: job_path = join(obj_path, partition) part_nodes = policy.object_ring.get_part_nodes( @@ -525,6 +608,17 @@ class ObjectReplicator(Daemon): partition=partition, region=local_dev['region'])) except ValueError: + if part_nodes: + self._add_failure_stats( + [(failure_dev['replication_ip'], + failure_dev['device']) + for failure_dev in nodes]) + else: + self._add_failure_stats( + [(failure_dev['replication_ip'], + failure_dev['device']) + for failure_dev in policy.object_ring.devs + if failure_dev]) continue return jobs @@ -570,19 +664,31 @@ class ObjectReplicator(Daemon): self.replication_count = 0 self.last_replication_count = -1 self.partition_times = [] + self.my_replication_ips = self._get_my_replication_ips() + self.all_devs_info = set() stats = eventlet.spawn(self.heartbeat) lockup_detector = eventlet.spawn(self.detect_lockups) eventlet.sleep() # Give spawns a cycle + current_nodes = None try: self.run_pool = GreenPool(size=self.concurrency) jobs = self.collect_jobs(override_devices=override_devices, override_partitions=override_partitions, override_policies=override_policies) for job in jobs: + current_nodes = job['nodes'] + if override_devices and job['device'] not in override_devices: + continue + if override_partitions and \ + job['partition'] not in override_partitions: + continue dev_path = join(self.devices_dir, job['device']) if self.mount_check and not ismount(dev_path): + self._add_failure_stats([(failure_dev['replication_ip'], + failure_dev['device']) + for failure_dev in job['nodes']]) self.logger.warn(_('%s is not mounted'), job['device']) continue if not self.check_ring(job['policy'].object_ring): @@ -604,18 +710,26 @@ class ObjectReplicator(Daemon): self.run_pool.spawn(self.update_deleted, job) else: self.run_pool.spawn(self.update, job) + current_nodes = None with Timeout(self.lockup_timeout): self.run_pool.waitall() except (Exception, Timeout): + if current_nodes: + 
self._add_failure_stats([(failure_dev['replication_ip'], + failure_dev['device']) + for failure_dev in current_nodes]) + else: + self._add_failure_stats(self.all_devs_info) self.logger.exception(_("Exception in top-level replication loop")) self.kill_coros() finally: stats.kill() lockup_detector.kill() self.stats_line() + self.stats['attempted'] = self.replication_count def run_once(self, *args, **kwargs): - start = time.time() + self._zero_stats() self.logger.info(_("Running object replicator in script mode.")) override_devices = list_from_csv(kwargs.get('devices')) @@ -632,27 +746,35 @@ class ObjectReplicator(Daemon): override_devices=override_devices, override_partitions=override_partitions, override_policies=override_policies) - total = (time.time() - start) / 60 + total = (time.time() - self.stats['start']) / 60 self.logger.info( _("Object replication complete (once). (%.02f minutes)"), total) if not (override_partitions or override_devices): - dump_recon_cache({'object_replication_time': total, - 'object_replication_last': time.time()}, + replication_last = time.time() + dump_recon_cache({'replication_stats': self.stats, + 'replication_time': total, + 'replication_last': replication_last, + 'object_replication_time': total, + 'object_replication_last': replication_last}, self.rcache, self.logger) def run_forever(self, *args, **kwargs): self.logger.info(_("Starting object replicator in daemon mode.")) # Run the replicator continually while True: - start = time.time() + self._zero_stats() self.logger.info(_("Starting object replication pass.")) # Run the replicator self.replicate() - total = (time.time() - start) / 60 + total = (time.time() - self.stats['start']) / 60 self.logger.info( _("Object replication complete. 
(%.02f minutes)"), total) - dump_recon_cache({'object_replication_time': total, - 'object_replication_last': time.time()}, + replication_last = time.time() + dump_recon_cache({'replication_stats': self.stats, + 'replication_time': total, + 'replication_last': replication_last, + 'object_replication_time': total, + 'object_replication_last': replication_last}, self.rcache, self.logger) self.logger.debug('Replication sleeping for %s seconds.', self.interval) diff --git a/swift/obj/server.py b/swift/obj/server.py index 85c85544e4..e0d43920c5 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -15,7 +15,7 @@ """ Object Server for Swift """ -import cPickle as pickle +import six.moves.cPickle as pickle import json import os import multiprocessing @@ -28,6 +28,7 @@ from swift import gettext_ as _ from hashlib import md5 from eventlet import sleep, wsgi, Timeout +from eventlet.greenthread import spawn from swift.common.utils import public, get_logger, \ config_true_value, timing_stats, replication, \ @@ -108,7 +109,9 @@ class ObjectController(BaseStorageServer): """ super(ObjectController, self).__init__(conf) self.logger = logger or get_logger(conf, log_route='object-server') - self.node_timeout = int(conf.get('node_timeout', 3)) + self.node_timeout = float(conf.get('node_timeout', 3)) + self.container_update_timeout = float( + conf.get('container_update_timeout', 1)) self.conn_timeout = float(conf.get('conn_timeout', 0.5)) self.client_timeout = int(conf.get('client_timeout', 60)) self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536)) @@ -198,7 +201,8 @@ class ObjectController(BaseStorageServer): device, partition, account, container, obj, policy, **kwargs) def async_update(self, op, account, container, obj, host, partition, - contdevice, headers_out, objdevice, policy): + contdevice, headers_out, objdevice, policy, + logger_thread_locals=None): """ Sends or saves an async update. 
@@ -213,7 +217,12 @@ class ObjectController(BaseStorageServer): request :param objdevice: device name that the object is in :param policy: the associated BaseStoragePolicy instance + :param logger_thread_locals: The thread local values to be set on the + self.logger to retain transaction + logging information. """ + if logger_thread_locals: + self.logger.thread_locals = logger_thread_locals headers_out['user-agent'] = 'object-server %s' % os.getpid() full_path = '/%s/%s/%s' % (account, container, obj) if all([host, partition, contdevice]): @@ -285,10 +294,28 @@ class ObjectController(BaseStorageServer): headers_out['x-trans-id'] = headers_in.get('x-trans-id', '-') headers_out['referer'] = request.as_referer() headers_out['X-Backend-Storage-Policy-Index'] = int(policy) + update_greenthreads = [] for conthost, contdevice in updates: - self.async_update(op, account, container, obj, conthost, - contpartition, contdevice, headers_out, - objdevice, policy) + gt = spawn(self.async_update, op, account, container, obj, + conthost, contpartition, contdevice, headers_out, + objdevice, policy, + logger_thread_locals=self.logger.thread_locals) + update_greenthreads.append(gt) + # Wait a little bit to see if the container updates are successful. + # If we immediately return after firing off the greenthread above, then + # we're more likely to confuse the end-user who does a listing right + # after getting a successful response to the object create. The + # `container_update_timeout` bounds the length of time we wait so that + # one slow container server doesn't make the entire request lag. 
+ try: + with Timeout(self.container_update_timeout): + for gt in update_greenthreads: + gt.wait() + except Timeout: + # updates didn't go through, log it and return + self.logger.debug( + 'Container update timeout (%.4fs) waiting for %s', + self.container_update_timeout, updates) def delete_at_update(self, op, delete_at, account, container, obj, request, objdevice, policy): @@ -417,6 +444,11 @@ class ObjectController(BaseStorageServer): override = key.lower().replace(override_prefix, 'x-') update_headers[override] = val + def _preserve_slo_manifest(self, update_metadata, orig_metadata): + if 'X-Static-Large-Object' in orig_metadata: + update_metadata['X-Static-Large-Object'] = \ + orig_metadata['X-Static-Large-Object'] + @public @timing_stats() def POST(self, request): @@ -446,6 +478,7 @@ class ObjectController(BaseStorageServer): request=request, headers={'X-Backend-Timestamp': orig_timestamp.internal}) metadata = {'X-Timestamp': req_timestamp.internal} + self._preserve_slo_manifest(metadata, orig_metadata) metadata.update(val for val in request.headers.items() if is_user_meta('object', val[0])) for header_key in self.allowed_headers: @@ -685,9 +718,6 @@ class ObjectController(BaseStorageServer): """Handle HTTP GET requests for the Swift Object Server.""" device, partition, account, container, obj, policy = \ get_name_and_placement(request, 5, 5, True) - keep_cache = self.keep_cache_private or ( - 'X-Auth-Token' not in request.headers and - 'X-Storage-Token' not in request.headers) try: disk_file = self.get_diskfile( device, partition, account, container, obj, diff --git a/swift/obj/ssync_receiver.py b/swift/obj/ssync_receiver.py index 6aeb4c401f..394d2b0158 100644 --- a/swift/obj/ssync_receiver.py +++ b/swift/obj/ssync_receiver.py @@ -164,7 +164,7 @@ class Receiver(object): self.node_index = int( self.request.headers['X-Backend-Ssync-Node-Index']) if self.node_index != self.frag_index: - # a primary node should only recieve it's own fragments + # a primary node 
should only receive its own fragments raise swob.HTTPBadRequest( 'Frag-Index (%s) != Node-Index (%s)' % ( self.frag_index, self.node_index)) @@ -319,7 +319,11 @@ class Receiver(object): header = header.strip().lower() value = value.strip() subreq.headers[header] = value - replication_headers.append(header) + if header != 'etag': + # make sure ssync doesn't cause 'Etag' to be added to + # obj metadata in addition to 'ETag' which object server + # sets (note capitalization) + replication_headers.append(header) if header == 'content-length': content_length = int(value) # Establish subrequest body, if needed. diff --git a/swift/obj/ssync_sender.py b/swift/obj/ssync_sender.py index 8367ba8aec..cf6fcad6a4 100644 --- a/swift/obj/ssync_sender.py +++ b/swift/obj/ssync_sender.py @@ -82,7 +82,6 @@ class Sender(object): set(self.send_list)) can_delete_obj = dict((hash_, self.available_map[hash_]) for hash_ in in_sync_hashes) - self.disconnect() if not self.failures: return True, can_delete_obj else: @@ -103,6 +102,8 @@ class Sender(object): self.node.get('replication_ip'), self.node.get('replication_port'), self.node.get('device'), self.job.get('partition')) + finally: + self.disconnect() except Exception: # We don't want any exceptions to escape our code and possibly # mess up the original replicator code that called us since it @@ -211,8 +212,10 @@ class Sender(object): self.job['policy'], self.suffixes, frag_index=self.job.get('frag_index')) if self.remote_check_objs is not None: - hash_gen = ifilter(lambda (path, object_hash, timestamp): - object_hash in self.remote_check_objs, hash_gen) + hash_gen = ifilter( + lambda path_objhash_timestamp: + path_objhash_timestamp[1] in + self.remote_check_objs, hash_gen) for path, object_hash, timestamp in hash_gen: self.available_map[object_hash] = timestamp with exceptions.MessageTimeout( @@ -349,6 +352,8 @@ class Sender(object): Closes down the connection to the object server once done with the SSYNC request. 
""" + if not self.connection: + return try: with exceptions.MessageTimeout( self.daemon.node_timeout, 'disconnect'): diff --git a/swift/obj/updater.py b/swift/obj/updater.py index f5d1f37fa4..d588de72b1 100644 --- a/swift/obj/updater.py +++ b/swift/obj/updater.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import cPickle as pickle +import six.moves.cPickle as pickle import os import signal import sys @@ -256,7 +256,7 @@ class ObjectUpdater(Daemon): :param node: node dictionary from the container ring :param part: partition that holds the container - :param op: operation performed (ex: 'POST' or 'DELETE') + :param op: operation performed (ex: 'PUT' or 'DELETE') :param obj: object name being updated :param headers_out: headers to send with the update """ diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index 5b7d089559..ec81bedefa 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -28,6 +28,7 @@ import os import time import functools import inspect +import itertools import operator from sys import exc_info from swift import gettext_ as _ @@ -696,7 +697,12 @@ class ResumingGetter(object): If we have no Range header, this is a no-op. 
""" if 'Range' in self.backend_headers: - req_range = Range(self.backend_headers['Range']) + try: + req_range = Range(self.backend_headers['Range']) + except ValueError: + # there's a Range header, but it's garbage, so get rid of it + self.backend_headers.pop('Range') + return begin, end = req_range.ranges.pop(0) if len(req_range.ranges) > 0: self.backend_headers['Range'] = str(req_range) @@ -922,6 +928,7 @@ class ResumingGetter(object): 'part_iter': part_iter} self.pop_range() except StopIteration: + req.environ['swift.non_client_disconnect'] = True return except ChunkReadTimeout: @@ -1016,7 +1023,7 @@ class ResumingGetter(object): self.statuses.append(possible_source.status) self.reasons.append(possible_source.reason) - self.bodies.append('') + self.bodies.append(None) self.source_headers.append(possible_source.getheaders()) sources.append((possible_source, node)) if not self.newest: # one good source is enough @@ -1045,6 +1052,13 @@ class ResumingGetter(object): src_headers = dict( (k.lower(), v) for k, v in possible_source.getheaders()) + + # Save off the source etag so that, if we lose the connection + # and have to resume from a different node, we can be sure that + # we have the same object (replication) or a fragment archive + # from the same object (EC). Otherwise, if the cluster has two + # versions of the same object, we might end up switching between + # old and new mid-stream and giving garbage to the client. self.used_source_etag = src_headers.get( 'x-object-sysmeta-ec-etag', src_headers.get('etag', '')).strip('"') @@ -1113,6 +1127,99 @@ class GetOrHeadHandler(ResumingGetter): return res +class NodeIter(object): + """ + Yields nodes for a ring partition, skipping over error + limited nodes and stopping at the configurable number of nodes. If a + node yielded subsequently gets error limited, an extra node will be + yielded to take its place. 
+ + Note that if you're going to iterate over this concurrently from + multiple greenthreads, you'll want to use a + swift.common.utils.GreenthreadSafeIterator to serialize access. + Otherwise, you may get ValueErrors from concurrent access. (You also + may not, depending on how logging is configured, the vagaries of + socket IO and eventlet, and the phase of the moon.) + + :param app: a proxy app + :param ring: ring to get yield nodes from + :param partition: ring partition to yield nodes for + :param node_iter: optional iterable of nodes to try. Useful if you + want to filter or reorder the nodes. + """ + + def __init__(self, app, ring, partition, node_iter=None): + self.app = app + self.ring = ring + self.partition = partition + + part_nodes = ring.get_part_nodes(partition) + if node_iter is None: + node_iter = itertools.chain( + part_nodes, ring.get_more_nodes(partition)) + num_primary_nodes = len(part_nodes) + self.nodes_left = self.app.request_node_count(num_primary_nodes) + self.expected_handoffs = self.nodes_left - num_primary_nodes + + # Use of list() here forcibly yanks the first N nodes (the primary + # nodes) from node_iter, so the rest of its values are handoffs. + self.primary_nodes = self.app.sort_nodes( + list(itertools.islice(node_iter, num_primary_nodes))) + self.handoff_iter = node_iter + + def __iter__(self): + self._node_iter = self._node_gen() + return self + + def log_handoffs(self, handoffs): + """ + Log handoff requests if handoff logging is enabled and the + handoff was not expected. + + We only log handoffs when we've pushed the handoff count further + than we would normally have expected under normal circumstances, + that is (request_node_count - num_primaries), when handoffs goes + higher than that it means one of the primaries must have been + skipped because of error limiting before we consumed all of our + nodes_left. 
+ """ + if not self.app.log_handoffs: + return + extra_handoffs = handoffs - self.expected_handoffs + if extra_handoffs > 0: + self.app.logger.increment('handoff_count') + self.app.logger.warning( + 'Handoff requested (%d)' % handoffs) + if (extra_handoffs == len(self.primary_nodes)): + # all the primaries were skipped, and handoffs didn't help + self.app.logger.increment('handoff_all_count') + + def _node_gen(self): + for node in self.primary_nodes: + if not self.app.error_limited(node): + yield node + if not self.app.error_limited(node): + self.nodes_left -= 1 + if self.nodes_left <= 0: + return + handoffs = 0 + for node in self.handoff_iter: + if not self.app.error_limited(node): + handoffs += 1 + self.log_handoffs(handoffs) + yield node + if not self.app.error_limited(node): + self.nodes_left -= 1 + if self.nodes_left <= 0: + return + + def next(self): + return next(self._node_iter) + + def __next__(self): + return self.next() + + class Controller(object): """Base WSGI controller class for the proxy""" server_type = 'Base' diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index 349f182159..61ff11763a 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -51,13 +51,12 @@ from swift.common.constraints import check_metadata, check_object_creation, \ check_account_format from swift.common import constraints from swift.common.exceptions import ChunkReadTimeout, \ - ChunkWriteTimeout, ConnectionTimeout, ListingIterNotFound, \ - ListingIterNotAuthorized, ListingIterError, ResponseTimeout, \ + ChunkWriteTimeout, ConnectionTimeout, ResponseTimeout, \ InsufficientStorage, FooterNotSupported, MultiphasePUTNotSupported, \ PutterConnectError from swift.common.http import ( - is_success, is_client_error, is_server_error, HTTP_CONTINUE, HTTP_CREATED, - HTTP_MULTIPLE_CHOICES, HTTP_NOT_FOUND, HTTP_INTERNAL_SERVER_ERROR, + is_success, is_server_error, HTTP_CONTINUE, HTTP_CREATED, + HTTP_MULTIPLE_CHOICES, 
HTTP_INTERNAL_SERVER_ERROR, HTTP_SERVICE_UNAVAILABLE, HTTP_INSUFFICIENT_STORAGE, HTTP_PRECONDITION_FAILED, HTTP_CONFLICT, is_informational) from swift.common.storage_policy import (POLICIES, REPL_POLICY, EC_POLICY, @@ -139,46 +138,6 @@ class BaseObjectController(Controller): self.container_name = unquote(container_name) self.object_name = unquote(object_name) - def _listing_iter(self, lcontainer, lprefix, env): - for page in self._listing_pages_iter(lcontainer, lprefix, env): - for item in page: - yield item - - def _listing_pages_iter(self, lcontainer, lprefix, env): - lpartition = self.app.container_ring.get_part( - self.account_name, lcontainer) - marker = '' - while True: - lreq = Request.blank('i will be overridden by env', environ=env) - # Don't quote PATH_INFO, by WSGI spec - lreq.environ['PATH_INFO'] = \ - '/v1/%s/%s' % (self.account_name, lcontainer) - lreq.environ['REQUEST_METHOD'] = 'GET' - lreq.environ['QUERY_STRING'] = \ - 'format=json&prefix=%s&marker=%s' % (quote(lprefix), - quote(marker)) - container_node_iter = self.app.iter_nodes(self.app.container_ring, - lpartition) - lresp = self.GETorHEAD_base( - lreq, _('Container'), container_node_iter, lpartition, - lreq.swift_entity_path) - if 'swift.authorize' in env: - lreq.acl = lresp.headers.get('x-container-read') - aresp = env['swift.authorize'](lreq) - if aresp: - raise ListingIterNotAuthorized(aresp) - if lresp.status_int == HTTP_NOT_FOUND: - raise ListingIterNotFound() - elif not is_success(lresp.status_int): - raise ListingIterError() - if not lresp.body: - break - sublisting = json.loads(lresp.body) - if not sublisting: - break - marker = sublisting[-1]['name'].encode('utf-8') - yield sublisting - def iter_nodes_local_first(self, ring, partition): """ Yields nodes for a ring partition. 
@@ -548,71 +507,6 @@ class BaseObjectController(Controller): # until copy request handling moves to middleware return None, req, data_source, update_response - def _handle_object_versions(self, req): - """ - This method handles versionining of objects in containers that - have the feature enabled. - - When a new PUT request is sent, the proxy checks for previous versions - of that same object name. If found, it is copied to a different - container and the new version is stored in its place. - - This method was added as part of the PUT method refactoring and the - functionality is expected to be moved to middleware - """ - container_info = self.container_info( - self.account_name, self.container_name, req) - policy_index = req.headers.get('X-Backend-Storage-Policy-Index', - container_info['storage_policy']) - obj_ring = self.app.get_object_ring(policy_index) - partition, nodes = obj_ring.get_nodes( - self.account_name, self.container_name, self.object_name) - object_versions = container_info['versions'] - - # do a HEAD request for checking object versions - if object_versions and not req.environ.get('swift_versioned_copy'): - # make sure proxy-server uses the right policy index - _headers = {'X-Backend-Storage-Policy-Index': policy_index, - 'X-Newest': 'True'} - hreq = Request.blank(req.path_info, headers=_headers, - environ={'REQUEST_METHOD': 'HEAD'}) - hnode_iter = self.app.iter_nodes(obj_ring, partition) - hresp = self.GETorHEAD_base( - hreq, _('Object'), hnode_iter, partition, - hreq.swift_entity_path) - - is_manifest = 'X-Object-Manifest' in req.headers or \ - 'X-Object-Manifest' in hresp.headers - if hresp.status_int != HTTP_NOT_FOUND and not is_manifest: - # This is a version manifest and needs to be handled - # differently. First copy the existing data to a new object, - # then write the data from this request to the version manifest - # object. 
- lcontainer = object_versions.split('/')[0] - prefix_len = '%03x' % len(self.object_name) - lprefix = prefix_len + self.object_name + '/' - ts_source = hresp.environ.get('swift_x_timestamp') - if ts_source is None: - ts_source = time.mktime(time.strptime( - hresp.headers['last-modified'], - '%a, %d %b %Y %H:%M:%S GMT')) - new_ts = Timestamp(ts_source).internal - vers_obj_name = lprefix + new_ts - copy_headers = { - 'Destination': '%s/%s' % (lcontainer, vers_obj_name)} - copy_environ = {'REQUEST_METHOD': 'COPY', - 'swift_versioned_copy': True - } - copy_req = Request.blank(req.path_info, headers=copy_headers, - environ=copy_environ) - copy_resp = self.COPY(copy_req) - if is_client_error(copy_resp.status_int): - # missing container or bad permissions - raise HTTPPreconditionFailed(request=req) - elif not is_success(copy_resp.status_int): - # could not copy the data, bail - raise HTTPServiceUnavailable(request=req) - def _update_content_type(self, req): # Sometimes the 'content-type' header exists, but is set to None. 
req.content_type_manually_set = True @@ -657,13 +551,17 @@ class BaseObjectController(Controller): if any(conn for conn in conns if conn.resp and conn.resp.status == HTTP_CONFLICT): - timestamps = [HeaderKeyDict(conn.resp.getheaders()).get( - 'X-Backend-Timestamp') for conn in conns if conn.resp] + status_times = ['%(status)s (%(timestamp)s)' % { + 'status': conn.resp.status, + 'timestamp': HeaderKeyDict( + conn.resp.getheaders()).get( + 'X-Backend-Timestamp', 'unknown') + } for conn in conns if conn.resp] self.app.logger.debug( _('Object PUT returning 202 for 409: ' '%(req_timestamp)s <= %(timestamps)r'), {'req_timestamp': req.timestamp.internal, - 'timestamps': ', '.join(timestamps)}) + 'timestamps': ', '.join(status_times)}) raise HTTPAccepted(request=req) self._check_min_conn(req, conns, min_conns) @@ -725,7 +623,7 @@ class BaseObjectController(Controller): """ This method is responsible for establishing connection with storage nodes and sending the data to each one of those - nodes. The process of transfering data is specific to each + nodes. The process of transferring data is specific to each Storage Policy, thus it is required for each policy specific ObjectController to provide their own implementation of this method. @@ -815,9 +713,6 @@ class BaseObjectController(Controller): self._update_x_timestamp(req) - # check if versioning is enabled and handle copying previous version - self._handle_object_versions(req) - # check if request is a COPY of an existing object source_header = req.headers.get('X-Copy-From') if source_header: @@ -830,7 +725,7 @@ class BaseObjectController(Controller): data_source = iter(lambda: reader(self.app.client_chunk_size), '') update_response = lambda req, resp: resp - # check if object is set to be automaticaly deleted (i.e. expired) + # check if object is set to be automatically deleted (i.e. 
expired) req, delete_at_container, delete_at_part, \ delete_at_nodes = self._config_obj_expiration(req) @@ -861,86 +756,10 @@ class BaseObjectController(Controller): containers = container_info['nodes'] req.acl = container_info['write_acl'] req.environ['swift_sync_key'] = container_info['sync_key'] - object_versions = container_info['versions'] if 'swift.authorize' in req.environ: aresp = req.environ['swift.authorize'](req) if aresp: return aresp - if object_versions: - # this is a version manifest and needs to be handled differently - object_versions = unquote(object_versions) - lcontainer = object_versions.split('/')[0] - prefix_len = '%03x' % len(self.object_name) - lprefix = prefix_len + self.object_name + '/' - item_list = [] - try: - for _item in self._listing_iter(lcontainer, lprefix, - req.environ): - item_list.append(_item) - except ListingIterNotFound: - # no worries, last_item is None - pass - except ListingIterNotAuthorized as err: - return err.aresp - except ListingIterError: - return HTTPServerError(request=req) - - while len(item_list) > 0: - previous_version = item_list.pop() - # there are older versions so copy the previous version to the - # current object and delete the previous version - orig_container = self.container_name - orig_obj = self.object_name - self.container_name = lcontainer - self.object_name = previous_version['name'].encode('utf-8') - - copy_path = '/v1/' + self.account_name + '/' + \ - self.container_name + '/' + self.object_name - - copy_headers = {'X-Newest': 'True', - 'Destination': orig_container + '/' + orig_obj - } - copy_environ = {'REQUEST_METHOD': 'COPY', - 'swift_versioned_copy': True - } - creq = Request.blank(copy_path, headers=copy_headers, - environ=copy_environ) - copy_resp = self.COPY(creq) - if copy_resp.status_int == HTTP_NOT_FOUND: - # the version isn't there so we'll try with previous - self.container_name = orig_container - self.object_name = orig_obj - continue - if is_client_error(copy_resp.status_int): - 
# some user error, maybe permissions - return HTTPPreconditionFailed(request=req) - elif not is_success(copy_resp.status_int): - # could not copy the data, bail - return HTTPServiceUnavailable(request=req) - # reset these because the COPY changed them - self.container_name = lcontainer - self.object_name = previous_version['name'].encode('utf-8') - new_del_req = Request.blank(copy_path, environ=req.environ) - container_info = self.container_info( - self.account_name, self.container_name, req) - policy_idx = container_info['storage_policy'] - obj_ring = self.app.get_object_ring(policy_idx) - # pass the policy index to storage nodes via req header - new_del_req.headers['X-Backend-Storage-Policy-Index'] = \ - policy_idx - container_partition = container_info['partition'] - containers = container_info['nodes'] - new_del_req.acl = container_info['write_acl'] - new_del_req.path_info = copy_path - req = new_del_req - # remove 'X-If-Delete-At', since it is not for the older copy - if 'X-If-Delete-At' in req.headers: - del req.headers['X-If-Delete-At'] - if 'swift.authorize' in req.environ: - aresp = req.environ['swift.authorize'](req) - if aresp: - return aresp - break if not containers: return HTTPNotFound(request=req) partition, nodes = obj_ring.get_nodes( @@ -999,6 +818,13 @@ class BaseObjectController(Controller): self.object_name = dest_object # re-write the existing request as a PUT instead of creating a new one # since this one is already attached to the posthooklogger + # TODO: Swift now has proxy-logging middleware instead of + # posthooklogger used in before. i.e. we don't have to + # keep the code depends on evnetlet.posthooks sequence, IMHO. + # However, creating a new sub request might + # cause the possibility to hide some bugs behindes the request + # so that we should discuss whichi is suitable (new-sub-request + # vs re-write-existing-request) for Swift. 
[kota_] req.method = 'PUT' req.path_info = '/v1/%s/%s/%s' % \ (dest_account, dest_container, dest_object) @@ -1471,6 +1297,8 @@ class ECAppIter(object): # 100-byte object with 1024-byte segments. That's not # what we're dealing with here, though. if client_asked_for_range and not satisfiable: + req.environ[ + 'swift.non_client_disconnect'] = True raise HTTPRequestedRangeNotSatisfiable( request=req, headers=resp_headers) self.learned_content_type = content_type @@ -2132,44 +1960,43 @@ class ECObjectController(BaseObjectController): orig_range = req.range range_specs = self._convert_range(req, policy) - node_iter = GreenthreadSafeIterator(node_iter) - num_gets = policy.ec_ndata - with ContextPool(num_gets) as pool: + safe_iter = GreenthreadSafeIterator(node_iter) + with ContextPool(policy.ec_ndata) as pool: pile = GreenAsyncPile(pool) - for _junk in range(num_gets): + for _junk in range(policy.ec_ndata): pile.spawn(self._fragment_GET_request, - req, node_iter, partition, + req, safe_iter, partition, policy) - gets = list(pile) - good_gets = [] bad_gets = [] - for get, parts_iter in gets: + etag_buckets = collections.defaultdict(list) + best_etag = None + for get, parts_iter in pile: if is_success(get.last_status): - good_gets.append((get, parts_iter)) + etag = HeaderKeyDict( + get.last_headers)['X-Object-Sysmeta-Ec-Etag'] + etag_buckets[etag].append((get, parts_iter)) + if etag != best_etag and ( + len(etag_buckets[etag]) > + len(etag_buckets[best_etag])): + best_etag = etag else: bad_gets.append((get, parts_iter)) + matching_response_count = max( + len(etag_buckets[best_etag]), len(bad_gets)) + if (policy.ec_ndata - matching_response_count > + pile._pending) and node_iter.nodes_left > 0: + # we need more matching responses to reach ec_ndata + # than we have pending gets, as long as we still have + # nodes in node_iter we can spawn another + pile.spawn(self._fragment_GET_request, req, + safe_iter, partition, policy) req.range = orig_range - if len(good_gets) == 
num_gets: - # If these aren't all for the same object, then error out so - # at least the client doesn't get garbage. We can do a lot - # better here with more work, but this'll work for now. - found_obj_etags = set( - HeaderKeyDict( - getter.last_headers)['X-Object-Sysmeta-Ec-Etag'] - for getter, _junk in good_gets) - if len(found_obj_etags) > 1: - self.app.logger.debug( - "Returning 503 for %s; found too many etags (%s)", - req.path, - ", ".join(found_obj_etags)) - return HTTPServiceUnavailable(request=req) - - # we found enough pieces to decode the object, so now let's - # decode the object + if len(etag_buckets[best_etag]) >= policy.ec_ndata: + # headers can come from any of the getters resp_headers = HeaderKeyDict( - good_gets[0][0].source_headers[-1]) + etag_buckets[best_etag][0][0].source_headers[-1]) resp_headers.pop('Content-Range', None) eccl = resp_headers.get('X-Object-Sysmeta-Ec-Content-Length') obj_length = int(eccl) if eccl is not None else None @@ -2177,11 +2004,10 @@ class ECObjectController(BaseObjectController): # This is only true if we didn't get a 206 response, but # that's the only time this is used anyway. fa_length = int(resp_headers['Content-Length']) - app_iter = ECAppIter( req.swift_entity_path, policy, - [iterator for getter, iterator in good_gets], + [iterator for getter, iterator in etag_buckets[best_etag]], range_specs, fa_length, obj_length, self.app.logger) resp = Response( @@ -2203,20 +2029,19 @@ class ECObjectController(BaseObjectController): resp = self.best_response( req, statuses, reasons, bodies, 'Object', headers=headers) - - self._fix_response_headers(resp) + self._fix_response(resp) return resp - def _fix_response_headers(self, resp): + def _fix_response(self, resp): # EC fragment archives each have different bytes, hence different # etags. However, they all have the original object's etag stored in # sysmeta, so we copy that here so the client gets it. 
- resp.headers['Etag'] = resp.headers.get( - 'X-Object-Sysmeta-Ec-Etag') - resp.headers['Content-Length'] = resp.headers.get( - 'X-Object-Sysmeta-Ec-Content-Length') - - return resp + if is_success(resp.status_int): + resp.headers['Etag'] = resp.headers.get( + 'X-Object-Sysmeta-Ec-Etag') + resp.headers['Content-Length'] = resp.headers.get( + 'X-Object-Sysmeta-Ec-Content-Length') + resp.fix_conditional_response() def _connect_put_node(self, node_iter, part, path, headers, logger_thread_locals): @@ -2551,10 +2376,9 @@ class ECObjectController(BaseObjectController): need_quorum = False # The .durable file will propagate in a replicated fashion; if # one exists, the reconstructor will spread it around. Thus, we - # don't require as many .durable files to be successfully - # written as we do fragment archives in order to call the PUT a - # success. - min_conns = 2 + # require "parity + 1" .durable files to be successfully written + # as we do fragment archives in order to call the PUT a success. 
+ min_conns = policy.ec_nparity + 1 putters = [p for p in putters if not p.failed] # ignore response etags, and quorum boolean statuses, reasons, bodies, _etags, _quorum = \ diff --git a/swift/proxy/server.py b/swift/proxy/server.py index b631542f60..b49181dc37 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -19,7 +19,6 @@ import socket from swift import gettext_ as _ from random import shuffle from time import time -import itertools import functools import sys @@ -36,7 +35,7 @@ from swift.common.utils import cache_from_env, get_logger, \ from swift.common.constraints import check_utf8, valid_api_version from swift.proxy.controllers import AccountController, ContainerController, \ ObjectControllerRouter, InfoController -from swift.proxy.controllers.base import get_container_info +from swift.proxy.controllers.base import get_container_info, NodeIter from swift.common.swob import HTTPBadRequest, HTTPForbidden, \ HTTPMethodNotAllowed, HTTPNotFound, HTTPPreconditionFailed, \ HTTPServerError, HTTPException, Request, HTTPServiceUnavailable @@ -64,6 +63,9 @@ required_filters = [ if pipe.startswith('catch_errors') else [])}, {'name': 'dlo', 'after_fn': lambda _junk: [ + 'staticweb', 'tempauth', 'keystoneauth', + 'catch_errors', 'gatekeeper', 'proxy_logging']}, + {'name': 'versioned_writes', 'after_fn': lambda _junk: [ 'staticweb', 'tempauth', 'keystoneauth', 'catch_errors', 'gatekeeper', 'proxy_logging']}] @@ -378,6 +380,7 @@ class Application(object): allowed_methods = getattr(controller, 'allowed_methods', set()) return HTTPMethodNotAllowed( request=req, headers={'Allow': ', '.join(allowed_methods)}) + old_authorize = None if 'swift.authorize' in req.environ: # We call authorize before the handler, always. 
If authorized, # we remove the swift.authorize hook so isn't ever called @@ -388,7 +391,7 @@ class Application(object): if not resp and not req.headers.get('X-Copy-From-Account') \ and not req.headers.get('Destination-Account'): # No resp means authorized, no delayed recheck required. - del req.environ['swift.authorize'] + old_authorize = req.environ['swift.authorize'] else: # Response indicates denial, but we might delay the denial # and recheck later. If not delayed, return the error now. @@ -398,7 +401,13 @@ class Application(object): # gets mutated during handling. This way logging can display the # method the client actually sent. req.environ['swift.orig_req_method'] = req.method - return handler(req) + try: + if old_authorize: + req.environ.pop('swift.authorize', None) + return handler(req) + finally: + if old_authorize: + req.environ['swift.authorize'] = old_authorize except HTTPException as error_response: return error_response except (Exception, Timeout): @@ -497,60 +506,7 @@ class Application(object): 'port': node['port'], 'device': node['device']}) def iter_nodes(self, ring, partition, node_iter=None): - """ - Yields nodes for a ring partition, skipping over error - limited nodes and stopping at the configurable number of nodes. If a - node yielded subsequently gets error limited, an extra node will be - yielded to take its place. - - Note that if you're going to iterate over this concurrently from - multiple greenthreads, you'll want to use a - swift.common.utils.GreenthreadSafeIterator to serialize access. - Otherwise, you may get ValueErrors from concurrent access. (You also - may not, depending on how logging is configured, the vagaries of - socket IO and eventlet, and the phase of the moon.) - - :param ring: ring to get yield nodes from - :param partition: ring partition to yield nodes for - :param node_iter: optional iterable of nodes to try. Useful if you - want to filter or reorder the nodes. 
- """ - part_nodes = ring.get_part_nodes(partition) - if node_iter is None: - node_iter = itertools.chain(part_nodes, - ring.get_more_nodes(partition)) - num_primary_nodes = len(part_nodes) - - # Use of list() here forcibly yanks the first N nodes (the primary - # nodes) from node_iter, so the rest of its values are handoffs. - primary_nodes = self.sort_nodes( - list(itertools.islice(node_iter, num_primary_nodes))) - handoff_nodes = node_iter - nodes_left = self.request_node_count(len(primary_nodes)) - - log_handoffs_threshold = nodes_left - len(primary_nodes) - for node in primary_nodes: - if not self.error_limited(node): - yield node - if not self.error_limited(node): - nodes_left -= 1 - if nodes_left <= 0: - return - handoffs = 0 - for node in handoff_nodes: - if not self.error_limited(node): - handoffs += 1 - if self.log_handoffs and handoffs > log_handoffs_threshold: - self.logger.increment('handoff_count') - self.logger.warning( - 'Handoff requested (%d)' % handoffs) - if handoffs - log_handoffs_threshold == len(primary_nodes): - self.logger.increment('handoff_all_count') - yield node - if not self.error_limited(node): - nodes_left -= 1 - if nodes_left <= 0: - return + return NodeIter(self, ring, partition, node_iter=node_iter) def exception_occurred(self, node, typ, additional_info, **kwargs): @@ -569,11 +525,11 @@ class Application(object): else: log = self.logger.exception log(_('ERROR with %(type)s server %(ip)s:%(port)s/%(device)s' - ' re: %(info)s'), { - 'type': typ, 'ip': node['ip'], 'port': - node['port'], 'device': node['device'], - 'info': additional_info - }, **kwargs) + ' re: %(info)s'), + {'type': typ, 'ip': node['ip'], + 'port': node['port'], 'device': node['device'], + 'info': additional_info}, + **kwargs) def modify_wsgi_pipeline(self, pipe): """ diff --git a/test-requirements.txt b/test-requirements.txt index b3f7eed5be..73ca508fe3 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -3,7 +3,7 @@ # process, which may cause wedges 
in the gate later. # Hacking already pins down pep8, pyflakes and flake8 -hacking>=0.8.0,<0.9 +hacking>=0.10.0,<0.11 coverage nose nosexcover @@ -13,3 +13,6 @@ sphinx>=1.1.2,<1.2 mock>=1.0 python-swiftclient python-keystoneclient>=1.3.0 + +# Security checks +bandit>=0.10.1 diff --git a/test/__init__.py b/test/__init__.py index 3bd25b1407..b3ebefe70c 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -15,7 +15,7 @@ # See http://code.google.com/p/python-nose/issues/detail?id=373 # The code below enables nosetests to work with i18n _() blocks - +from __future__ import print_function import sys import os try: @@ -63,15 +63,12 @@ def get_config(section_name=None, defaults=None): config = readconf(config_file, section_name) except SystemExit: if not os.path.exists(config_file): - print >>sys.stderr, \ - 'Unable to read test config %s - file not found' \ - % config_file + print('Unable to read test config %s - file not found' + % config_file, file=sys.stderr) elif not os.access(config_file, os.R_OK): - print >>sys.stderr, \ - 'Unable to read test config %s - permission denied' \ - % config_file + print('Unable to read test config %s - permission denied' + % config_file, file=sys.stderr) else: - print >>sys.stderr, \ - 'Unable to read test config %s - section %s not found' \ - % (config_file, section_name) + print('Unable to read test config %s - section %s not found' + % (config_file, section_name), file=sys.stderr) return config diff --git a/test/functional/__init__.py b/test/functional/__init__.py index 580de56c81..f07d162691 100644 --- a/test/functional/__init__.py +++ b/test/functional/__init__.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import httplib +from __future__ import print_function import mock import os import sys @@ -24,20 +24,24 @@ import eventlet import eventlet.debug import functools import random -from ConfigParser import ConfigParser, NoSectionError + from time import time, sleep -from httplib import HTTPException from urlparse import urlparse from nose import SkipTest from contextlib import closing from gzip import GzipFile from shutil import rmtree from tempfile import mkdtemp + +from six.moves.configparser import ConfigParser, NoSectionError +from six.moves import http_client +from six.moves.http_client import HTTPException + from swift.common.middleware.memcache import MemcacheMiddleware from swift.common.storage_policy import parse_storage_policies, PolicyError from test import get_config -from test.functional.swift_test_client import Account, Connection, \ +from test.functional.swift_test_client import Account, Connection, Container, \ ResponseError # This has the side effect of mocking out the xattr module so that unit tests # (and in this case, when in-process functional tests are called for) can run @@ -47,13 +51,13 @@ from test.unit import debug_logger, FakeMemcache from swift.common import constraints, utils, ring, storage_policy from swift.common.ring import Ring from swift.common.wsgi import monkey_patch_mimetools, loadapp -from swift.common.utils import config_true_value +from swift.common.utils import config_true_value, split_path from swift.account import server as account_server from swift.container import server as container_server from swift.obj import server as object_server, mem_server as mem_object_server import swift.proxy.controllers.obj -httplib._MAXHEADERS = constraints.MAX_HEADER_COUNT +http_client._MAXHEADERS = constraints.MAX_HEADER_COUNT DEBUG = True # In order to get the proper blocking behavior of sockets without using @@ -106,6 +110,7 @@ orig_swift_conf_name = None in_process = False _testdir = _test_servers = _test_coros = None +policy_specified = 
None class FakeMemcacheMiddleware(MemcacheMiddleware): @@ -124,7 +129,7 @@ class InProcessException(BaseException): def _info(msg): - print >> sys.stderr, msg + print(msg, file=sys.stderr) def _debug(msg): @@ -210,7 +215,6 @@ def _in_process_setup_ring(swift_conf, conf_src_dir, testdir): for policy in policies: conf.remove_section(sp_prefix + str(policy.idx)) - policy_specified = os.environ.get('SWIFT_TEST_POLICY') if policy_specified: policy_to_test = policies.get_by_name(policy_specified) if policy_to_test is None: @@ -498,7 +502,7 @@ def get_cluster_info(): # Most likely the swift cluster has "expose_info = false" set # in its proxy-server.conf file, so we'll just do the best we # can. - print >>sys.stderr, "** Swift Cluster not exposing /info **" + print("** Swift Cluster not exposing /info **", file=sys.stderr) # Finally, we'll allow any constraint present in the swift-constraints # section of test.conf to override everything. Note that only those @@ -510,8 +514,8 @@ def get_cluster_info(): except KeyError: pass except ValueError: - print >>sys.stderr, "Invalid constraint value: %s = %s" % ( - k, test_constraints[k]) + print("Invalid constraint value: %s = %s" % ( + k, test_constraints[k]), file=sys.stderr) eff_constraints.update(test_constraints) # Just make it look like these constraints were loaded from a /info call, @@ -521,6 +525,9 @@ def get_cluster_info(): def setup_package(): + + global policy_specified + policy_specified = os.environ.get('SWIFT_TEST_POLICY') in_process_env = os.environ.get('SWIFT_TEST_IN_PROCESS') if in_process_env is not None: use_in_process = utils.config_true_value(in_process_env) @@ -558,8 +565,8 @@ def setup_package(): in_process_setup(the_object_server=( mem_object_server if in_mem_obj else object_server)) except InProcessException as exc: - print >> sys.stderr, ('Exception during in-process setup: %s' - % str(exc)) + print(('Exception during in-process setup: %s' + % str(exc)), file=sys.stderr) raise global web_front_end @@ 
-668,20 +675,19 @@ def setup_package(): global skip skip = not all([swift_test_auth, swift_test_user[0], swift_test_key[0]]) if skip: - print >>sys.stderr, 'SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG' + print('SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG', file=sys.stderr) global skip2 skip2 = not all([not skip, swift_test_user[1], swift_test_key[1]]) if not skip and skip2: - print >>sys.stderr, \ - 'SKIPPING SECOND ACCOUNT FUNCTIONAL TESTS' \ - ' DUE TO NO CONFIG FOR THEM' + print('SKIPPING SECOND ACCOUNT FUNCTIONAL TESTS ' + 'DUE TO NO CONFIG FOR THEM', file=sys.stderr) global skip3 skip3 = not all([not skip, swift_test_user[2], swift_test_key[2]]) if not skip and skip3: - print >>sys.stderr, \ - 'SKIPPING THIRD ACCOUNT FUNCTIONAL TESTS DUE TO NO CONFIG FOR THEM' + print('SKIPPING THIRD ACCOUNT FUNCTIONAL TESTS' + 'DUE TO NO CONFIG FOR THEM', file=sys.stderr) global skip_if_not_v3 skip_if_not_v3 = (swift_test_auth_version != '3' @@ -689,16 +695,33 @@ def setup_package(): swift_test_user[3], swift_test_key[3]])) if not skip and skip_if_not_v3: - print >>sys.stderr, \ - 'SKIPPING FUNCTIONAL TESTS SPECIFIC TO AUTH VERSION 3' + print('SKIPPING FUNCTIONAL TESTS SPECIFIC TO AUTH VERSION 3', + file=sys.stderr) global skip_service_tokens skip_service_tokens = not all([not skip, swift_test_user[4], swift_test_key[4], swift_test_tenant[4], swift_test_service_prefix]) if not skip and skip_service_tokens: - print >>sys.stderr, \ - 'SKIPPING FUNCTIONAL TESTS SPECIFIC TO SERVICE TOKENS' + print( + 'SKIPPING FUNCTIONAL TESTS SPECIFIC TO SERVICE TOKENS', + file=sys.stderr) + + if policy_specified: + policies = FunctionalStoragePolicyCollection.from_info() + for p in policies: + # policy names are case-insensitive + if policy_specified.lower() == p['name'].lower(): + _info('Using specified policy %s' % policy_specified) + FunctionalStoragePolicyCollection.policy_specified = p + Container.policy_specified = policy_specified + break + else: + _info( + 'SKIPPING FUNCTIONAL TESTS: 
Failed to find specified policy %s' + % policy_specified) + raise Exception('Failed to find specified policy %s' + % policy_specified) get_cluster_info() @@ -747,8 +770,24 @@ conn = [None, None, None, None, None] def connection(url): if has_insecure: - return http_connection(url, insecure=insecure) - return http_connection(url) + parsed_url, http_conn = http_connection(url, insecure=insecure) + else: + parsed_url, http_conn = http_connection(url) + + orig_request = http_conn.request + + # Add the policy header if policy_specified is set + def request_with_policy(method, url, body=None, headers={}): + version, account, container, obj = split_path(url, 1, 4, True) + if policy_specified and method == 'PUT' and container and not obj \ + and 'X-Storage-Policy' not in headers: + headers['X-Storage-Policy'] = policy_specified + + return orig_request(method, url, body, headers) + + http_conn.request = request_with_policy + + return parsed_url, http_conn def get_url_token(user_index, os_options): @@ -899,6 +938,9 @@ def requires_acls(f): class FunctionalStoragePolicyCollection(object): + # policy_specified is set in __init__.py when tests are being set up. + policy_specified = None + def __init__(self, policies): self._all = policies self.default = None @@ -940,7 +982,12 @@ class FunctionalStoragePolicyCollection(object): p.get(k) != v for k, v in kwargs.items())]) def select(self): - return random.choice(self) + # check that a policy was specified and that it is available + # in the current list (i.e., hasn't been excluded of the current list) + if self.policy_specified and self.policy_specified in self: + return self.policy_specified + else: + return random.choice(self) def requires_policies(f): diff --git a/test/functional/swift_test_client.py b/test/functional/swift_test_client.py index 442fa95966..0148ba7b33 100644 --- a/test/functional/swift_test_client.py +++ b/test/functional/swift_test_client.py @@ -14,19 +14,18 @@ # limitations under the License. 
import hashlib -import httplib import os import random import socket -import StringIO import time import urllib import simplejson as json - from nose import SkipTest from xml.dom import minidom +import six +from six.moves import http_client from swiftclient import get_auth from swift.common import constraints @@ -34,7 +33,7 @@ from swift.common.utils import config_true_value from test import safe_repr -httplib._MAXHEADERS = constraints.MAX_HEADER_COUNT +http_client._MAXHEADERS = constraints.MAX_HEADER_COUNT class AuthenticationFailed(Exception): @@ -166,10 +165,10 @@ class Connection(object): x = storage_url.split('/') if x[0] == 'http:': - self.conn_class = httplib.HTTPConnection + self.conn_class = http_client.HTTPConnection self.storage_port = 80 elif x[0] == 'https:': - self.conn_class = httplib.HTTPSConnection + self.conn_class = http_client.HTTPSConnection self.storage_port = 443 else: raise ValueError('unexpected protocol %s' % (x[0])) @@ -209,7 +208,7 @@ class Connection(object): def http_connect(self): self.connection = self.conn_class(self.storage_host, port=self.storage_port) - #self.connection.set_debuglevel(3) + # self.connection.set_debuglevel(3) def make_path(self, path=None, cfg=None): if path is None: @@ -237,6 +236,9 @@ class Connection(object): if not cfg.get('no_auth_token'): headers['X-Auth-Token'] = self.storage_token + if cfg.get('use_token'): + headers['X-Auth-Token'] = cfg.get('use_token') + if isinstance(hdrs, dict): headers.update(hdrs) return headers @@ -283,7 +285,7 @@ class Connection(object): try: self.response = try_request() - except httplib.HTTPException as e: + except http_client.HTTPException as e: fail_messages.append(safe_repr(e)) continue @@ -335,7 +337,7 @@ class Connection(object): self.connection = self.conn_class(self.storage_host, port=self.storage_port) - #self.connection.set_debuglevel(3) + # self.connection.set_debuglevel(3) self.connection.putrequest('PUT', path) for key, value in headers.items(): 
self.connection.putheader(key, value) @@ -488,6 +490,9 @@ class Account(Base): class Container(Base): + # policy_specified is set in __init__.py when tests are being set up. + policy_specified = None + def __init__(self, conn, account, name): self.conn = conn self.account = str(account) @@ -500,9 +505,23 @@ class Container(Base): parms = {} if cfg is None: cfg = {} + if self.policy_specified and 'X-Storage-Policy' not in hdrs: + hdrs['X-Storage-Policy'] = self.policy_specified return self.conn.make_request('PUT', self.path, hdrs=hdrs, parms=parms, cfg=cfg) in (201, 202) + def update_metadata(self, hdrs=None, cfg=None): + if hdrs is None: + hdrs = {} + if cfg is None: + cfg = {} + + self.conn.make_request('POST', self.path, hdrs=hdrs, cfg=cfg) + if not 200 <= self.conn.response.status <= 299: + raise ResponseError(self.conn.response, 'POST', + self.conn.make_path(self.path)) + return True + def delete(self, hdrs=None, parms=None): if hdrs is None: hdrs = {} @@ -633,6 +652,9 @@ class File(Base): else: headers['Content-Length'] = 0 + if cfg.get('use_token'): + headers['X-Auth-Token'] = cfg.get('use_token') + if cfg.get('no_content_type'): pass elif self.content_type: @@ -650,7 +672,7 @@ class File(Base): block_size = 4096 if isinstance(data, str): - data = StringIO.StringIO(data) + data = six.StringIO(data) checksum = hashlib.md5() buff = data.read(block_size) @@ -707,13 +729,13 @@ class File(Base): return self.conn.make_request('COPY', self.path, hdrs=headers, parms=parms) == 201 - def delete(self, hdrs=None, parms=None): + def delete(self, hdrs=None, parms=None, cfg=None): if hdrs is None: hdrs = {} if parms is None: parms = {} if self.conn.make_request('DELETE', self.path, hdrs=hdrs, - parms=parms) != 204: + cfg=cfg, parms=parms) != 204: raise ResponseError(self.conn.response, 'DELETE', self.conn.make_path(self.path)) @@ -925,7 +947,7 @@ class File(Base): pass self.size = int(os.fstat(data.fileno())[6]) else: - data = StringIO.StringIO(data) + data = 
six.StringIO(data) self.size = data.len headers = self.make_headers(cfg=cfg) @@ -977,7 +999,7 @@ class File(Base): if not self.write(data, hdrs=hdrs, parms=parms, cfg=cfg): raise ResponseError(self.conn.response, 'PUT', self.conn.make_path(self.path)) - self.md5 = self.compute_md5sum(StringIO.StringIO(data)) + self.md5 = self.compute_md5sum(six.StringIO(data)) return data def write_random_return_resp(self, size=None, hdrs=None, parms=None, @@ -994,5 +1016,28 @@ class File(Base): return_resp=True) if not resp: raise ResponseError(self.conn.response) - self.md5 = self.compute_md5sum(StringIO.StringIO(data)) + self.md5 = self.compute_md5sum(six.StringIO(data)) return resp + + def post(self, hdrs=None, parms=None, cfg=None, return_resp=False): + if hdrs is None: + hdrs = {} + if parms is None: + parms = {} + if cfg is None: + cfg = {} + + headers = self.make_headers(cfg=cfg) + headers.update(hdrs) + + self.conn.make_request('POST', self.path, hdrs=headers, + parms=parms, cfg=cfg) + + if self.conn.response.status not in (201, 202): + raise ResponseError(self.conn.response, 'POST', + self.conn.make_path(self.path)) + + if return_resp: + return self.conn.response + + return True diff --git a/test/functional/test_account.py b/test/functional/test_account.py index b6c5abedc6..e952c0923b 100755 --- a/test/functional/test_account.py +++ b/test/functional/test_account.py @@ -89,22 +89,22 @@ class TestAccount(unittest.TestCase): self.assertEqual(resp.status, 204) resp = retry(head) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('x-account-meta-test'), None) resp = retry(get) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('x-account-meta-test'), None) resp = retry(post, 'Value') resp.read() self.assertEqual(resp.status, 204) resp = retry(head) resp.read() - self.assert_(resp.status in (200, 204), 
resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('x-account-meta-test'), 'Value') resp = retry(get) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('x-account-meta-test'), 'Value') def test_invalid_acls(self): @@ -190,7 +190,7 @@ class TestAccount(unittest.TestCase): # cannot read account resp = retry(get, use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # grant read access acl_user = tf.swift_test_user[2] @@ -204,7 +204,7 @@ class TestAccount(unittest.TestCase): # read-only can read account headers resp = retry(get, use_account=3) resp.read() - self.assert_(resp.status in (200, 204)) + self.assertIn(resp.status, (200, 204)) # but not acls self.assertEqual(resp.getheader('X-Account-Access-Control'), None) @@ -221,7 +221,7 @@ class TestAccount(unittest.TestCase): self.assertEqual(resp.status, 204) resp = retry(get, use_account=3) resp.read() - self.assert_(resp.status in (200, 204)) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('X-Account-Meta-Test'), 'value') @requires_acls @@ -241,7 +241,7 @@ class TestAccount(unittest.TestCase): # cannot read account resp = retry(get, use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # grant read-write access acl_user = tf.swift_test_user[2] @@ -255,7 +255,7 @@ class TestAccount(unittest.TestCase): # read-write can read account headers resp = retry(get, use_account=3) resp.read() - self.assert_(resp.status in (200, 204)) + self.assertIn(resp.status, (200, 204)) # but not acls self.assertEqual(resp.getheader('X-Account-Access-Control'), None) @@ -282,7 +282,7 @@ class TestAccount(unittest.TestCase): # cannot read account resp = retry(get, use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # grant admin access acl_user = 
tf.swift_test_user[2] @@ -296,7 +296,7 @@ class TestAccount(unittest.TestCase): # admin can read account headers resp = retry(get, use_account=3) resp.read() - self.assert_(resp.status in (200, 204)) + self.assertIn(resp.status, (200, 204)) # including acls self.assertEqual(resp.getheader('X-Account-Access-Control'), acl_json_str) @@ -309,7 +309,7 @@ class TestAccount(unittest.TestCase): self.assertEqual(resp.status, 204) resp = retry(get, use_account=3) resp.read() - self.assert_(resp.status in (200, 204)) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('X-Account-Meta-Test'), value) # admin can even revoke their own access @@ -321,7 +321,7 @@ class TestAccount(unittest.TestCase): # and again, cannot read account resp = retry(get, use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) @requires_acls def test_protected_tempurl(self): @@ -359,8 +359,9 @@ class TestAccount(unittest.TestCase): # read-only tester3 can read account metadata resp = retry(get, use_account=3) resp.read() - self.assert_(resp.status in (200, 204), - 'Expected status in (200, 204), got %s' % resp.status) + self.assertTrue( + resp.status in (200, 204), + 'Expected status in (200, 204), got %s' % resp.status) self.assertEqual(resp.getheader('X-Account-Meta-Test'), value) # but not temp-url-key self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'), None) @@ -377,8 +378,9 @@ class TestAccount(unittest.TestCase): # read-write tester3 can read account metadata resp = retry(get, use_account=3) resp.read() - self.assert_(resp.status in (200, 204), - 'Expected status in (200, 204), got %s' % resp.status) + self.assertTrue( + resp.status in (200, 204), + 'Expected status in (200, 204), got %s' % resp.status) self.assertEqual(resp.getheader('X-Account-Meta-Test'), value) # but not temp-url-key self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'), None) @@ -395,8 +397,9 @@ class TestAccount(unittest.TestCase): # 
admin tester3 can read account metadata resp = retry(get, use_account=3) resp.read() - self.assert_(resp.status in (200, 204), - 'Expected status in (200, 204), got %s' % resp.status) + self.assertTrue( + resp.status in (200, 204), + 'Expected status in (200, 204), got %s' % resp.status) self.assertEqual(resp.getheader('X-Account-Meta-Test'), value) # including temp-url-key self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'), @@ -412,8 +415,9 @@ class TestAccount(unittest.TestCase): self.assertEqual(resp.status, 204) resp = retry(get, use_account=3) resp.read() - self.assert_(resp.status in (200, 204), - 'Expected status in (200, 204), got %s' % resp.status) + self.assertTrue( + resp.status in (200, 204), + 'Expected status in (200, 204), got %s' % resp.status) self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'), secret) @@ -689,17 +693,17 @@ class TestAccount(unittest.TestCase): if (tf.web_front_end == 'integral'): resp = retry(post, uni_key, '1') resp.read() - self.assertTrue(resp.status in (201, 204)) + self.assertIn(resp.status, (201, 204)) resp = retry(head) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader(uni_key.encode('utf-8')), '1') resp = retry(post, 'X-Account-Meta-uni', uni_value) resp.read() self.assertEqual(resp.status, 204) resp = retry(head) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('X-Account-Meta-uni'), uni_value.encode('utf-8')) if (tf.web_front_end == 'integral'): @@ -708,7 +712,7 @@ class TestAccount(unittest.TestCase): self.assertEqual(resp.status, 204) resp = retry(head) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader(uni_key.encode('utf-8')), uni_value.encode('utf-8')) @@ -730,14 +734,14 @@ class TestAccount(unittest.TestCase): 
self.assertEqual(resp.status, 204) resp = retry(head) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('x-account-meta-one'), '1') resp = retry(post, 'X-Account-Meta-Two', '2') resp.read() self.assertEqual(resp.status, 204) resp = retry(head) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('x-account-meta-one'), '1') self.assertEqual(resp.getheader('x-account-meta-two'), '2') @@ -875,7 +879,7 @@ class TestAccountInNonDefaultDomain(unittest.TestCase): resp = retry(head, use_account=4) resp.read() self.assertEqual(resp.status, 204) - self.assertTrue('X-Account-Project-Domain-Id' in resp.headers) + self.assertIn('X-Account-Project-Domain-Id', resp.headers) if __name__ == '__main__': diff --git a/test/functional/test_container.py b/test/functional/test_container.py index dfbec4eed2..345aa0aa84 100755 --- a/test/functional/test_container.py +++ b/test/functional/test_container.py @@ -72,7 +72,7 @@ class TestContainer(unittest.TestCase): body = resp.read() if resp.status == 404: break - self.assert_(resp.status // 100 == 2, resp.status) + self.assertTrue(resp.status // 100 == 2, resp.status) objs = json.loads(body) if not objs: break @@ -93,7 +93,7 @@ class TestContainer(unittest.TestCase): # container may have not been created resp = retry(delete, self.container) resp.read() - self.assert_(resp.status in (204, 404)) + self.assertIn(resp.status, (204, 404)) def test_multi_metadata(self): if tf.skip: @@ -114,14 +114,14 @@ class TestContainer(unittest.TestCase): self.assertEqual(resp.status, 204) resp = retry(head) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('x-container-meta-one'), '1') resp = retry(post, 'X-Container-Meta-Two', '2') resp.read() self.assertEqual(resp.status, 204) resp = retry(head) 
resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('x-container-meta-one'), '1') self.assertEqual(resp.getheader('x-container-meta-two'), '2') @@ -147,14 +147,14 @@ class TestContainer(unittest.TestCase): self.assertEqual(resp.status, 204) resp = retry(head) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader(uni_key.encode('utf-8')), '1') resp = retry(post, 'X-Container-Meta-uni', uni_value) resp.read() self.assertEqual(resp.status, 204) resp = retry(head) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('X-Container-Meta-uni'), uni_value.encode('utf-8')) if (tf.web_front_end == 'integral'): @@ -163,7 +163,7 @@ class TestContainer(unittest.TestCase): self.assertEqual(resp.status, 204) resp = retry(head) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader(uni_key.encode('utf-8')), uni_value.encode('utf-8')) @@ -198,11 +198,11 @@ class TestContainer(unittest.TestCase): self.assertEqual(resp.status, 201) resp = retry(head, name) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('x-container-meta-test'), 'Value') resp = retry(get, name) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('x-container-meta-test'), 'Value') resp = retry(delete, name) resp.read() @@ -214,11 +214,11 @@ class TestContainer(unittest.TestCase): self.assertEqual(resp.status, 201) resp = retry(head, name) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('x-container-meta-test'), 
None) resp = retry(get, name) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('x-container-meta-test'), None) resp = retry(delete, name) resp.read() @@ -246,22 +246,22 @@ class TestContainer(unittest.TestCase): resp = retry(head) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('x-container-meta-test'), None) resp = retry(get) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('x-container-meta-test'), None) resp = retry(post, 'Value') resp.read() self.assertEqual(resp.status, 204) resp = retry(head) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('x-container-meta-test'), 'Value') resp = retry(get) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) self.assertEqual(resp.getheader('x-container-meta-test'), 'Value') def test_PUT_bad_metadata(self): @@ -484,7 +484,7 @@ class TestContainer(unittest.TestCase): resp = retry(get) raise Exception('Should not have been able to GET') except Exception as err: - self.assert_(str(err).startswith('No result after '), err) + self.assertTrue(str(err).startswith('No result after '), err) def post(url, token, parsed, conn): conn.request('POST', parsed.path + '/' + self.name, '', @@ -511,7 +511,7 @@ class TestContainer(unittest.TestCase): resp = retry(get) raise Exception('Should not have been able to GET') except Exception as err: - self.assert_(str(err).startswith('No result after '), err) + self.assertTrue(str(err).startswith('No result after '), err) def test_cross_account_container(self): if tf.skip or tf.skip2: @@ -729,7 +729,7 @@ class TestContainer(unittest.TestCase): # cannot list containers resp = retry(get, 
use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # grant read-only access acl_user = tf.swift_test_user[2] @@ -742,23 +742,23 @@ class TestContainer(unittest.TestCase): # read-only can list containers resp = retry(get, use_account=3) listing = resp.read() - self.assertEquals(resp.status, 200) - self.assert_(self.name in listing) + self.assertEqual(resp.status, 200) + self.assertIn(self.name, listing) # read-only can not create containers new_container_name = str(uuid4()) resp = retry(put, new_container_name, use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # but it can see newly created ones resp = retry(put, new_container_name, use_account=1) resp.read() - self.assertEquals(resp.status, 201) + self.assertEqual(resp.status, 201) resp = retry(get, use_account=3) listing = resp.read() - self.assertEquals(resp.status, 200) - self.assert_(new_container_name in listing) + self.assertEqual(resp.status, 200) + self.assertIn(new_container_name, listing) @requires_acls def test_read_only_acl_metadata(self): @@ -788,13 +788,13 @@ class TestContainer(unittest.TestCase): self.assertEqual(resp.status, 204) resp = retry(get, self.name, use_account=1) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) # cannot see metadata resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # grant read-only access acl_user = tf.swift_test_user[2] @@ -814,7 +814,7 @@ class TestContainer(unittest.TestCase): # read-only can read container metadata resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) @requires_acls @@ -844,7 +844,7 @@ class TestContainer(unittest.TestCase): # 
cannot list containers resp = retry(get, use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # grant read-write access acl_user = tf.swift_test_user[2] @@ -857,36 +857,36 @@ class TestContainer(unittest.TestCase): # can list containers resp = retry(get, use_account=3) listing = resp.read() - self.assertEquals(resp.status, 200) - self.assert_(self.name in listing) + self.assertEqual(resp.status, 200) + self.assertIn(self.name, listing) # can create new containers new_container_name = str(uuid4()) resp = retry(put, new_container_name, use_account=3) resp.read() - self.assertEquals(resp.status, 201) + self.assertEqual(resp.status, 201) resp = retry(get, use_account=3) listing = resp.read() - self.assertEquals(resp.status, 200) - self.assert_(new_container_name in listing) + self.assertEqual(resp.status, 200) + self.assertIn(new_container_name, listing) # can also delete them resp = retry(delete, new_container_name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) resp = retry(get, use_account=3) listing = resp.read() - self.assertEquals(resp.status, 200) - self.assert_(new_container_name not in listing) + self.assertEqual(resp.status, 200) + self.assertNotIn(new_container_name, listing) # even if they didn't create them empty_container_name = str(uuid4()) resp = retry(put, empty_container_name, use_account=1) resp.read() - self.assertEquals(resp.status, 201) + self.assertEqual(resp.status, 201) resp = retry(delete, empty_container_name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) @requires_acls def test_read_write_acl_metadata(self): @@ -916,13 +916,13 @@ class TestContainer(unittest.TestCase): self.assertEqual(resp.status, 204) resp = retry(get, self.name, use_account=1) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) 
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) # cannot see metadata resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # grant read-write access acl_user = tf.swift_test_user[2] @@ -935,7 +935,7 @@ class TestContainer(unittest.TestCase): # read-write can read container metadata resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) # read-write can also write container metadata @@ -943,20 +943,20 @@ class TestContainer(unittest.TestCase): headers = {'x-container-meta-test': new_value} resp = retry(post, self.name, headers=headers, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value) # and remove it headers = {'x-remove-container-meta-test': 'true'} resp = retry(post, self.name, headers=headers, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), None) @requires_acls @@ -986,7 +986,7 @@ class TestContainer(unittest.TestCase): # cannot list containers resp = retry(get, use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # grant admin access acl_user = tf.swift_test_user[2] @@ -999,36 +999,36 @@ class TestContainer(unittest.TestCase): # can list containers resp = retry(get, use_account=3) listing = resp.read() - self.assertEquals(resp.status, 200) - self.assert_(self.name in listing) + 
self.assertEqual(resp.status, 200) + self.assertIn(self.name, listing) # can create new containers new_container_name = str(uuid4()) resp = retry(put, new_container_name, use_account=3) resp.read() - self.assertEquals(resp.status, 201) + self.assertEqual(resp.status, 201) resp = retry(get, use_account=3) listing = resp.read() - self.assertEquals(resp.status, 200) - self.assert_(new_container_name in listing) + self.assertEqual(resp.status, 200) + self.assertIn(new_container_name, listing) # can also delete them resp = retry(delete, new_container_name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) resp = retry(get, use_account=3) listing = resp.read() - self.assertEquals(resp.status, 200) - self.assert_(new_container_name not in listing) + self.assertEqual(resp.status, 200) + self.assertNotIn(new_container_name, listing) # even if they didn't create them empty_container_name = str(uuid4()) resp = retry(put, empty_container_name, use_account=1) resp.read() - self.assertEquals(resp.status, 201) + self.assertEqual(resp.status, 201) resp = retry(delete, empty_container_name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) @requires_acls def test_admin_acl_metadata(self): @@ -1058,13 +1058,13 @@ class TestContainer(unittest.TestCase): self.assertEqual(resp.status, 204) resp = retry(get, self.name, use_account=1) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) # cannot see metadata resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # grant access acl_user = tf.swift_test_user[2] @@ -1077,7 +1077,7 @@ class TestContainer(unittest.TestCase): # can read container metadata resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + 
self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) # can also write container metadata @@ -1085,20 +1085,20 @@ class TestContainer(unittest.TestCase): headers = {'x-container-meta-test': new_value} resp = retry(post, self.name, headers=headers, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value) # and remove it headers = {'x-remove-container-meta-test': 'true'} resp = retry(post, self.name, headers=headers, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), None) @requires_acls @@ -1132,7 +1132,7 @@ class TestContainer(unittest.TestCase): self.assertEqual(resp.status, 204) resp = retry(get, self.name, use_account=1) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret') self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) @@ -1147,7 +1147,7 @@ class TestContainer(unittest.TestCase): # can read container metadata resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) # but not sync-key self.assertEqual(resp.getheader('X-Container-Sync-Key'), None) @@ -1169,7 +1169,7 @@ class TestContainer(unittest.TestCase): # can read container metadata resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) 
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) # but not sync-key self.assertEqual(resp.getheader('X-Container-Sync-Key'), None) @@ -1177,7 +1177,7 @@ class TestContainer(unittest.TestCase): # sanity check sync-key w/ account1 resp = retry(get, self.name, use_account=1) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret') # and can write @@ -1191,7 +1191,7 @@ class TestContainer(unittest.TestCase): self.assertEqual(resp.status, 204) resp = retry(get, self.name, use_account=1) # validate w/ account1 resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value) # but can not write sync-key self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret') @@ -1207,7 +1207,7 @@ class TestContainer(unittest.TestCase): # admin can read container metadata resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value) # and ALSO sync-key self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret') @@ -1220,7 +1220,7 @@ class TestContainer(unittest.TestCase): self.assertEqual(resp.status, 204) resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Sync-Key'), new_secret) @requires_acls @@ -1255,7 +1255,7 @@ class TestContainer(unittest.TestCase): self.assertEqual(resp.status, 204) resp = retry(get, self.name, use_account=1) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Read'), 'jdoe') self.assertEqual(resp.getheader('X-Container-Write'), 'jdoe') self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) @@ -1271,7 
+1271,7 @@ class TestContainer(unittest.TestCase): # can read container metadata resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) # but not container acl self.assertEqual(resp.getheader('X-Container-Read'), None) @@ -1297,7 +1297,7 @@ class TestContainer(unittest.TestCase): # can read container metadata resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) # but not container acl self.assertEqual(resp.getheader('X-Container-Read'), None) @@ -1306,7 +1306,7 @@ class TestContainer(unittest.TestCase): # sanity check container acls with account1 resp = retry(get, self.name, use_account=1) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Read'), 'jdoe') self.assertEqual(resp.getheader('X-Container-Write'), 'jdoe') @@ -1322,7 +1322,7 @@ class TestContainer(unittest.TestCase): self.assertEqual(resp.status, 204) resp = retry(get, self.name, use_account=1) # validate w/ account1 resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value) # but can not write container acls self.assertEqual(resp.getheader('X-Container-Read'), 'jdoe') @@ -1339,7 +1339,7 @@ class TestContainer(unittest.TestCase): # admin can read container metadata resp = retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value) # and ALSO container acls self.assertEqual(resp.getheader('X-Container-Read'), 'jdoe') @@ -1355,7 +1355,7 @@ class TestContainer(unittest.TestCase): self.assertEqual(resp.status, 204) resp = 
retry(get, self.name, use_account=3) resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Read'), '.r:*') def test_long_name_content_type(self): @@ -1398,8 +1398,11 @@ class TestContainer(unittest.TestCase): raise SkipTest() def put(url, token, parsed, conn): + # using the empty storage policy header value here to ensure + # that the default policy is chosen in case policy_specified is set + # see __init__.py for details on policy_specified conn.request('PUT', parsed.path + '/' + self.container, '', - {'X-Auth-Token': token}) + {'X-Auth-Token': token, 'X-Storage-Policy': ''}) return check_response(conn) resp = retry(put) resp.read() @@ -1412,8 +1415,8 @@ class TestContainer(unittest.TestCase): resp = retry(head) resp.read() headers = dict((k.lower(), v) for k, v in resp.getheaders()) - self.assertEquals(headers.get('x-storage-policy'), - default_policy['name']) + self.assertEqual(headers.get('x-storage-policy'), + default_policy['name']) def test_error_invalid_storage_policy_name(self): def put(url, token, parsed, conn, headers): @@ -1450,8 +1453,8 @@ class TestContainer(unittest.TestCase): resp = retry(head) resp.read() headers = dict((k.lower(), v) for k, v in resp.getheaders()) - self.assertEquals(headers.get('x-storage-policy'), - policy['name']) + self.assertEqual(headers.get('x-storage-policy'), + policy['name']) # and test recreate with-out specifying Storage Policy resp = retry(put) @@ -1461,8 +1464,8 @@ class TestContainer(unittest.TestCase): resp = retry(head) resp.read() headers = dict((k.lower(), v) for k, v in resp.getheaders()) - self.assertEquals(headers.get('x-storage-policy'), - policy['name']) + self.assertEqual(headers.get('x-storage-policy'), + policy['name']) # delete it def delete(url, token, parsed, conn): @@ -1477,7 +1480,7 @@ class TestContainer(unittest.TestCase): resp = retry(head) resp.read() headers = dict((k.lower(), v) for k, v in resp.getheaders()) - 
self.assertEquals(headers.get('x-storage-policy'), None) + self.assertEqual(headers.get('x-storage-policy'), None) @requires_policies def test_conflict_change_storage_policy_with_put(self): @@ -1507,8 +1510,8 @@ class TestContainer(unittest.TestCase): resp = retry(head) resp.read() headers = dict((k.lower(), v) for k, v in resp.getheaders()) - self.assertEquals(headers.get('x-storage-policy'), - policy['name']) + self.assertEqual(headers.get('x-storage-policy'), + policy['name']) @requires_policies def test_noop_change_storage_policy_with_post(self): @@ -1544,8 +1547,8 @@ class TestContainer(unittest.TestCase): resp = retry(head) resp.read() headers = dict((k.lower(), v) for k, v in resp.getheaders()) - self.assertEquals(headers.get('x-storage-policy'), - policy['name']) + self.assertEqual(headers.get('x-storage-policy'), + policy['name']) class BaseTestContainerACLs(unittest.TestCase): @@ -1592,7 +1595,7 @@ class BaseTestContainerACLs(unittest.TestCase): while True: resp = retry(get, use_account=self.account) body = resp.read() - self.assert_(resp.status // 100 == 2, resp.status) + self.assertTrue(resp.status // 100 == 2, resp.status) objs = json.loads(body) if not objs: break diff --git a/test/functional/test_object.py b/test/functional/test_object.py index beb52047fd..55868098be 100755 --- a/test/functional/test_object.py +++ b/test/functional/test_object.py @@ -89,7 +89,7 @@ class TestObject(unittest.TestCase): body = resp.read() if resp.status == 404: break - self.assert_(resp.status // 100 == 2, resp.status) + self.assertTrue(resp.status // 100 == 2, resp.status) objs = json.loads(body) if not objs: break @@ -107,7 +107,7 @@ class TestObject(unittest.TestCase): for container in self.containers: resp = retry(delete, container) resp.read() - self.assert_(resp.status in (204, 404)) + self.assertIn(resp.status, (204, 404)) def test_if_none_match(self): def put(url, token, parsed, conn): @@ -119,10 +119,10 @@ class TestObject(unittest.TestCase): return 
check_response(conn) resp = retry(put) resp.read() - self.assertEquals(resp.status, 201) + self.assertEqual(resp.status, 201) resp = retry(put) resp.read() - self.assertEquals(resp.status, 412) + self.assertEqual(resp.status, 412) def put(url, token, parsed, conn): conn.request('PUT', '%s/%s/%s' % ( @@ -133,7 +133,7 @@ class TestObject(unittest.TestCase): return check_response(conn) resp = retry(put) resp.read() - self.assertEquals(resp.status, 400) + self.assertEqual(resp.status, 400) def test_non_integer_x_delete_after(self): def put(url, token, parsed, conn): @@ -145,7 +145,7 @@ class TestObject(unittest.TestCase): return check_response(conn) resp = retry(put) body = resp.read() - self.assertEquals(resp.status, 400) + self.assertEqual(resp.status, 400) self.assertEqual(body, 'Non-integer X-Delete-After') def test_non_integer_x_delete_at(self): @@ -158,7 +158,7 @@ class TestObject(unittest.TestCase): return check_response(conn) resp = retry(put) body = resp.read() - self.assertEquals(resp.status, 400) + self.assertEqual(resp.status, 400) self.assertEqual(body, 'Non-integer X-Delete-At') def test_x_delete_at_in_the_past(self): @@ -171,7 +171,7 @@ class TestObject(unittest.TestCase): return check_response(conn) resp = retry(put) body = resp.read() - self.assertEquals(resp.status, 400) + self.assertEqual(resp.status, 400) self.assertEqual(body, 'X-Delete-At in past') def test_copy_object(self): @@ -243,6 +243,23 @@ class TestObject(unittest.TestCase): self.assertEqual(resp.status, 200) self.assertEqual(dest_contents, source_contents) + # copy source to dest with COPY and range + def copy(url, token, parsed, conn): + conn.request('COPY', '%s/%s' % (parsed.path, source), '', + {'X-Auth-Token': token, + 'Destination': dest, + 'Range': 'bytes=1-2'}) + return check_response(conn) + resp = retry(copy) + resp.read() + self.assertEqual(resp.status, 201) + + # contents of dest should be the same as source + resp = retry(get_dest) + dest_contents = resp.read() + 
self.assertEqual(resp.status, 200) + self.assertEqual(dest_contents, source_contents[1:3]) + # delete the copy resp = retry(delete) resp.read() @@ -370,7 +387,7 @@ class TestObject(unittest.TestCase): resp = retry(get) raise Exception('Should not have been able to GET') except Exception as err: - self.assert_(str(err).startswith('No result after ')) + self.assertTrue(str(err).startswith('No result after ')) def post(url, token, parsed, conn): conn.request('POST', parsed.path + '/' + self.container, '', @@ -395,7 +412,7 @@ class TestObject(unittest.TestCase): resp = retry(get) raise Exception('Should not have been able to GET') except Exception as err: - self.assert_(str(err).startswith('No result after ')) + self.assertTrue(str(err).startswith('No result after ')) def test_private_object(self): if tf.skip or tf.skip3: @@ -526,12 +543,12 @@ class TestObject(unittest.TestCase): # cannot list objects resp = retry(get_listing, use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # cannot get object resp = retry(get, self.obj, use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # grant read-only access acl_user = tf.swift_test_user[2] @@ -544,32 +561,32 @@ class TestObject(unittest.TestCase): # can list objects resp = retry(get_listing, use_account=3) listing = resp.read() - self.assertEquals(resp.status, 200) - self.assert_(self.obj in listing) + self.assertEqual(resp.status, 200) + self.assertIn(self.obj, listing) # can get object resp = retry(get, self.obj, use_account=3) body = resp.read() - self.assertEquals(resp.status, 200) - self.assertEquals(body, 'test') + self.assertEqual(resp.status, 200) + self.assertEqual(body, 'test') # can not put an object obj_name = str(uuid4()) resp = retry(put, obj_name, use_account=3) body = resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # can not delete an object resp = retry(delete, self.obj, 
use_account=3) body = resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # sanity with account1 resp = retry(get_listing, use_account=3) listing = resp.read() - self.assertEquals(resp.status, 200) - self.assert_(obj_name not in listing) - self.assert_(self.obj in listing) + self.assertEqual(resp.status, 200) + self.assertNotIn(obj_name, listing) + self.assertIn(self.obj, listing) @requires_acls def test_read_write(self): @@ -607,12 +624,12 @@ class TestObject(unittest.TestCase): # cannot list objects resp = retry(get_listing, use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # cannot get object resp = retry(get, self.obj, use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # grant read-write access acl_user = tf.swift_test_user[2] @@ -625,32 +642,32 @@ class TestObject(unittest.TestCase): # can list objects resp = retry(get_listing, use_account=3) listing = resp.read() - self.assertEquals(resp.status, 200) - self.assert_(self.obj in listing) + self.assertEqual(resp.status, 200) + self.assertIn(self.obj, listing) # can get object resp = retry(get, self.obj, use_account=3) body = resp.read() - self.assertEquals(resp.status, 200) - self.assertEquals(body, 'test') + self.assertEqual(resp.status, 200) + self.assertEqual(body, 'test') # can put an object obj_name = str(uuid4()) resp = retry(put, obj_name, use_account=3) body = resp.read() - self.assertEquals(resp.status, 201) + self.assertEqual(resp.status, 201) # can delete an object resp = retry(delete, self.obj, use_account=3) body = resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) # sanity with account1 resp = retry(get_listing, use_account=3) listing = resp.read() - self.assertEquals(resp.status, 200) - self.assert_(obj_name in listing) - self.assert_(self.obj not in listing) + self.assertEqual(resp.status, 200) + self.assertIn(obj_name, 
listing) + self.assertNotIn(self.obj, listing) @requires_acls def test_admin(self): @@ -688,12 +705,12 @@ class TestObject(unittest.TestCase): # cannot list objects resp = retry(get_listing, use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # cannot get object resp = retry(get, self.obj, use_account=3) resp.read() - self.assertEquals(resp.status, 403) + self.assertEqual(resp.status, 403) # grant admin access acl_user = tf.swift_test_user[2] @@ -706,32 +723,32 @@ class TestObject(unittest.TestCase): # can list objects resp = retry(get_listing, use_account=3) listing = resp.read() - self.assertEquals(resp.status, 200) - self.assert_(self.obj in listing) + self.assertEqual(resp.status, 200) + self.assertIn(self.obj, listing) # can get object resp = retry(get, self.obj, use_account=3) body = resp.read() - self.assertEquals(resp.status, 200) - self.assertEquals(body, 'test') + self.assertEqual(resp.status, 200) + self.assertEqual(body, 'test') # can put an object obj_name = str(uuid4()) resp = retry(put, obj_name, use_account=3) body = resp.read() - self.assertEquals(resp.status, 201) + self.assertEqual(resp.status, 201) # can delete an object resp = retry(delete, self.obj, use_account=3) body = resp.read() - self.assertEquals(resp.status, 204) + self.assertEqual(resp.status, 204) # sanity with account1 resp = retry(get_listing, use_account=3) listing = resp.read() - self.assertEquals(resp.status, 200) - self.assert_(obj_name in listing) - self.assert_(self.obj not in listing) + self.assertEqual(resp.status, 200) + self.assertIn(obj_name, listing) + self.assertNotIn(self.obj, listing) def test_manifest(self): if tf.skip: @@ -1096,78 +1113,78 @@ class TestObject(unittest.TestCase): resp = retry(put_cors_cont, '*') resp.read() - self.assertEquals(resp.status // 100, 2) + self.assertEqual(resp.status // 100, 2) resp = retry(put_obj, 'cat') resp.read() - self.assertEquals(resp.status // 100, 2) + 
self.assertEqual(resp.status // 100, 2) resp = retry(check_cors, 'OPTIONS', 'cat', {'Origin': 'http://m.com'}) - self.assertEquals(resp.status, 401) + self.assertEqual(resp.status, 401) resp = retry(check_cors, 'OPTIONS', 'cat', {'Origin': 'http://m.com', 'Access-Control-Request-Method': 'GET'}) - self.assertEquals(resp.status, 200) + self.assertEqual(resp.status, 200) resp.read() headers = dict((k.lower(), v) for k, v in resp.getheaders()) - self.assertEquals(headers.get('access-control-allow-origin'), - '*') + self.assertEqual(headers.get('access-control-allow-origin'), + '*') resp = retry(check_cors, 'GET', 'cat', {'Origin': 'http://m.com'}) - self.assertEquals(resp.status, 200) + self.assertEqual(resp.status, 200) headers = dict((k.lower(), v) for k, v in resp.getheaders()) - self.assertEquals(headers.get('access-control-allow-origin'), - '*') + self.assertEqual(headers.get('access-control-allow-origin'), + '*') resp = retry(check_cors, 'GET', 'cat', {'Origin': 'http://m.com', 'X-Web-Mode': 'True'}) - self.assertEquals(resp.status, 200) + self.assertEqual(resp.status, 200) headers = dict((k.lower(), v) for k, v in resp.getheaders()) - self.assertEquals(headers.get('access-control-allow-origin'), - '*') + self.assertEqual(headers.get('access-control-allow-origin'), + '*') #################### resp = retry(put_cors_cont, 'http://secret.com') resp.read() - self.assertEquals(resp.status // 100, 2) + self.assertEqual(resp.status // 100, 2) resp = retry(check_cors, 'OPTIONS', 'cat', {'Origin': 'http://m.com', 'Access-Control-Request-Method': 'GET'}) resp.read() - self.assertEquals(resp.status, 401) + self.assertEqual(resp.status, 401) if strict_cors: resp = retry(check_cors, 'GET', 'cat', {'Origin': 'http://m.com'}) resp.read() - self.assertEquals(resp.status, 200) + self.assertEqual(resp.status, 200) headers = dict((k.lower(), v) for k, v in resp.getheaders()) - self.assertTrue('access-control-allow-origin' not in headers) + 
self.assertNotIn('access-control-allow-origin', headers) resp = retry(check_cors, 'GET', 'cat', {'Origin': 'http://secret.com'}) resp.read() - self.assertEquals(resp.status, 200) + self.assertEqual(resp.status, 200) headers = dict((k.lower(), v) for k, v in resp.getheaders()) - self.assertEquals(headers.get('access-control-allow-origin'), - 'http://secret.com') + self.assertEqual(headers.get('access-control-allow-origin'), + 'http://secret.com') else: resp = retry(check_cors, 'GET', 'cat', {'Origin': 'http://m.com'}) resp.read() - self.assertEquals(resp.status, 200) + self.assertEqual(resp.status, 200) headers = dict((k.lower(), v) for k, v in resp.getheaders()) - self.assertEquals(headers.get('access-control-allow-origin'), - 'http://m.com') + self.assertEqual(headers.get('access-control-allow-origin'), + 'http://m.com') @requires_policies def test_cross_policy_copy(self): diff --git a/test/functional/tests.py b/test/functional/tests.py index f6fe875374..092c8098f6 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -20,7 +20,7 @@ import hmac import json import locale import random -import StringIO +import six import time import unittest import urllib @@ -69,15 +69,16 @@ class Base(unittest.TestCase): def assert_body(self, body): response_body = self.env.conn.response.read() - self.assert_(response_body == body, - 'Body returned: %s' % (response_body)) + self.assertTrue(response_body == body, + 'Body returned: %s' % (response_body)) def assert_status(self, status_or_statuses): - self.assert_(self.env.conn.response.status == status_or_statuses or - (hasattr(status_or_statuses, '__iter__') and - self.env.conn.response.status in status_or_statuses), - 'Status returned: %d Expected: %s' % - (self.env.conn.response.status, status_or_statuses)) + self.assertTrue( + self.env.conn.response.status == status_or_statuses or + (hasattr(status_or_statuses, '__iter__') and + self.env.conn.response.status in status_or_statuses), + 'Status returned: %d 
Expected: %s' % + (self.env.conn.response.status, status_or_statuses)) class Base2(object): @@ -132,7 +133,7 @@ class TestAccount(Base): def testInvalidUTF8Path(self): invalid_utf8 = Utils.create_utf8_name()[::-1] container = self.env.account.container(invalid_utf8) - self.assert_(not container.create(cfg={'no_path_quote': True})) + self.assertFalse(container.create(cfg={'no_path_quote': True})) self.assert_status(412) self.assert_body('Invalid UTF8 or contains NULL') @@ -165,7 +166,7 @@ class TestAccount(Base): info = self.env.account.info() for field in ['object_count', 'container_count', 'bytes_used']: - self.assert_(info[field] >= 0) + self.assertTrue(info[field] >= 0) if info['container_count'] == len(self.env.containers): break @@ -192,8 +193,8 @@ class TestAccount(Base): for format_type in ['json', 'xml']: for a in self.env.account.containers( parms={'format': format_type}): - self.assert_(a['count'] >= 0) - self.assert_(a['bytes'] >= 0) + self.assertTrue(a['count'] >= 0) + self.assertTrue(a['bytes'] >= 0) headers = dict(self.env.conn.response.getheaders()) if format_type == 'json': @@ -209,7 +210,7 @@ class TestAccount(Base): p = {'limit': l} if l <= limit: - self.assert_(len(self.env.account.containers(parms=p)) <= l) + self.assertTrue(len(self.env.account.containers(parms=p)) <= l) self.assert_status(200) else: self.assertRaises(ResponseError, @@ -256,11 +257,11 @@ class TestAccount(Base): parms={'format': format_type, 'marker': marker, 'limit': limit}) - self.assert_(len(containers) <= limit) + self.assertTrue(len(containers) <= limit) if containers: if isinstance(containers[0], dict): containers = [x['name'] for x in containers] - self.assert_(locale.strcoll(containers[0], marker) > 0) + self.assertTrue(locale.strcoll(containers[0], marker) > 0) def testContainersOrderedByName(self): for format_type in [None, 'json', 'xml']: @@ -283,15 +284,13 @@ class TestAccount(Base): conn.connection.request('GET', '/v1/' + quoted_hax, None, {}) resp = 
conn.connection.getresponse() resp_headers = dict(resp.getheaders()) - self.assertTrue('www-authenticate' in resp_headers, - 'www-authenticate not found in %s' % resp_headers) + self.assertIn('www-authenticate', resp_headers) actual = resp_headers['www-authenticate'] expected = 'Swift realm="%s"' % quoted_hax # other middleware e.g. auth_token may also set www-authenticate # headers in which case actual values will be a comma separated list. # check that expected value is among the actual values - self.assertTrue(expected in actual, - '%s not found in %s' % (expected, actual)) + self.assertIn(expected, actual) class TestAccountUTF8(Base2, TestAccount): @@ -314,7 +313,7 @@ class TestAccountNoContainers(Base): def testGetRequest(self): for format_type in [None, 'json', 'xml']: - self.assert_(not self.env.account.containers( + self.assertFalse(self.env.account.containers( parms={'format': format_type})) if format_type is None: @@ -369,48 +368,49 @@ class TestContainer(Base): limit + 1, limit + 10, limit + 100): cont = self.env.account.container('a' * l) if l <= limit: - self.assert_(cont.create()) + self.assertTrue(cont.create()) self.assert_status(201) else: - self.assert_(not cont.create()) + self.assertFalse(cont.create()) self.assert_status(400) def testFileThenContainerDelete(self): cont = self.env.account.container(Utils.create_name()) - self.assert_(cont.create()) + self.assertTrue(cont.create()) file_item = cont.file(Utils.create_name()) - self.assert_(file_item.write_random()) + self.assertTrue(file_item.write_random()) - self.assert_(file_item.delete()) + self.assertTrue(file_item.delete()) self.assert_status(204) - self.assert_(file_item.name not in cont.files()) + self.assertNotIn(file_item.name, cont.files()) - self.assert_(cont.delete()) + self.assertTrue(cont.delete()) self.assert_status(204) - self.assert_(cont.name not in self.env.account.containers()) + self.assertNotIn(cont.name, self.env.account.containers()) def 
testFileListingLimitMarkerPrefix(self): cont = self.env.account.container(Utils.create_name()) - self.assert_(cont.create()) + self.assertTrue(cont.create()) files = sorted([Utils.create_name() for x in range(10)]) for f in files: file_item = cont.file(f) - self.assert_(file_item.write_random()) + self.assertTrue(file_item.write_random()) for i in range(len(files)): f = files[i] for j in range(1, len(files) - i): - self.assert_(cont.files(parms={'limit': j, 'marker': f}) == - files[i + 1: i + j + 1]) - self.assert_(cont.files(parms={'marker': f}) == files[i + 1:]) - self.assert_(cont.files(parms={'marker': f, 'prefix': f}) == []) - self.assert_(cont.files(parms={'prefix': f}) == [f]) + self.assertTrue( + cont.files(parms={'limit': j, 'marker': f}) == + files[i + 1: i + j + 1]) + self.assertTrue(cont.files(parms={'marker': f}) == files[i + 1:]) + self.assertTrue(cont.files(parms={'marker': f, 'prefix': f}) == []) + self.assertTrue(cont.files(parms={'prefix': f}) == [f]) def testPrefixAndLimit(self): load_constraint('container_listing_limit') cont = self.env.account.container(Utils.create_name()) - self.assert_(cont.create()) + self.assertTrue(cont.create()) prefix_file_count = 10 limit_count = 2 @@ -437,13 +437,41 @@ class TestContainer(Base): self.assertEqual(len(files), limit_count) for file_item in files: - self.assert_(file_item.startswith(prefix)) + self.assertTrue(file_item.startswith(prefix)) + + def testListDelimiter(self): + cont = self.env.account.container(Utils.create_name()) + self.assertTrue(cont.create()) + + delimiter = '-' + files = ['test', delimiter.join(['test', 'bar']), + delimiter.join(['test', 'foo'])] + for f in files: + file_item = cont.file(f) + self.assertTrue(file_item.write_random()) + + results = cont.files() + results = cont.files(parms={'delimiter': delimiter}) + self.assertEqual(results, ['test', 'test-']) + + def testListDelimiterAndPrefix(self): + cont = self.env.account.container(Utils.create_name()) + 
self.assertTrue(cont.create()) + + delimiter = 'a' + files = ['bar', 'bazar'] + for f in files: + file_item = cont.file(f) + self.assertTrue(file_item.write_random()) + + results = cont.files(parms={'delimiter': delimiter, 'prefix': 'ba'}) + self.assertEqual(results, ['bar', 'baza']) def testCreate(self): cont = self.env.account.container(Utils.create_name()) - self.assert_(cont.create()) + self.assertTrue(cont.create()) self.assert_status(201) - self.assert_(cont.name in self.env.account.containers()) + self.assertIn(cont.name, self.env.account.containers()) def testContainerFileListOnContainerThatDoesNotExist(self): for format_type in [None, 'json', 'xml']: @@ -456,13 +484,13 @@ class TestContainer(Base): valid_utf8 = Utils.create_utf8_name() invalid_utf8 = valid_utf8[::-1] container = self.env.account.container(valid_utf8) - self.assert_(container.create(cfg={'no_path_quote': True})) - self.assert_(container.name in self.env.account.containers()) + self.assertTrue(container.create(cfg={'no_path_quote': True})) + self.assertIn(container.name, self.env.account.containers()) self.assertEqual(container.files(), []) - self.assert_(container.delete()) + self.assertTrue(container.delete()) container = self.env.account.container(invalid_utf8) - self.assert_(not container.create(cfg={'no_path_quote': True})) + self.assertFalse(container.create(cfg={'no_path_quote': True})) self.assert_status(412) self.assertRaises(ResponseError, container.files, cfg={'no_path_quote': True}) @@ -470,9 +498,9 @@ class TestContainer(Base): def testCreateOnExisting(self): cont = self.env.account.container(Utils.create_name()) - self.assert_(cont.create()) + self.assertTrue(cont.create()) self.assert_status(201) - self.assert_(cont.create()) + self.assertTrue(cont.create()) self.assert_status(202) def testSlashInName(self): @@ -488,31 +516,31 @@ class TestContainer(Base): cont_name = cont_name.encode('utf-8') cont = self.env.account.container(cont_name) - self.assert_(not 
cont.create(cfg={'no_path_quote': True}), - 'created container with name %s' % (cont_name)) + self.assertFalse(cont.create(cfg={'no_path_quote': True}), + 'created container with name %s' % (cont_name)) self.assert_status(404) - self.assert_(cont.name not in self.env.account.containers()) + self.assertNotIn(cont.name, self.env.account.containers()) def testDelete(self): cont = self.env.account.container(Utils.create_name()) - self.assert_(cont.create()) + self.assertTrue(cont.create()) self.assert_status(201) - self.assert_(cont.delete()) + self.assertTrue(cont.delete()) self.assert_status(204) - self.assert_(cont.name not in self.env.account.containers()) + self.assertNotIn(cont.name, self.env.account.containers()) def testDeleteOnContainerThatDoesNotExist(self): cont = self.env.account.container(Utils.create_name()) - self.assert_(not cont.delete()) + self.assertFalse(cont.delete()) self.assert_status(404) def testDeleteOnContainerWithFiles(self): cont = self.env.account.container(Utils.create_name()) - self.assert_(cont.create()) + self.assertTrue(cont.create()) file_item = cont.file(Utils.create_name()) file_item.write_random(self.env.file_size) - self.assert_(file_item.name in cont.files()) - self.assert_(not cont.delete()) + self.assertIn(file_item.name, cont.files()) + self.assertFalse(cont.delete()) self.assert_status(409) def testFileCreateInContainerThatDoesNotExist(self): @@ -544,10 +572,10 @@ class TestContainer(Base): files = [x['name'] for x in files] for file_item in self.env.files: - self.assert_(file_item in files) + self.assertIn(file_item, files) for file_item in files: - self.assert_(file_item in self.env.files) + self.assertIn(file_item, self.env.files) def testMarkerLimitFileList(self): for format_type in [None, 'json', 'xml']: @@ -564,11 +592,11 @@ class TestContainer(Base): if isinstance(files[0], dict): files = [x['name'] for x in files] - self.assert_(len(files) <= limit) + self.assertTrue(len(files) <= limit) if files: if 
isinstance(files[0], dict): files = [x['name'] for x in files] - self.assert_(locale.strcoll(files[0], marker) > 0) + self.assertTrue(locale.strcoll(files[0], marker) > 0) def testFileOrder(self): for format_type in [None, 'json', 'xml']: @@ -597,19 +625,19 @@ class TestContainer(Base): def testTooLongName(self): cont = self.env.account.container('x' * 257) - self.assert_(not cont.create(), - 'created container with name %s' % (cont.name)) + self.assertFalse(cont.create(), + 'created container with name %s' % (cont.name)) self.assert_status(400) def testContainerExistenceCachingProblem(self): cont = self.env.account.container(Utils.create_name()) self.assertRaises(ResponseError, cont.files) - self.assert_(cont.create()) + self.assertTrue(cont.create()) cont.files() cont = self.env.account.container(Utils.create_name()) self.assertRaises(ResponseError, cont.files) - self.assert_(cont.create()) + self.assertTrue(cont.create()) file_item = cont.file(Utils.create_name()) file_item.write_random() @@ -707,7 +735,7 @@ class TestContainerPaths(Base): raise ValueError('too deep recursion') for file_item in self.env.container.files(parms={'path': path}): - self.assert_(file_item.startswith(path)) + self.assertTrue(file_item.startswith(path)) if file_item.endswith('/'): recurse_path(file_item, count + 1) found_dirs.append(file_item) @@ -717,28 +745,28 @@ class TestContainerPaths(Base): recurse_path('') for file_item in self.env.stored_files: if file_item.startswith('/'): - self.assert_(file_item not in found_dirs) - self.assert_(file_item not in found_files) + self.assertNotIn(file_item, found_dirs) + self.assertNotIn(file_item, found_files) elif file_item.endswith('/'): - self.assert_(file_item in found_dirs) - self.assert_(file_item not in found_files) + self.assertIn(file_item, found_dirs) + self.assertNotIn(file_item, found_files) else: - self.assert_(file_item in found_files) - self.assert_(file_item not in found_dirs) + self.assertIn(file_item, found_files) + 
self.assertNotIn(file_item, found_dirs) found_files = [] found_dirs = [] recurse_path('/') for file_item in self.env.stored_files: if not file_item.startswith('/'): - self.assert_(file_item not in found_dirs) - self.assert_(file_item not in found_files) + self.assertNotIn(file_item, found_dirs) + self.assertNotIn(file_item, found_files) elif file_item.endswith('/'): - self.assert_(file_item in found_dirs) - self.assert_(file_item not in found_files) + self.assertIn(file_item, found_dirs) + self.assertNotIn(file_item, found_files) else: - self.assert_(file_item in found_files) - self.assert_(file_item not in found_dirs) + self.assertIn(file_item, found_files) + self.assertNotIn(file_item, found_dirs) def testContainerListing(self): for format_type in (None, 'json', 'xml'): @@ -752,8 +780,8 @@ class TestContainerPaths(Base): for format_type in ('json', 'xml'): for file_item in self.env.container.files(parms={'format': format_type}): - self.assert_(int(file_item['bytes']) >= 0) - self.assert_('last_modified' in file_item) + self.assertTrue(int(file_item['bytes']) >= 0) + self.assertIn('last_modified', file_item) if file_item['name'].endswith('/'): self.assertEqual(file_item['content_type'], 'application/directory') @@ -852,7 +880,7 @@ class TestFile(Base): file_item.sync_metadata(metadata) dest_cont = self.env.account.container(Utils.create_name()) - self.assert_(dest_cont.create()) + self.assertTrue(dest_cont.create()) # copy both from within and across containers for cont in (self.env.container, dest_cont): @@ -863,13 +891,13 @@ class TestFile(Base): file_item = self.env.container.file(source_filename) file_item.copy('%s%s' % (prefix, cont), dest_filename) - self.assert_(dest_filename in cont.files()) + self.assertIn(dest_filename, cont.files()) file_item = cont.file(dest_filename) - self.assert_(data == file_item.read()) - self.assert_(file_item.initialize()) - self.assert_(metadata == file_item.metadata) + self.assertTrue(data == file_item.read()) + 
self.assertTrue(file_item.initialize()) + self.assertTrue(metadata == file_item.metadata) def testCopyAccount(self): # makes sure to test encoded characters @@ -882,7 +910,7 @@ class TestFile(Base): file_item.sync_metadata(metadata) dest_cont = self.env.account.container(Utils.create_name()) - self.assert_(dest_cont.create()) + self.assertTrue(dest_cont.create()) acct = self.env.conn.account_name # copy both from within and across containers @@ -896,16 +924,16 @@ class TestFile(Base): '%s%s' % (prefix, cont), dest_filename) - self.assert_(dest_filename in cont.files()) + self.assertIn(dest_filename, cont.files()) file_item = cont.file(dest_filename) - self.assert_(data == file_item.read()) - self.assert_(file_item.initialize()) - self.assert_(metadata == file_item.metadata) + self.assertTrue(data == file_item.read()) + self.assertTrue(file_item.initialize()) + self.assertTrue(metadata == file_item.metadata) dest_cont = self.env.account2.container(Utils.create_name()) - self.assert_(dest_cont.create(hdrs={ + self.assertTrue(dest_cont.create(hdrs={ 'X-Container-Write': self.env.conn.user_acl })) @@ -919,13 +947,13 @@ class TestFile(Base): '%s%s' % (prefix, dest_cont), dest_filename) - self.assert_(dest_filename in dest_cont.files()) + self.assertIn(dest_filename, dest_cont.files()) file_item = dest_cont.file(dest_filename) - self.assert_(data == file_item.read()) - self.assert_(file_item.initialize()) - self.assert_(metadata == file_item.metadata) + self.assertTrue(data == file_item.read()) + self.assertTrue(file_item.initialize()) + self.assertTrue(metadata == file_item.metadata) def testCopy404s(self): source_filename = Utils.create_name() @@ -933,37 +961,38 @@ class TestFile(Base): file_item.write_random() dest_cont = self.env.account.container(Utils.create_name()) - self.assert_(dest_cont.create()) + self.assertTrue(dest_cont.create()) for prefix in ('', '/'): # invalid source container source_cont = self.env.account.container(Utils.create_name()) file_item = 
source_cont.file(source_filename) - self.assert_(not file_item.copy( + self.assertFalse(file_item.copy( '%s%s' % (prefix, self.env.container), Utils.create_name())) self.assert_status(404) - self.assert_(not file_item.copy('%s%s' % (prefix, dest_cont), - Utils.create_name())) + self.assertFalse(file_item.copy('%s%s' % (prefix, dest_cont), + Utils.create_name())) self.assert_status(404) # invalid source object file_item = self.env.container.file(Utils.create_name()) - self.assert_(not file_item.copy( + self.assertFalse(file_item.copy( '%s%s' % (prefix, self.env.container), Utils.create_name())) self.assert_status(404) - self.assert_(not file_item.copy('%s%s' % (prefix, dest_cont), + self.assertFalse(file_item.copy('%s%s' % (prefix, dest_cont), Utils.create_name())) self.assert_status(404) # invalid destination container file_item = self.env.container.file(source_filename) - self.assert_(not file_item.copy( - '%s%s' % (prefix, Utils.create_name()), - Utils.create_name())) + self.assertTrue( + not file_item.copy( + '%s%s' % (prefix, Utils.create_name()), + Utils.create_name())) def testCopyAccount404s(self): acct = self.env.conn.account_name @@ -973,11 +1002,11 @@ class TestFile(Base): file_item.write_random() dest_cont = self.env.account.container(Utils.create_name()) - self.assert_(dest_cont.create(hdrs={ + self.assertTrue(dest_cont.create(hdrs={ 'X-Container-Read': self.env.conn2.user_acl })) dest_cont2 = self.env.account2.container(Utils.create_name()) - self.assert_(dest_cont2.create(hdrs={ + self.assertTrue(dest_cont2.create(hdrs={ 'X-Container-Write': self.env.conn.user_acl, 'X-Container-Read': self.env.conn.user_acl })) @@ -987,7 +1016,7 @@ class TestFile(Base): # invalid source container source_cont = self.env.account.container(Utils.create_name()) file_item = source_cont.file(source_filename) - self.assert_(not file_item.copy_account( + self.assertFalse(file_item.copy_account( acct, '%s%s' % (prefix, self.env.container), Utils.create_name())) @@ -998,7 
+1027,7 @@ class TestFile(Base): else: self.assert_status(404) - self.assert_(not file_item.copy_account( + self.assertFalse(file_item.copy_account( acct, '%s%s' % (prefix, cont), Utils.create_name())) @@ -1006,7 +1035,7 @@ class TestFile(Base): # invalid source object file_item = self.env.container.file(Utils.create_name()) - self.assert_(not file_item.copy_account( + self.assertFalse(file_item.copy_account( acct, '%s%s' % (prefix, self.env.container), Utils.create_name())) @@ -1017,7 +1046,7 @@ class TestFile(Base): else: self.assert_status(404) - self.assert_(not file_item.copy_account( + self.assertFalse(file_item.copy_account( acct, '%s%s' % (prefix, cont), Utils.create_name())) @@ -1025,7 +1054,7 @@ class TestFile(Base): # invalid destination container file_item = self.env.container.file(source_filename) - self.assert_(not file_item.copy_account( + self.assertFalse(file_item.copy_account( acct, '%s%s' % (prefix, Utils.create_name()), Utils.create_name())) @@ -1042,9 +1071,9 @@ class TestFile(Base): file_item.write_random() file_item = self.env.container.file(source_filename) - self.assert_(not file_item.copy(Utils.create_name(), - Utils.create_name(), - cfg={'no_destination': True})) + self.assertFalse(file_item.copy(Utils.create_name(), + Utils.create_name(), + cfg={'no_destination': True})) self.assert_status(412) def testCopyDestinationSlashProblems(self): @@ -1053,9 +1082,9 @@ class TestFile(Base): file_item.write_random() # no slash - self.assert_(not file_item.copy(Utils.create_name(), - Utils.create_name(), - cfg={'destination': Utils.create_name()})) + self.assertFalse(file_item.copy(Utils.create_name(), + Utils.create_name(), + cfg={'destination': Utils.create_name()})) self.assert_status(412) def testCopyFromHeader(self): @@ -1070,7 +1099,7 @@ class TestFile(Base): data = file_item.write_random() dest_cont = self.env.account.container(Utils.create_name()) - self.assert_(dest_cont.create()) + self.assertTrue(dest_cont.create()) # copy both from 
within and across containers for cont in (self.env.container, dest_cont): @@ -1082,18 +1111,18 @@ class TestFile(Base): file_item.write(hdrs={'X-Copy-From': '%s%s/%s' % ( prefix, self.env.container.name, source_filename)}) - self.assert_(dest_filename in cont.files()) + self.assertIn(dest_filename, cont.files()) file_item = cont.file(dest_filename) - self.assert_(data == file_item.read()) - self.assert_(file_item.initialize()) - self.assert_(metadata == file_item.metadata) + self.assertTrue(data == file_item.read()) + self.assertTrue(file_item.initialize()) + self.assertTrue(metadata == file_item.metadata) def testCopyFromAccountHeader(self): acct = self.env.conn.account_name src_cont = self.env.account.container(Utils.create_name()) - self.assert_(src_cont.create(hdrs={ + self.assertTrue(src_cont.create(hdrs={ 'X-Container-Read': self.env.conn2.user_acl })) source_filename = Utils.create_name() @@ -1107,9 +1136,9 @@ class TestFile(Base): data = file_item.write_random() dest_cont = self.env.account.container(Utils.create_name()) - self.assert_(dest_cont.create()) + self.assertTrue(dest_cont.create()) dest_cont2 = self.env.account2.container(Utils.create_name()) - self.assert_(dest_cont2.create(hdrs={ + self.assertTrue(dest_cont2.create(hdrs={ 'X-Container-Write': self.env.conn.user_acl })) @@ -1125,13 +1154,13 @@ class TestFile(Base): src_cont.name, source_filename)}) - self.assert_(dest_filename in cont.files()) + self.assertIn(dest_filename, cont.files()) file_item = cont.file(dest_filename) - self.assert_(data == file_item.read()) - self.assert_(file_item.initialize()) - self.assert_(metadata == file_item.metadata) + self.assertTrue(data == file_item.read()) + self.assertTrue(file_item.initialize()) + self.assertTrue(metadata == file_item.metadata) def testCopyFromHeader404s(self): source_filename = Utils.create_name() @@ -1141,40 +1170,41 @@ class TestFile(Base): for prefix in ('', '/'): # invalid source container file_item = 
self.env.container.file(Utils.create_name()) + copy_from = ('%s%s/%s' + % (prefix, Utils.create_name(), source_filename)) self.assertRaises(ResponseError, file_item.write, - hdrs={'X-Copy-From': '%s%s/%s' % - (prefix, - Utils.create_name(), source_filename)}) + hdrs={'X-Copy-From': copy_from}) self.assert_status(404) # invalid source object + copy_from = ('%s%s/%s' + % (prefix, self.env.container.name, + Utils.create_name())) file_item = self.env.container.file(Utils.create_name()) self.assertRaises(ResponseError, file_item.write, - hdrs={'X-Copy-From': '%s%s/%s' % - (prefix, - self.env.container.name, Utils.create_name())}) + hdrs={'X-Copy-From': copy_from}) self.assert_status(404) # invalid destination container dest_cont = self.env.account.container(Utils.create_name()) file_item = dest_cont.file(Utils.create_name()) + copy_from = ('%s%s/%s' + % (prefix, self.env.container.name, source_filename)) self.assertRaises(ResponseError, file_item.write, - hdrs={'X-Copy-From': '%s%s/%s' % - (prefix, - self.env.container.name, source_filename)}) + hdrs={'X-Copy-From': copy_from}) self.assert_status(404) def testCopyFromAccountHeader404s(self): acct = self.env.conn2.account_name src_cont = self.env.account2.container(Utils.create_name()) - self.assert_(src_cont.create(hdrs={ + self.assertTrue(src_cont.create(hdrs={ 'X-Container-Read': self.env.conn.user_acl })) source_filename = Utils.create_name() file_item = src_cont.file(source_filename) file_item.write_random() dest_cont = self.env.account.container(Utils.create_name()) - self.assert_(dest_cont.create()) + self.assertTrue(dest_cont.create()) for prefix in ('', '/'): # invalid source container @@ -1217,7 +1247,7 @@ class TestFile(Base): file_item = self.env.container.file('a' * l) if l <= limit: - self.assert_(file_item.write()) + self.assertTrue(file_item.write()) self.assert_status(201) else: self.assertRaises(ResponseError, file_item.write) @@ -1232,16 +1262,16 @@ class TestFile(Base): file_name = 
Utils.create_name(6) + '?' + Utils.create_name(6) file_item = self.env.container.file(file_name) - self.assert_(file_item.write(cfg={'no_path_quote': True})) - self.assert_(file_name not in self.env.container.files()) - self.assert_(file_name.split('?')[0] in self.env.container.files()) + self.assertTrue(file_item.write(cfg={'no_path_quote': True})) + self.assertNotIn(file_name, self.env.container.files()) + self.assertIn(file_name.split('?')[0], self.env.container.files()) def testDeleteThen404s(self): file_item = self.env.container.file(Utils.create_name()) - self.assert_(file_item.write_random()) + self.assertTrue(file_item.write_random()) self.assert_status(201) - self.assert_(file_item.delete()) + self.assertTrue(file_item.delete()) self.assert_status(204) file_item.metadata = {Utils.create_ascii_name(): Utils.create_name()} @@ -1285,15 +1315,15 @@ class TestFile(Base): file_item.metadata = metadata if i <= number_limit: - self.assert_(file_item.write()) + self.assertTrue(file_item.write()) self.assert_status(201) - self.assert_(file_item.sync_metadata()) + self.assertTrue(file_item.sync_metadata()) self.assert_status((201, 202)) else: self.assertRaises(ResponseError, file_item.write) self.assert_status(400) file_item.metadata = {} - self.assert_(file_item.write()) + self.assertTrue(file_item.write()) self.assert_status(201) file_item.metadata = metadata self.assertRaises(ResponseError, file_item.sync_metadata) @@ -1304,7 +1334,7 @@ class TestFile(Base): 'zip': 'application/zip'} container = self.env.account.container(Utils.create_name()) - self.assert_(container.create()) + self.assertTrue(container.create()) for i in file_types.keys(): file_item = container.file(Utils.create_name() + '.' 
+ i) @@ -1330,8 +1360,9 @@ class TestFile(Base): for i in range(0, file_length, range_size): range_string = 'bytes=%d-%d' % (i, i + range_size - 1) hdrs = {'Range': range_string} - self.assert_(data[i: i + range_size] == file_item.read(hdrs=hdrs), - range_string) + self.assertTrue( + data[i: i + range_size] == file_item.read(hdrs=hdrs), + range_string) range_string = 'bytes=-%d' % (i) hdrs = {'Range': range_string} @@ -1348,8 +1379,9 @@ class TestFile(Base): range_string = 'bytes=%d-' % (i) hdrs = {'Range': range_string} - self.assert_(file_item.read(hdrs=hdrs) == data[i - file_length:], - range_string) + self.assertTrue( + file_item.read(hdrs=hdrs) == data[i - file_length:], + range_string) range_string = 'bytes=%d-%d' % (file_length + 1000, file_length + 2000) hdrs = {'Range': range_string} @@ -1358,20 +1390,21 @@ class TestFile(Base): range_string = 'bytes=%d-%d' % (file_length - 1000, file_length + 2000) hdrs = {'Range': range_string} - self.assert_(file_item.read(hdrs=hdrs) == data[-1000:], range_string) + self.assertTrue( + file_item.read(hdrs=hdrs) == data[-1000:], range_string) hdrs = {'Range': '0-4'} - self.assert_(file_item.read(hdrs=hdrs) == data, range_string) + self.assertTrue(file_item.read(hdrs=hdrs) == data, range_string) # RFC 2616 14.35.1 # "If the entity is shorter than the specified suffix-length, the # entire entity-body is used." range_string = 'bytes=-%d' % (file_length + 10) hdrs = {'Range': range_string} - self.assert_(file_item.read(hdrs=hdrs) == data, range_string) + self.assertTrue(file_item.read(hdrs=hdrs) == data, range_string) def testRangedGetsWithLWSinHeader(self): - #Skip this test until webob 1.2 can tolerate LWS in Range header. + # Skip this test until webob 1.2 can tolerate LWS in Range header. 
file_length = 10000 file_item = self.env.container.file(Utils.create_name()) data = file_item.write_random(file_length) @@ -1379,7 +1412,7 @@ class TestFile(Base): for r in ('BYTES=0-999', 'bytes = 0-999', 'BYTES = 0 - 999', 'bytes = 0 - 999', 'bytes=0 - 999', 'bytes=0-999 '): - self.assert_(file_item.read(hdrs={'Range': r}) == data[0:1000]) + self.assertTrue(file_item.read(hdrs={'Range': r}) == data[0:1000]) def testFileSizeLimit(self): limit = load_constraint('max_file_size') @@ -1400,8 +1433,8 @@ class TestFile(Base): file_item = self.env.container.file(Utils.create_name()) if i <= limit: - self.assert_(timeout(tsecs, file_item.write, - cfg={'set_content_length': i})) + self.assertTrue(timeout(tsecs, file_item.write, + cfg={'set_content_length': i})) else: self.assertRaises(ResponseError, timeout, tsecs, file_item.write, @@ -1417,9 +1450,9 @@ class TestFile(Base): file_item = self.env.container.file(Utils.create_name()) file_item.write_random(self.env.file_size) - self.assert_(file_item.name in self.env.container.files()) - self.assert_(file_item.delete()) - self.assert_(file_item.name not in self.env.container.files()) + self.assertIn(file_item.name, self.env.container.files()) + self.assertTrue(file_item.delete()) + self.assertNotIn(file_item.name, self.env.container.files()) def testBadHeaders(self): file_length = 100 @@ -1446,15 +1479,16 @@ class TestFile(Base): self.assert_status(501) # bad request types - #for req in ('LICK', 'GETorHEAD_base', 'container_info', - # 'best_response'): + # for req in ('LICK', 'GETorHEAD_base', 'container_info', + # 'best_response'): for req in ('LICK', 'GETorHEAD_base'): self.env.account.conn.make_request(req) self.assert_status(405) # bad range headers - self.assert_(len(file_item.read(hdrs={'Range': 'parsecs=8-12'})) == - file_length) + self.assertTrue( + len(file_item.read(hdrs={'Range': 'parsecs=8-12'})) == + file_length) self.assert_status(200) def testMetadataLengthLimits(self): @@ -1471,14 +1505,14 @@ class 
TestFile(Base): file_item.metadata = metadata if l[0] <= key_limit and l[1] <= value_limit: - self.assert_(file_item.write()) + self.assertTrue(file_item.write()) self.assert_status(201) - self.assert_(file_item.sync_metadata()) + self.assertTrue(file_item.sync_metadata()) else: self.assertRaises(ResponseError, file_item.write) self.assert_status(400) file_item.metadata = {} - self.assert_(file_item.write()) + self.assertTrue(file_item.write()) self.assert_status(201) file_item.metadata = metadata self.assertRaises(ResponseError, file_item.sync_metadata) @@ -1495,7 +1529,7 @@ class TestFile(Base): file_item = self.env.container.file(Utils.create_name()) data = file_item.write_random() self.assert_status(201) - self.assert_(data == file_item.read()) + self.assertTrue(data == file_item.read()) self.assert_status(200) def testHead(self): @@ -1515,7 +1549,7 @@ class TestFile(Base): self.assertEqual(info['content_length'], self.env.file_size) self.assertEqual(info['etag'], md5) self.assertEqual(info['content_type'], content_type) - self.assert_('last_modified' in info) + self.assertIn('last_modified', info) def testDeleteOfFileThatDoesNotExist(self): # in container that exists @@ -1551,11 +1585,11 @@ class TestFile(Base): metadata[Utils.create_ascii_name()] = Utils.create_name() file_item.metadata = metadata - self.assert_(file_item.sync_metadata()) + self.assertTrue(file_item.sync_metadata()) self.assert_status((201, 202)) file_item = self.env.container.file(file_item.name) - self.assert_(file_item.initialize()) + self.assertTrue(file_item.initialize()) self.assert_status(200) self.assertEqual(file_item.metadata, metadata) @@ -1609,13 +1643,13 @@ class TestFile(Base): file_item.write_random(self.env.file_size) file_item = self.env.container.file(file_item.name) - self.assert_(file_item.initialize()) + self.assertTrue(file_item.initialize()) self.assert_status(200) self.assertEqual(file_item.metadata, metadata) def testSerialization(self): container = 
self.env.account.container(Utils.create_name()) - self.assert_(container.create()) + self.assertTrue(container.create()) files = [] for i in (0, 1, 10, 100, 1000, 10000): @@ -1657,8 +1691,9 @@ class TestFile(Base): f[format_type] = True found = True - self.assert_(found, 'Unexpected file %s found in ' - '%s listing' % (file_item['name'], format_type)) + self.assertTrue( + found, 'Unexpected file %s found in ' + '%s listing' % (file_item['name'], format_type)) headers = dict(self.env.conn.response.getheaders()) if format_type == 'json': @@ -1670,13 +1705,15 @@ class TestFile(Base): lm_diff = max([f['last_modified'] for f in files]) -\ min([f['last_modified'] for f in files]) - self.assert_(lm_diff < write_time + 1, 'Diff in last ' - 'modified times should be less than time to write files') + self.assertTrue( + lm_diff < write_time + 1, 'Diff in last ' + 'modified times should be less than time to write files') for f in files: for format_type in ['json', 'xml']: - self.assert_(f[format_type], 'File %s not found in %s listing' - % (f['name'], format_type)) + self.assertTrue( + f[format_type], 'File %s not found in %s listing' + % (f['name'], format_type)) def testStackedOverwrite(self): file_item = self.env.container.file(Utils.create_name()) @@ -1685,7 +1722,7 @@ class TestFile(Base): data = file_item.write_random(512) file_item.write(data) - self.assert_(file_item.read() == data) + self.assertTrue(file_item.read() == data) def testTooLongName(self): file_item = self.env.container.file('x' * 1025) @@ -1695,18 +1732,18 @@ class TestFile(Base): def testZeroByteFile(self): file_item = self.env.container.file(Utils.create_name()) - self.assert_(file_item.write('')) - self.assert_(file_item.name in self.env.container.files()) - self.assert_(file_item.read() == '') + self.assertTrue(file_item.write('')) + self.assertIn(file_item.name, self.env.container.files()) + self.assertTrue(file_item.read() == '') def testEtagResponse(self): file_item = 
self.env.container.file(Utils.create_name()) - data = StringIO.StringIO(file_item.write_random(512)) + data = six.StringIO(file_item.write_random(512)) etag = File.compute_md5sum(data) headers = dict(self.env.conn.response.getheaders()) - self.assert_('etag' in headers.keys()) + self.assertIn('etag', headers.keys()) header_etag = headers['etag'].strip('"') self.assertEqual(etag, header_etag) @@ -1731,8 +1768,8 @@ class TestFile(Base): for j in chunks(data, i): file_item.chunked_write(j) - self.assert_(file_item.chunked_write()) - self.assert_(data == file_item.read()) + self.assertTrue(file_item.chunked_write()) + self.assertTrue(data == file_item.read()) info = file_item.info() self.assertEqual(etag, info['etag']) @@ -1850,7 +1887,7 @@ class TestDlo(Base): file_contents, "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffff") # The copied object must not have X-Object-Manifest - self.assertTrue("x_object_manifest" not in file_item.info()) + self.assertNotIn("x_object_manifest", file_item.info()) def test_copy_account(self): # dlo use same account and same container only @@ -1876,7 +1913,7 @@ class TestDlo(Base): file_contents, "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffff") # The copied object must not have X-Object-Manifest - self.assertTrue("x_object_manifest" not in file_item.info()) + self.assertNotIn("x_object_manifest", file_item.info()) def test_copy_manifest(self): # Copying the manifest with multipart-manifest=get query string @@ -1988,7 +2025,7 @@ class TestFileComparison(Base): def testIfMatch(self): for file_item in self.env.files: hdrs = {'If-Match': file_item.md5} - self.assert_(file_item.read(hdrs=hdrs)) + self.assertTrue(file_item.read(hdrs=hdrs)) hdrs = {'If-Match': 'bogus'} self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) @@ -1997,7 +2034,7 @@ class TestFileComparison(Base): def testIfNoneMatch(self): for file_item in self.env.files: hdrs = {'If-None-Match': 'bogus'} - self.assert_(file_item.read(hdrs=hdrs)) + 
self.assertTrue(file_item.read(hdrs=hdrs)) hdrs = {'If-None-Match': file_item.md5} self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) @@ -2006,8 +2043,8 @@ class TestFileComparison(Base): def testIfModifiedSince(self): for file_item in self.env.files: hdrs = {'If-Modified-Since': self.env.time_old_f1} - self.assert_(file_item.read(hdrs=hdrs)) - self.assert_(file_item.info(hdrs=hdrs)) + self.assertTrue(file_item.read(hdrs=hdrs)) + self.assertTrue(file_item.info(hdrs=hdrs)) hdrs = {'If-Modified-Since': self.env.time_new} self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) @@ -2018,8 +2055,8 @@ class TestFileComparison(Base): def testIfUnmodifiedSince(self): for file_item in self.env.files: hdrs = {'If-Unmodified-Since': self.env.time_new} - self.assert_(file_item.read(hdrs=hdrs)) - self.assert_(file_item.info(hdrs=hdrs)) + self.assertTrue(file_item.read(hdrs=hdrs)) + self.assertTrue(file_item.info(hdrs=hdrs)) hdrs = {'If-Unmodified-Since': self.env.time_old_f2} self.assertRaises(ResponseError, file_item.read, hdrs=hdrs) @@ -2031,7 +2068,7 @@ class TestFileComparison(Base): for file_item in self.env.files: hdrs = {'If-Match': file_item.md5, 'If-Unmodified-Since': self.env.time_new} - self.assert_(file_item.read(hdrs=hdrs)) + self.assertTrue(file_item.read(hdrs=hdrs)) hdrs = {'If-Match': 'bogus', 'If-Unmodified-Since': self.env.time_new} @@ -2054,7 +2091,7 @@ class TestFileComparison(Base): file = self.env.container.file(file_name) info = file.info() - self.assert_('last_modified' in info) + self.assertIn('last_modified', info) last_modified = info['last_modified'] self.assertEqual(put_last_modified, info['last_modified']) @@ -2063,7 +2100,7 @@ class TestFileComparison(Base): self.assert_status(304) hdrs = {'If-Unmodified-Since': last_modified} - self.assert_(file.read(hdrs=hdrs)) + self.assertTrue(file.read(hdrs=hdrs)) class TestFileComparisonUTF8(Base2, TestFileComparison): @@ -2356,7 +2393,7 @@ class TestSlo(Base): # copy to different account acct = 
self.env.conn2.account_name dest_cont = self.env.account2.container(Utils.create_name()) - self.assert_(dest_cont.create(hdrs={ + self.assertTrue(dest_cont.create(hdrs={ 'X-Container-Write': self.env.conn.user_acl })) file_item = self.env.container.file("manifest-abcde") @@ -2397,7 +2434,7 @@ class TestSlo(Base): # different account acct = self.env.conn2.account_name dest_cont = self.env.account2.container(Utils.create_name()) - self.assert_(dest_cont.create(hdrs={ + self.assertTrue(dest_cont.create(hdrs={ 'X-Container-Write': self.env.conn.user_acl })) file_item.copy_account(acct, @@ -2413,8 +2450,6 @@ class TestSlo(Base): self.fail("COPY didn't copy the manifest (invalid json on GET)") def _make_manifest(self): - # To avoid the bug 1453807 on fast-post, make a new manifest - # for post test. file_item = self.env.container.file("manifest-post") seg_info = self.env.seg_info file_item.write( @@ -2436,6 +2471,7 @@ class TestSlo(Base): updated = self.env.container.file("manifest-post") updated.info() updated.header_fields([('user-meta', 'x-object-meta-post')]) # sanity + updated.header_fields([('slo', 'x-static-large-object')]) updated_contents = updated.read(parms={'multipart-manifest': 'get'}) try: json.loads(updated_contents) @@ -2456,6 +2492,7 @@ class TestSlo(Base): updated.info() updated.header_fields( [('user-meta', 'x-object-meta-post')]) # sanity + updated.header_fields([('slo', 'x-static-large-object')]) updated_contents = updated.read( parms={'multipart-manifest': 'get'}) try: @@ -2561,7 +2598,7 @@ class TestObjectVersioningEnv(object): @classmethod def setUp(cls): cls.conn = Connection(tf.config) - cls.conn.authenticate() + cls.storage_url, cls.storage_token = cls.conn.authenticate() cls.account = Account(cls.conn, tf.config.get('account', tf.config['username'])) @@ -2591,6 +2628,30 @@ class TestObjectVersioningEnv(object): # if versioning is off, then X-Versions-Location won't persist cls.versioning_enabled = 'versions' in container_info + # setup another 
account to test ACLs + config2 = deepcopy(tf.config) + config2['account'] = tf.config['account2'] + config2['username'] = tf.config['username2'] + config2['password'] = tf.config['password2'] + cls.conn2 = Connection(config2) + cls.storage_url2, cls.storage_token2 = cls.conn2.authenticate() + cls.account2 = cls.conn2.get_account() + cls.account2.delete_containers() + + # setup another account with no access to anything to test ACLs + config3 = deepcopy(tf.config) + config3['account'] = tf.config['account'] + config3['username'] = tf.config['username3'] + config3['password'] = tf.config['password3'] + cls.conn3 = Connection(config3) + cls.storage_url3, cls.storage_token3 = cls.conn3.authenticate() + cls.account3 = cls.conn3.get_account() + + @classmethod + def tearDown(cls): + cls.account.delete_containers() + cls.account2.delete_containers() + class TestCrossPolicyObjectVersioningEnv(object): # tri-state: None initially, then True/False @@ -2613,14 +2674,14 @@ class TestCrossPolicyObjectVersioningEnv(object): cls.multiple_policies_enabled = True else: cls.multiple_policies_enabled = False - # We have to lie here that versioning is enabled. We actually - # don't know, but it does not matter. We know these tests cannot - # run without multiple policies present. If multiple policies are - # present, we won't be setting this field to any value, so it - # should all still work. 
- cls.versioning_enabled = True + cls.versioning_enabled = False return + if cls.versioning_enabled is None: + cls.versioning_enabled = 'versioned_writes' in cluster_info + if not cls.versioning_enabled: + return + policy = cls.policies.select() version_policy = cls.policies.exclude(name=policy['name']).select() @@ -2654,6 +2715,25 @@ class TestCrossPolicyObjectVersioningEnv(object): # if versioning is off, then X-Versions-Location won't persist cls.versioning_enabled = 'versions' in container_info + # setup another account to test ACLs + config2 = deepcopy(tf.config) + config2['account'] = tf.config['account2'] + config2['username'] = tf.config['username2'] + config2['password'] = tf.config['password2'] + cls.conn2 = Connection(config2) + cls.storage_url2, cls.storage_token2 = cls.conn2.authenticate() + cls.account2 = cls.conn2.get_account() + cls.account2.delete_containers() + + # setup another account with no access to anything to test ACLs + config3 = deepcopy(tf.config) + config3['account'] = tf.config['account'] + config3['username'] = tf.config['username3'] + config3['password'] = tf.config['password3'] + cls.conn3 = Connection(config3) + cls.storage_url3, cls.storage_token3 = cls.conn3.authenticate() + cls.account3 = cls.conn3.get_account() + class TestObjectVersioning(Base): env = TestObjectVersioningEnv @@ -2672,40 +2752,103 @@ class TestObjectVersioning(Base): def tearDown(self): super(TestObjectVersioning, self).tearDown() try: - # delete versions first! 
+ # only delete files and not container + # as they were configured in self.env self.env.versions_container.delete_files() self.env.container.delete_files() except ResponseError: pass + def test_clear_version_option(self): + # sanity + self.assertEqual(self.env.container.info()['versions'], + self.env.versions_container.name) + self.env.container.update_metadata( + hdrs={'X-Versions-Location': ''}) + self.assertEqual(self.env.container.info().get('versions'), None) + + # set location back to the way it was + self.env.container.update_metadata( + hdrs={'X-Versions-Location': self.env.versions_container.name}) + self.assertEqual(self.env.container.info()['versions'], + self.env.versions_container.name) + def test_overwriting(self): container = self.env.container versions_container = self.env.versions_container + cont_info = container.info() + self.assertEquals(cont_info['versions'], versions_container.name) + obj_name = Utils.create_name() versioned_obj = container.file(obj_name) - versioned_obj.write("aaaaa") + versioned_obj.write("aaaaa", hdrs={'Content-Type': 'text/jibberish01'}) + obj_info = versioned_obj.info() + self.assertEqual('text/jibberish01', obj_info['content_type']) self.assertEqual(0, versions_container.info()['object_count']) - - versioned_obj.write("bbbbb") + versioned_obj.write("bbbbb", hdrs={'Content-Type': 'text/jibberish02', + 'X-Object-Meta-Foo': 'Bar'}) + versioned_obj.initialize() + self.assertEqual(versioned_obj.content_type, 'text/jibberish02') + self.assertEqual(versioned_obj.metadata['foo'], 'Bar') # the old version got saved off self.assertEqual(1, versions_container.info()['object_count']) versioned_obj_name = versions_container.files()[0] - self.assertEqual( - "aaaaa", versions_container.file(versioned_obj_name).read()) + prev_version = versions_container.file(versioned_obj_name) + prev_version.initialize() + self.assertEqual("aaaaa", prev_version.read()) + self.assertEqual(prev_version.content_type, 'text/jibberish01') + + # make sure 
the new obj metadata did not leak to the prev. version + self.assertTrue('foo' not in prev_version.metadata) + + # check that POST does not create a new version + versioned_obj.sync_metadata(metadata={'fu': 'baz'}) + self.assertEqual(1, versions_container.info()['object_count']) # if we overwrite it again, there are two versions versioned_obj.write("ccccc") self.assertEqual(2, versions_container.info()['object_count']) + versioned_obj_name = versions_container.files()[1] + prev_version = versions_container.file(versioned_obj_name) + prev_version.initialize() + self.assertEqual("bbbbb", prev_version.read()) + self.assertEqual(prev_version.content_type, 'text/jibberish02') + self.assertTrue('foo' in prev_version.metadata) + self.assertTrue('fu' in prev_version.metadata) # as we delete things, the old contents return self.assertEqual("ccccc", versioned_obj.read()) + + # test copy from a different container + src_container = self.env.account.container(Utils.create_name()) + self.assertTrue(src_container.create()) + src_name = Utils.create_name() + src_obj = src_container.file(src_name) + src_obj.write("ddddd", hdrs={'Content-Type': 'text/jibberish04'}) + src_obj.copy(container.name, obj_name) + + self.assertEqual("ddddd", versioned_obj.read()) + versioned_obj.initialize() + self.assertEqual(versioned_obj.content_type, 'text/jibberish04') + + # make sure versions container has the previous version + self.assertEqual(3, versions_container.info()['object_count']) + versioned_obj_name = versions_container.files()[2] + prev_version = versions_container.file(versioned_obj_name) + prev_version.initialize() + self.assertEqual("ccccc", prev_version.read()) + + # test delete + versioned_obj.delete() + self.assertEqual("ccccc", versioned_obj.read()) versioned_obj.delete() self.assertEqual("bbbbb", versioned_obj.read()) versioned_obj.delete() self.assertEqual("aaaaa", versioned_obj.read()) + self.assertEqual(0, versions_container.info()['object_count']) versioned_obj.delete() 
self.assertRaises(ResponseError, versioned_obj.read) @@ -2737,6 +2880,87 @@ class TestObjectVersioning(Base): self.assertEqual(3, versions_container.info()['object_count']) self.assertEqual("112233", man_file.read()) + def test_versioning_container_acl(self): + # create versions container and DO NOT give write access to account2 + versions_container = self.env.account.container(Utils.create_name()) + self.assertTrue(versions_container.create(hdrs={ + 'X-Container-Write': '' + })) + + # check account2 cannot write to versions container + fail_obj_name = Utils.create_name() + fail_obj = versions_container.file(fail_obj_name) + self.assertRaises(ResponseError, fail_obj.write, "should fail", + cfg={'use_token': self.env.storage_token2}) + + # create container and give write access to account2 + # don't set X-Versions-Location just yet + container = self.env.account.container(Utils.create_name()) + self.assertTrue(container.create(hdrs={ + 'X-Container-Write': self.env.conn2.user_acl})) + + # check account2 cannot set X-Versions-Location on container + self.assertRaises(ResponseError, container.update_metadata, hdrs={ + 'X-Versions-Location': versions_container}, + cfg={'use_token': self.env.storage_token2}) + + # good! now let admin set the X-Versions-Location + # p.s.: sticking a 'x-remove' header here to test precedence + # of both headers. Setting the location should succeed. 
+ self.assertTrue(container.update_metadata(hdrs={ + 'X-Remove-Versions-Location': versions_container, + 'X-Versions-Location': versions_container})) + + # write object twice to container and check version + obj_name = Utils.create_name() + versioned_obj = container.file(obj_name) + self.assertTrue(versioned_obj.write("never argue with the data", + cfg={'use_token': self.env.storage_token2})) + self.assertEqual(versioned_obj.read(), "never argue with the data") + + self.assertTrue( + versioned_obj.write("we don't have no beer, just tequila", + cfg={'use_token': self.env.storage_token2})) + self.assertEqual(versioned_obj.read(), + "we don't have no beer, just tequila") + self.assertEqual(1, versions_container.info()['object_count']) + + # read the original uploaded object + for filename in versions_container.files(): + backup_file = versions_container.file(filename) + break + self.assertEqual(backup_file.read(), "never argue with the data") + + # user3 (some random user with no access to anything) + # tries to read from versioned container + self.assertRaises(ResponseError, backup_file.read, + cfg={'use_token': self.env.storage_token3}) + + # user3 cannot write or delete from source container either + self.assertRaises(ResponseError, versioned_obj.write, + "some random user trying to write data", + cfg={'use_token': self.env.storage_token3}) + self.assertRaises(ResponseError, versioned_obj.delete, + cfg={'use_token': self.env.storage_token3}) + + # user2 can't read or delete from versions-location + self.assertRaises(ResponseError, backup_file.read, + cfg={'use_token': self.env.storage_token2}) + self.assertRaises(ResponseError, backup_file.delete, + cfg={'use_token': self.env.storage_token2}) + + # but is able to delete from the source container + # this could be a helpful scenario for dev ops that want to setup + # just one container to hold object versions of multiple containers + # and each one of those containers are owned by different users + 
self.assertTrue(versioned_obj.delete( + cfg={'use_token': self.env.storage_token2})) + + # tear-down since we create these containers here + # and not in self.env + versions_container.delete_recursive() + container.delete_recursive() + def test_versioning_check_acl(self): container = self.env.container versions_container = self.env.versions_container @@ -2852,8 +3076,8 @@ class TestTempurl(Base): self.assertEqual(contents, "obj contents") # GET tempurls also allow HEAD requests - self.assert_(self.env.obj.info(parms=self.obj_tempurl_parms, - cfg={'no_auth_token': True})) + self.assertTrue(self.env.obj.info(parms=self.obj_tempurl_parms, + cfg={'no_auth_token': True})) def test_GET_with_key_2(self): expires = int(time.time()) + 86400 @@ -2866,6 +3090,59 @@ class TestTempurl(Base): contents = self.env.obj.read(parms=parms, cfg={'no_auth_token': True}) self.assertEqual(contents, "obj contents") + def test_GET_DLO_inside_container(self): + seg1 = self.env.container.file( + "get-dlo-inside-seg1" + Utils.create_name()) + seg2 = self.env.container.file( + "get-dlo-inside-seg2" + Utils.create_name()) + seg1.write("one fish two fish ") + seg2.write("red fish blue fish") + + manifest = self.env.container.file("manifest" + Utils.create_name()) + manifest.write( + '', + hdrs={"X-Object-Manifest": "%s/get-dlo-inside-seg" % + (self.env.container.name,)}) + + expires = int(time.time()) + 86400 + sig = self.tempurl_sig( + 'GET', expires, self.env.conn.make_path(manifest.path), + self.env.tempurl_key) + parms = {'temp_url_sig': sig, + 'temp_url_expires': str(expires)} + + contents = manifest.read(parms=parms, cfg={'no_auth_token': True}) + self.assertEqual(contents, "one fish two fish red fish blue fish") + + def test_GET_DLO_outside_container(self): + seg1 = self.env.container.file( + "get-dlo-outside-seg1" + Utils.create_name()) + seg2 = self.env.container.file( + "get-dlo-outside-seg2" + Utils.create_name()) + seg1.write("one fish two fish ") + seg2.write("red fish blue fish") + 
+ container2 = self.env.account.container(Utils.create_name()) + container2.create() + + manifest = container2.file("manifest" + Utils.create_name()) + manifest.write( + '', + hdrs={"X-Object-Manifest": "%s/get-dlo-outside-seg" % + (self.env.container.name,)}) + + expires = int(time.time()) + 86400 + sig = self.tempurl_sig( + 'GET', expires, self.env.conn.make_path(manifest.path), + self.env.tempurl_key) + parms = {'temp_url_sig': sig, + 'temp_url_expires': str(expires)} + + # cross container tempurl works fine for account tempurl key + contents = manifest.read(parms=parms, cfg={'no_auth_token': True}) + self.assertEqual(contents, "one fish two fish red fish blue fish") + self.assert_status([200]) + def test_PUT(self): new_obj = self.env.container.file(Utils.create_name()) @@ -2881,8 +3158,60 @@ class TestTempurl(Base): self.assertEqual(new_obj.read(), "new obj contents") # PUT tempurls also allow HEAD requests - self.assert_(new_obj.info(parms=put_parms, - cfg={'no_auth_token': True})) + self.assertTrue(new_obj.info(parms=put_parms, + cfg={'no_auth_token': True})) + + def test_PUT_manifest_access(self): + new_obj = self.env.container.file(Utils.create_name()) + + # give out a signature which allows a PUT to new_obj + expires = int(time.time()) + 86400 + sig = self.tempurl_sig( + 'PUT', expires, self.env.conn.make_path(new_obj.path), + self.env.tempurl_key) + put_parms = {'temp_url_sig': sig, + 'temp_url_expires': str(expires)} + + # try to create manifest pointing to some random container + try: + new_obj.write('', { + 'x-object-manifest': '%s/foo' % 'some_random_container' + }, parms=put_parms, cfg={'no_auth_token': True}) + except ResponseError as e: + self.assertEqual(e.status, 400) + else: + self.fail('request did not error') + + # create some other container + other_container = self.env.account.container(Utils.create_name()) + if not other_container.create(): + raise ResponseError(self.conn.response) + + # try to create manifest pointing to new container + 
try: + new_obj.write('', { + 'x-object-manifest': '%s/foo' % other_container + }, parms=put_parms, cfg={'no_auth_token': True}) + except ResponseError as e: + self.assertEqual(e.status, 400) + else: + self.fail('request did not error') + + # try again using a tempurl POST to an already created object + new_obj.write('', {}, parms=put_parms, cfg={'no_auth_token': True}) + expires = int(time.time()) + 86400 + sig = self.tempurl_sig( + 'POST', expires, self.env.conn.make_path(new_obj.path), + self.env.tempurl_key) + post_parms = {'temp_url_sig': sig, + 'temp_url_expires': str(expires)} + try: + new_obj.post({'x-object-manifest': '%s/foo' % other_container}, + parms=post_parms, cfg={'no_auth_token': True}) + except ResponseError as e: + self.assertEqual(e.status, 400) + else: + self.fail('request did not error') def test_HEAD(self): expires = int(time.time()) + 86400 @@ -2892,8 +3221,8 @@ class TestTempurl(Base): head_parms = {'temp_url_sig': sig, 'temp_url_expires': str(expires)} - self.assert_(self.env.obj.info(parms=head_parms, - cfg={'no_auth_token': True})) + self.assertTrue(self.env.obj.info(parms=head_parms, + cfg={'no_auth_token': True})) # HEAD tempurls don't allow PUT or GET requests, despite the fact that # PUT and GET tempurls both allow HEAD requests self.assertRaises(ResponseError, self.env.other_obj.read, @@ -3036,8 +3365,8 @@ class TestContainerTempurl(Base): self.assertEqual(contents, "obj contents") # GET tempurls also allow HEAD requests - self.assert_(self.env.obj.info(parms=self.obj_tempurl_parms, - cfg={'no_auth_token': True})) + self.assertTrue(self.env.obj.info(parms=self.obj_tempurl_parms, + cfg={'no_auth_token': True})) def test_GET_with_key_2(self): expires = int(time.time()) + 86400 @@ -3065,8 +3394,8 @@ class TestContainerTempurl(Base): self.assertEqual(new_obj.read(), "new obj contents") # PUT tempurls also allow HEAD requests - self.assert_(new_obj.info(parms=put_parms, - cfg={'no_auth_token': True})) + 
self.assertTrue(new_obj.info(parms=put_parms, + cfg={'no_auth_token': True})) def test_HEAD(self): expires = int(time.time()) + 86400 @@ -3076,8 +3405,8 @@ class TestContainerTempurl(Base): head_parms = {'temp_url_sig': sig, 'temp_url_expires': str(expires)} - self.assert_(self.env.obj.info(parms=head_parms, - cfg={'no_auth_token': True})) + self.assertTrue(self.env.obj.info(parms=head_parms, + cfg={'no_auth_token': True})) # HEAD tempurls don't allow PUT or GET requests, despite the fact that # PUT and GET tempurls both allow HEAD requests self.assertRaises(ResponseError, self.env.other_obj.read, @@ -3153,12 +3482,75 @@ class TestContainerTempurl(Base): metadata = self.env.container.info() self.env.container.conn.storage_token = original_token - self.assertTrue('tempurl_key' not in metadata, - 'Container TempURL key found, should not be visible ' - 'to readonly ACLs') - self.assertTrue('tempurl_key2' not in metadata, - 'Container TempURL key-2 found, should not be visible ' - 'to readonly ACLs') + self.assertNotIn( + 'tempurl_key', metadata, + 'Container TempURL key found, should not be visible ' + 'to readonly ACLs') + self.assertNotIn( + 'tempurl_key2', metadata, + 'Container TempURL key-2 found, should not be visible ' + 'to readonly ACLs') + + def test_GET_DLO_inside_container(self): + seg1 = self.env.container.file( + "get-dlo-inside-seg1" + Utils.create_name()) + seg2 = self.env.container.file( + "get-dlo-inside-seg2" + Utils.create_name()) + seg1.write("one fish two fish ") + seg2.write("red fish blue fish") + + manifest = self.env.container.file("manifest" + Utils.create_name()) + manifest.write( + '', + hdrs={"X-Object-Manifest": "%s/get-dlo-inside-seg" % + (self.env.container.name,)}) + + expires = int(time.time()) + 86400 + sig = self.tempurl_sig( + 'GET', expires, self.env.conn.make_path(manifest.path), + self.env.tempurl_key) + parms = {'temp_url_sig': sig, + 'temp_url_expires': str(expires)} + + contents = manifest.read(parms=parms, 
cfg={'no_auth_token': True}) + self.assertEqual(contents, "one fish two fish red fish blue fish") + + def test_GET_DLO_outside_container(self): + container2 = self.env.account.container(Utils.create_name()) + container2.create() + seg1 = container2.file( + "get-dlo-outside-seg1" + Utils.create_name()) + seg2 = container2.file( + "get-dlo-outside-seg2" + Utils.create_name()) + seg1.write("one fish two fish ") + seg2.write("red fish blue fish") + + manifest = self.env.container.file("manifest" + Utils.create_name()) + manifest.write( + '', + hdrs={"X-Object-Manifest": "%s/get-dlo-outside-seg" % + (container2.name,)}) + + expires = int(time.time()) + 86400 + sig = self.tempurl_sig( + 'GET', expires, self.env.conn.make_path(manifest.path), + self.env.tempurl_key) + parms = {'temp_url_sig': sig, + 'temp_url_expires': str(expires)} + + # cross container tempurl does not work for container tempurl key + try: + manifest.read(parms=parms, cfg={'no_auth_token': True}) + except ResponseError as e: + self.assertEqual(e.status, 401) + else: + self.fail('request did not error') + try: + manifest.info(parms=parms, cfg={'no_auth_token': True}) + except ResponseError as e: + self.assertEqual(e.status, 401) + else: + self.fail('request did not error') class TestContainerTempurlUTF8(Base2, TestContainerTempurl): @@ -3244,7 +3636,7 @@ class TestSloTempurl(Base): self.assertEqual(len(contents), 2 * 1024 * 1024) # GET tempurls also allow HEAD requests - self.assert_(self.env.manifest.info( + self.assertTrue(self.env.manifest.info( parms=parms, cfg={'no_auth_token': True})) @@ -3351,8 +3743,6 @@ class TestServiceToken(unittest.TestCase): headers = {} if self.body: headers.update({'Content-Length': len(self.body)}) - if self.headers: - headers.update(self.headers) if self.x_auth_token == self.SET_TO_USERS_TOKEN: headers.update({'X-Auth-Token': token}) elif self.x_auth_token == self.SET_TO_SERVICE_TOKEN: @@ -3385,7 +3775,7 @@ class TestServiceToken(unittest.TestCase): 
self.prepare_request('HEAD') resp = retry(self.do_request) resp.read() - self.assert_(resp.status in (200, 204), resp.status) + self.assertIn(resp.status, (200, 204)) def test_user_cannot_access_service_account(self): for method, container, obj in self._scenario_generator(): diff --git a/test/probe/brain.py b/test/probe/brain.py index 791d974b56..9ec907c0a2 100644 --- a/test/probe/brain.py +++ b/test/probe/brain.py @@ -11,7 +11,7 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. - +from __future__ import print_function import sys import itertools import uuid @@ -19,6 +19,8 @@ from optparse import OptionParser from urlparse import urlparse import random +import six + from swift.common.manager import Manager from swift.common import utils, ring from swift.common.storage_policy import POLICIES @@ -62,10 +64,8 @@ def command(f): return f +@six.add_metaclass(meta_command) class BrainSplitter(object): - - __metaclass__ = meta_command - def __init__(self, url, token, container_name='test', object_name='test', server_type='container', policy=None): self.url = url @@ -226,8 +226,8 @@ def main(): try: brain.run(command, *args) except ClientException as e: - print '**WARNING**: %s raised %s' % (command, e) - print 'STATUS'.join(['*' * 25] * 2) + print('**WARNING**: %s raised %s' % (command, e)) + print('STATUS'.join(['*' * 25] * 2)) brain.servers.status() sys.exit() diff --git a/test/probe/common.py b/test/probe/common.py index 093194aed9..45a907444d 100644 --- a/test/probe/common.py +++ b/test/probe/common.py @@ -13,17 +13,20 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from httplib import HTTPConnection +from __future__ import print_function import os from subprocess import Popen, PIPE import sys from time import sleep, time from collections import defaultdict import unittest +from hashlib import md5 +from uuid import uuid4 from nose import SkipTest -from swiftclient import get_auth, head_account +from six.moves.http_client import HTTPConnection +from swiftclient import get_auth, head_account from swift.obj.diskfile import get_data_dir from swift.common.ring import Ring from swift.common.utils import readconf, renamer @@ -85,9 +88,9 @@ def check_server(ipport, ipport2server, pids, timeout=CHECK_SERVER_TIMEOUT): break except Exception as err: if time() > try_until: - print err - print 'Giving up on %s:%s after %s seconds.' % ( - server, ipport, timeout) + print(err) + print('Giving up on %s:%s after %s seconds.' % ( + server, ipport, timeout)) raise err sleep(0.1) else: @@ -101,8 +104,8 @@ def check_server(ipport, ipport2server, pids, timeout=CHECK_SERVER_TIMEOUT): return url, token, account except Exception as err: if time() > try_until: - print err - print 'Giving up on proxy:8080 after 30 seconds.' 
+ print(err) + print('Giving up on proxy:8080 after 30 seconds.') raise err sleep(0.1) return None @@ -254,16 +257,63 @@ def get_policy(**kwargs): raise SkipTest('No policy matching %s' % kwargs) +def resetswift(): + p = Popen("resetswift 2>&1", shell=True, stdout=PIPE) + stdout, _stderr = p.communicate() + print(stdout) + Manager(['all']).stop() + + +class Body(object): + + def __init__(self, total=3.5 * 2 ** 20): + self.length = total + self.hasher = md5() + self.read_amount = 0 + self.chunk = uuid4().hex * 2 ** 10 + self.buff = '' + + @property + def etag(self): + return self.hasher.hexdigest() + + def __len__(self): + return self.length + + def read(self, amount): + if len(self.buff) < amount: + try: + self.buff += next(self) + except StopIteration: + pass + rv, self.buff = self.buff[:amount], self.buff[amount:] + return rv + + def __iter__(self): + return self + + def next(self): + if self.buff: + rv, self.buff = self.buff, '' + return rv + if self.read_amount >= self.length: + raise StopIteration() + rv = self.chunk[:int(self.length - self.read_amount)] + self.read_amount += len(rv) + self.hasher.update(rv) + return rv + + def __next__(self): + return next(self) + + class ProbeTest(unittest.TestCase): """ Don't instantiate this directly, use a child class instead. 
""" def setUp(self): - p = Popen("resetswift 2>&1", shell=True, stdout=PIPE) - stdout, _stderr = p.communicate() - print stdout - Manager(['all']).stop() + resetswift() self.pids = {} try: self.ipport2server = {} @@ -402,11 +452,11 @@ if __name__ == "__main__": force_validate=True) except SkipTest as err: sys.exit('%s ERROR: %s' % (server, err)) - print '%s OK' % server + print('%s OK' % server) for policy in POLICIES: try: get_ring(policy.ring_name, 3, 4, server='object', force_validate=True) except SkipTest as err: sys.exit('object ERROR (%s): %s' % (policy.name, err)) - print 'object OK (%s)' % policy.name + print('object OK (%s)' % policy.name) diff --git a/test/probe/test_account_failures.py b/test/probe/test_account_failures.py index 783d3da9b8..4d6b1496b9 100755 --- a/test/probe/test_account_failures.py +++ b/test/probe/test_account_failures.py @@ -35,66 +35,66 @@ class TestAccountFailures(ReplProbeTest): # Assert account level sees them headers, containers = client.get_account(self.url, self.token) - self.assertEquals(headers['x-account-container-count'], '2') - self.assertEquals(headers['x-account-object-count'], '0') - self.assertEquals(headers['x-account-bytes-used'], '0') + self.assertEqual(headers['x-account-container-count'], '2') + self.assertEqual(headers['x-account-object-count'], '0') + self.assertEqual(headers['x-account-bytes-used'], '0') found1 = False found2 = False for container in containers: if container['name'] == container1: found1 = True - self.assertEquals(container['count'], 0) - self.assertEquals(container['bytes'], 0) + self.assertEqual(container['count'], 0) + self.assertEqual(container['bytes'], 0) elif container['name'] == container2: found2 = True - self.assertEquals(container['count'], 0) - self.assertEquals(container['bytes'], 0) - self.assert_(found1) - self.assert_(found2) + self.assertEqual(container['count'], 0) + self.assertEqual(container['bytes'], 0) + self.assertTrue(found1) + self.assertTrue(found2) # Create 
container2/object1 client.put_object(self.url, self.token, container2, 'object1', '1234') # Assert account level doesn't see it yet headers, containers = client.get_account(self.url, self.token) - self.assertEquals(headers['x-account-container-count'], '2') - self.assertEquals(headers['x-account-object-count'], '0') - self.assertEquals(headers['x-account-bytes-used'], '0') + self.assertEqual(headers['x-account-container-count'], '2') + self.assertEqual(headers['x-account-object-count'], '0') + self.assertEqual(headers['x-account-bytes-used'], '0') found1 = False found2 = False for container in containers: if container['name'] == container1: found1 = True - self.assertEquals(container['count'], 0) - self.assertEquals(container['bytes'], 0) + self.assertEqual(container['count'], 0) + self.assertEqual(container['bytes'], 0) elif container['name'] == container2: found2 = True - self.assertEquals(container['count'], 0) - self.assertEquals(container['bytes'], 0) - self.assert_(found1) - self.assert_(found2) + self.assertEqual(container['count'], 0) + self.assertEqual(container['bytes'], 0) + self.assertTrue(found1) + self.assertTrue(found2) # Get to final state self.get_to_final_state() # Assert account level now sees the container2/object1 headers, containers = client.get_account(self.url, self.token) - self.assertEquals(headers['x-account-container-count'], '2') - self.assertEquals(headers['x-account-object-count'], '1') - self.assertEquals(headers['x-account-bytes-used'], '4') + self.assertEqual(headers['x-account-container-count'], '2') + self.assertEqual(headers['x-account-object-count'], '1') + self.assertEqual(headers['x-account-bytes-used'], '4') found1 = False found2 = False for container in containers: if container['name'] == container1: found1 = True - self.assertEquals(container['count'], 0) - self.assertEquals(container['bytes'], 0) + self.assertEqual(container['count'], 0) + self.assertEqual(container['bytes'], 0) elif container['name'] == container2: 
found2 = True - self.assertEquals(container['count'], 1) - self.assertEquals(container['bytes'], 4) - self.assert_(found1) - self.assert_(found2) + self.assertEqual(container['count'], 1) + self.assertEqual(container['bytes'], 4) + self.assertTrue(found1) + self.assertTrue(found2) apart, anodes = self.account_ring.get_nodes(self.account) kill_nonprimary_server(anodes, self.ipport2server, self.pids) @@ -111,9 +111,9 @@ class TestAccountFailures(ReplProbeTest): # Assert account level knows container1 is gone but doesn't know about # container2/object2 yet headers, containers = client.get_account(self.url, self.token) - self.assertEquals(headers['x-account-container-count'], '1') - self.assertEquals(headers['x-account-object-count'], '1') - self.assertEquals(headers['x-account-bytes-used'], '4') + self.assertEqual(headers['x-account-container-count'], '1') + self.assertEqual(headers['x-account-object-count'], '1') + self.assertEqual(headers['x-account-bytes-used'], '4') found1 = False found2 = False for container in containers: @@ -121,19 +121,19 @@ class TestAccountFailures(ReplProbeTest): found1 = True elif container['name'] == container2: found2 = True - self.assertEquals(container['count'], 1) - self.assertEquals(container['bytes'], 4) - self.assert_(not found1) - self.assert_(found2) + self.assertEqual(container['count'], 1) + self.assertEqual(container['bytes'], 4) + self.assertFalse(found1) + self.assertTrue(found2) # Run container updaters Manager(['container-updater']).once() # Assert account level now knows about container2/object2 headers, containers = client.get_account(self.url, self.token) - self.assertEquals(headers['x-account-container-count'], '1') - self.assertEquals(headers['x-account-object-count'], '2') - self.assertEquals(headers['x-account-bytes-used'], '9') + self.assertEqual(headers['x-account-container-count'], '1') + self.assertEqual(headers['x-account-object-count'], '2') + self.assertEqual(headers['x-account-bytes-used'], '9') found1 = 
False found2 = False for container in containers: @@ -141,10 +141,10 @@ class TestAccountFailures(ReplProbeTest): found1 = True elif container['name'] == container2: found2 = True - self.assertEquals(container['count'], 2) - self.assertEquals(container['bytes'], 9) - self.assert_(not found1) - self.assert_(found2) + self.assertEqual(container['count'], 2) + self.assertEqual(container['bytes'], 9) + self.assertFalse(found1) + self.assertTrue(found2) # Restart other primary account server start_server((anodes[0]['ip'], anodes[0]['port']), @@ -154,9 +154,9 @@ class TestAccountFailures(ReplProbeTest): # new container2/object2 yet headers, containers = \ direct_client.direct_get_account(anodes[0], apart, self.account) - self.assertEquals(headers['x-account-container-count'], '2') - self.assertEquals(headers['x-account-object-count'], '1') - self.assertEquals(headers['x-account-bytes-used'], '4') + self.assertEqual(headers['x-account-container-count'], '2') + self.assertEqual(headers['x-account-object-count'], '1') + self.assertEqual(headers['x-account-bytes-used'], '4') found1 = False found2 = False for container in containers: @@ -164,10 +164,10 @@ class TestAccountFailures(ReplProbeTest): found1 = True elif container['name'] == container2: found2 = True - self.assertEquals(container['count'], 1) - self.assertEquals(container['bytes'], 4) - self.assert_(found1) - self.assert_(found2) + self.assertEqual(container['count'], 1) + self.assertEqual(container['bytes'], 4) + self.assertTrue(found1) + self.assertTrue(found2) # Get to final state self.get_to_final_state() @@ -175,9 +175,9 @@ class TestAccountFailures(ReplProbeTest): # Assert that server is now up to date headers, containers = \ direct_client.direct_get_account(anodes[0], apart, self.account) - self.assertEquals(headers['x-account-container-count'], '1') - self.assertEquals(headers['x-account-object-count'], '2') - self.assertEquals(headers['x-account-bytes-used'], '9') + 
self.assertEqual(headers['x-account-container-count'], '1') + self.assertEqual(headers['x-account-object-count'], '2') + self.assertEqual(headers['x-account-bytes-used'], '9') found1 = False found2 = False for container in containers: @@ -185,10 +185,10 @@ found1 = True elif container['name'] == container2: found2 = True - self.assertEquals(container['count'], 2) - self.assertEquals(container['bytes'], 9) + self.assertEqual(container['count'], 2) + self.assertEqual(container['bytes'], 9) - self.assert_(not found1) - self.assert_(found2) + self.assertFalse(found1) + self.assertTrue(found2) if __name__ == '__main__': diff --git a/test/probe/test_account_get_fake_responses_match.py b/test/probe/test_account_get_fake_responses_match.py index f99b41e780..46ddb16de0 100755 --- a/test/probe/test_account_get_fake_responses_match.py +++ b/test/probe/test_account_get_fake_responses_match.py @@ -14,10 +14,10 @@ # See the License for the specific language governing permissions and # limitations under the License.
-import httplib import re import unittest +from six.moves import http_client from swiftclient import get_auth from test.probe.common import ReplProbeTest from urlparse import urlparse @@ -49,7 +49,7 @@ class TestAccountGetFakeResponsesMatch(ReplProbeTest): host, port = netloc.split(':') port = int(port) - conn = httplib.HTTPConnection(host, port) + conn = http_client.HTTPConnection(host, port) conn.request(method, self._account_path(account), headers=headers) resp = conn.getresponse() if resp.status // 100 != 2: diff --git a/test/probe/test_account_reaper.py b/test/probe/test_account_reaper.py index 7da9dcd39d..f5d2efee87 100644 --- a/test/probe/test_account_reaper.py +++ b/test/probe/test_account_reaper.py @@ -66,7 +66,7 @@ class TestAccountReaper(ReplProbeTest): direct_head_container(cnode, cpart, self.account, container) except ClientException as err: - self.assertEquals(err.http_status, 404) + self.assertEqual(err.http_status, 404) delete_time = err.http_headers.get( 'X-Backend-DELETE-Timestamp') # 'X-Backend-DELETE-Timestamp' confirms it was deleted @@ -91,7 +91,7 @@ class TestAccountReaper(ReplProbeTest): direct_get_object(node, part, self.account, container, obj, headers=headers) except ClientException as err: - self.assertEquals(err.http_status, 404) + self.assertEqual(err.http_status, 404) delete_time = err.http_headers.get('X-Backend-Timestamp') # 'X-Backend-Timestamp' confirms obj was deleted self.assertTrue(delete_time) @@ -114,7 +114,7 @@ class TestAccountReaper(ReplProbeTest): direct_head_container(cnode, cpart, self.account, container) except ClientException as err: - self.assertEquals(err.http_status, 404) + self.assertEqual(err.http_status, 404) delete_time = err.http_headers.get( 'X-Backend-DELETE-Timestamp') # 'X-Backend-DELETE-Timestamp' confirms it was deleted @@ -134,7 +134,7 @@ class TestAccountReaper(ReplProbeTest): direct_get_object(node, part, self.account, container, obj, headers=headers) except ClientException as err: - 
self.assertEquals(err.http_status, 404) + self.assertEqual(err.http_status, 404) delete_time = err.http_headers.get('X-Backend-Timestamp') # 'X-Backend-Timestamp' confirms obj was deleted self.assertTrue(delete_time) diff --git a/test/probe/test_container_failures.py b/test/probe/test_container_failures.py index 5eddad1464..d8c132c53d 100755 --- a/test/probe/test_container_failures.py +++ b/test/probe/test_container_failures.py @@ -70,7 +70,7 @@ class TestContainerFailures(ReplProbeTest): # Assert all container1 servers indicate container1 is alive and # well with object1 for cnode in cnodes: - self.assertEquals( + self.assertEqual( [o['name'] for o in direct_client.direct_get_container( cnode, cpart, self.account, container1)[1]], ['object1']) @@ -78,9 +78,9 @@ class TestContainerFailures(ReplProbeTest): # Assert account level also indicates container1 is alive and # well with object1 headers, containers = client.get_account(self.url, self.token) - self.assertEquals(headers['x-account-container-count'], '1') - self.assertEquals(headers['x-account-object-count'], '1') - self.assertEquals(headers['x-account-bytes-used'], '3') + self.assertEqual(headers['x-account-container-count'], '1') + self.assertEqual(headers['x-account-object-count'], '1') + self.assertEqual(headers['x-account-bytes-used'], '3') def test_two_nodes_fail(self): # Create container1 @@ -118,15 +118,15 @@ class TestContainerFailures(ReplProbeTest): direct_client.direct_get_container(cnode, cpart, self.account, container1) except ClientException as err: - self.assertEquals(err.http_status, 404) + self.assertEqual(err.http_status, 404) else: self.fail("Expected ClientException but didn't get it") # Assert account level also indicates container1 is gone headers, containers = client.get_account(self.url, self.token) - self.assertEquals(headers['x-account-container-count'], '0') - self.assertEquals(headers['x-account-object-count'], '0') - self.assertEquals(headers['x-account-bytes-used'], '0') + 
self.assertEqual(headers['x-account-container-count'], '0') + self.assertEqual(headers['x-account-object-count'], '0') + self.assertEqual(headers['x-account-bytes-used'], '0') def _get_container_db_files(self, container): opart, onodes = self.container_ring.get_nodes(self.account, container) @@ -160,7 +160,7 @@ class TestContainerFailures(ReplProbeTest): try: client.delete_container(self.url, self.token, container) except client.ClientException as err: - self.assertEquals(err.http_status, 503) + self.assertEqual(err.http_status, 503) else: self.fail("Expected ClientException but didn't get it") else: diff --git a/test/probe/test_container_merge_policy_index.py b/test/probe/test_container_merge_policy_index.py index d604b13716..3472488f5f 100644 --- a/test/probe/test_container_merge_policy_index.py +++ b/test/probe/test_container_merge_policy_index.py @@ -65,9 +65,10 @@ class TestContainerMergePolicyIndex(ReplProbeTest): found_policy_indexes = \ set(metadata['X-Backend-Storage-Policy-Index'] for node, metadata in head_responses) - self.assert_(len(found_policy_indexes) > 1, - 'primary nodes did not disagree about policy index %r' % - head_responses) + self.assertTrue( + len(found_policy_indexes) > 1, + 'primary nodes did not disagree about policy index %r' % + head_responses) # find our object orig_policy_index = None for policy_index in found_policy_indexes: @@ -102,9 +103,9 @@ class TestContainerMergePolicyIndex(ReplProbeTest): found_policy_indexes = \ set(metadata['X-Backend-Storage-Policy-Index'] for node, metadata in head_responses) - self.assert_(len(found_policy_indexes) == 1, - 'primary nodes disagree about policy index %r' % - head_responses) + self.assertTrue(len(found_policy_indexes) == 1, + 'primary nodes disagree about policy index %r' % + head_responses) expected_policy_index = found_policy_indexes.pop() self.assertNotEqual(orig_policy_index, expected_policy_index) @@ -165,9 +166,10 @@ class TestContainerMergePolicyIndex(ReplProbeTest): 
found_policy_indexes = \ set(metadata['X-Backend-Storage-Policy-Index'] for node, metadata in head_responses) - self.assert_(len(found_policy_indexes) > 1, - 'primary nodes did not disagree about policy index %r' % - head_responses) + self.assertTrue( + len(found_policy_indexes) > 1, + 'primary nodes did not disagree about policy index %r' % + head_responses) # find our object orig_policy_index = ts_policy_index = None for policy_index in found_policy_indexes: @@ -207,11 +209,11 @@ class TestContainerMergePolicyIndex(ReplProbeTest): new_found_policy_indexes = \ set(metadata['X-Backend-Storage-Policy-Index'] for node, metadata in head_responses) - self.assert_(len(new_found_policy_indexes) == 1, - 'primary nodes disagree about policy index %r' % - dict((node['port'], - metadata['X-Backend-Storage-Policy-Index']) - for node, metadata in head_responses)) + self.assertTrue(len(new_found_policy_indexes) == 1, + 'primary nodes disagree about policy index %r' % + dict((node['port'], + metadata['X-Backend-Storage-Policy-Index']) + for node, metadata in head_responses)) expected_policy_index = new_found_policy_indexes.pop() self.assertEqual(orig_policy_index, expected_policy_index) # validate object fully deleted @@ -334,7 +336,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest): self.assertEqual(metadata['x-static-large-object'].lower(), 'true') for i, entry in enumerate(utils.json.loads(body)): for key in ('hash', 'bytes', 'name'): - self.assertEquals(entry[key], direct_manifest_data[i][key]) + self.assertEqual(entry[key], direct_manifest_data[i][key]) metadata, body = client.get_object( self.url, self.token, self.container_name, direct_manifest_name) self.assertEqual(metadata['x-static-large-object'].lower(), 'true') diff --git a/test/probe/test_empty_device_handoff.py b/test/probe/test_empty_device_handoff.py index f68ee6692b..c3138be05c 100755 --- a/test/probe/test_empty_device_handoff.py +++ b/test/probe/test_empty_device_handoff.py @@ -137,7 +137,7 @@ class 
TestEmptyDevice(ReplProbeTest): onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx}) except ClientException as err: - self.assertEquals(err.http_status, 404) + self.assertEqual(err.http_status, 404) self.assertFalse(os.path.exists(obj_dir)) else: self.fail("Expected ClientException but didn't get it") @@ -169,7 +169,7 @@ class TestEmptyDevice(ReplProbeTest): another_onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx}) except ClientException as err: - self.assertEquals(err.http_status, 404) + self.assertEqual(err.http_status, 404) else: self.fail("Expected ClientException but didn't get it") diff --git a/test/probe/test_object_async_update.py b/test/probe/test_object_async_update.py index 8657314fc7..379b0be556 100755 --- a/test/probe/test_object_async_update.py +++ b/test/probe/test_object_async_update.py @@ -54,7 +54,7 @@ class TestObjectAsyncUpdate(ReplProbeTest): self.ipport2server, self.pids) # Assert it does not know about container/obj - self.assert_(not direct_client.direct_get_container( + self.assertFalse(direct_client.direct_get_container( cnode, cpart, self.account, container)[1]) # Run the object-updaters @@ -63,7 +63,7 @@ class TestObjectAsyncUpdate(ReplProbeTest): # Assert the other primary server now knows about container/obj objs = [o['name'] for o in direct_client.direct_get_container( cnode, cpart, self.account, container)[1]] - self.assert_(obj in objs) + self.assertTrue(obj in objs) class TestUpdateOverrides(ReplProbeTest): diff --git a/test/probe/test_object_expirer.py b/test/probe/test_object_expirer.py index 11da6c49c2..3f8f39deed 100644 --- a/test/probe/test_object_expirer.py +++ b/test/probe/test_object_expirer.py @@ -87,7 +87,7 @@ class TestObjectExpirer(ReplProbeTest): self.account, self.container_name, self.object_name, acceptable_statuses=(4,), headers={'X-Backend-Storage-Policy-Index': int(old_policy)}) - 
self.assert_('x-backend-timestamp' in metadata) + self.assertTrue('x-backend-timestamp' in metadata) self.assertEqual(Timestamp(metadata['x-backend-timestamp']), create_timestamp) @@ -122,9 +122,9 @@ class TestObjectExpirer(ReplProbeTest): self.fail('found object in %s and also %s' % (found_in_policy, policy)) found_in_policy = policy - self.assert_('x-backend-timestamp' in metadata) - self.assert_(Timestamp(metadata['x-backend-timestamp']) > - create_timestamp) + self.assertTrue('x-backend-timestamp' in metadata) + self.assertTrue(Timestamp(metadata['x-backend-timestamp']) > + create_timestamp) if __name__ == "__main__": unittest.main() diff --git a/test/probe/test_object_failures.py b/test/probe/test_object_failures.py index eed5aca6a3..ba53177743 100755 --- a/test/probe/test_object_failures.py +++ b/test/probe/test_object_failures.py @@ -57,7 +57,7 @@ class TestObjectFailures(ReplProbeTest): self.policy.name}) client.put_object(self.url, self.token, container, obj, data) odata = client.get_object(self.url, self.token, container, obj)[-1] - self.assertEquals(odata, data) + self.assertEqual(odata, data) opart, onodes = self.object_ring.get_nodes( self.account, container, obj) onode = onodes[0] @@ -84,14 +84,14 @@ class TestObjectFailures(ReplProbeTest): odata = direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] - self.assertEquals(odata, 'VERIFY') + self.assertEqual(odata, 'VERIFY') try: direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx}) raise Exception("Did not quarantine object") except ClientException as err: - self.assertEquals(err.http_status, 404) + self.assertEqual(err.http_status, 404) def run_quarantine_range_etag(self): container = 'container-range-%s' % uuid4() @@ -111,7 +111,7 @@ class TestObjectFailures(ReplProbeTest): odata = direct_client.direct_get_object( onode, opart, 
self.account, container, obj, headers=req_headers)[-1] - self.assertEquals(odata, result) + self.assertEqual(odata, result) try: direct_client.direct_get_object( @@ -119,7 +119,7 @@ class TestObjectFailures(ReplProbeTest): 'X-Backend-Storage-Policy-Index': self.policy.idx}) raise Exception("Did not quarantine object") except ClientException as err: - self.assertEquals(err.http_status, 404) + self.assertEqual(err.http_status, 404) def run_quarantine_zero_byte_get(self): container = 'container-zbyte-%s' % uuid4() @@ -137,7 +137,7 @@ class TestObjectFailures(ReplProbeTest): self.policy.idx}) raise Exception("Did not quarantine object") except ClientException as err: - self.assertEquals(err.http_status, 404) + self.assertEqual(err.http_status, 404) def run_quarantine_zero_byte_head(self): container = 'container-zbyte-%s' % uuid4() @@ -155,7 +155,7 @@ class TestObjectFailures(ReplProbeTest): self.policy.idx}) raise Exception("Did not quarantine object") except ClientException as err: - self.assertEquals(err.http_status, 404) + self.assertEqual(err.http_status, 404) def run_quarantine_zero_byte_post(self): container = 'container-zbyte-%s' % uuid4() @@ -177,7 +177,7 @@ class TestObjectFailures(ReplProbeTest): response_timeout=1) raise Exception("Did not quarantine object") except ClientException as err: - self.assertEquals(err.http_status, 404) + self.assertEqual(err.http_status, 404) def test_runner(self): self.run_quarantine() diff --git a/test/probe/test_object_handoff.py b/test/probe/test_object_handoff.py index 37fb7626b5..f3b02c53cd 100755 --- a/test/probe/test_object_handoff.py +++ b/test/probe/test_object_handoff.py @@ -16,13 +16,17 @@ from unittest import main from uuid import uuid4 +import random +from hashlib import md5 +from collections import defaultdict from swiftclient import client from swift.common import direct_client from swift.common.exceptions import ClientException from swift.common.manager import Manager -from test.probe.common import kill_server, 
ReplProbeTest, start_server +from test.probe.common import (kill_server, start_server, ReplProbeTest, + ECProbeTest, Body) class TestObjectHandoff(ReplProbeTest): @@ -102,7 +106,7 @@ class TestObjectHandoff(ReplProbeTest): onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx}) except ClientException as err: - self.assertEquals(err.http_status, 404) + self.assertEqual(err.http_status, 404) else: self.fail("Expected ClientException but didn't get it") @@ -136,7 +140,7 @@ class TestObjectHandoff(ReplProbeTest): another_onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx}) except ClientException as err: - self.assertEquals(err.http_status, 404) + self.assertEqual(err.http_status, 404) else: self.fail("Expected ClientException but didn't get it") @@ -160,7 +164,7 @@ class TestObjectHandoff(ReplProbeTest): try: client.head_object(self.url, self.token, container, obj) except client.ClientException as err: - self.assertEquals(err.http_status, 404) + self.assertEqual(err.http_status, 404) else: self.fail("Expected ClientException but didn't get it") @@ -206,10 +210,94 @@ class TestObjectHandoff(ReplProbeTest): another_onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx}) except ClientException as err: - self.assertEquals(err.http_status, 404) + self.assertEqual(err.http_status, 404) else: self.fail("Expected ClientException but didn't get it") +class TestECObjectHandoffOverwrite(ECProbeTest): + + def get_object(self, container_name, object_name): + headers, body = client.get_object(self.url, self.token, + container_name, + object_name, + resp_chunk_size=64 * 2 ** 10) + resp_checksum = md5() + for chunk in body: + resp_checksum.update(chunk) + return resp_checksum.hexdigest() + + def test_ec_handoff_overwrite(self): + container_name = 'container-%s' % uuid4() + object_name = 'object-%s' % uuid4() + + # create EC container 
+ headers = {'X-Storage-Policy': self.policy.name} + client.put_container(self.url, self.token, container_name, + headers=headers) + + # PUT object + old_contents = Body() + client.put_object(self.url, self.token, container_name, + object_name, contents=old_contents) + + # get our node lists + opart, onodes = self.object_ring.get_nodes( + self.account, container_name, object_name) + + # shutdown one of the primary data nodes + failed_primary = random.choice(onodes) + failed_primary_device_path = self.device_dir('object', failed_primary) + self.kill_drive(failed_primary_device_path) + + # overwrite our object with some new data + new_contents = Body() + client.put_object(self.url, self.token, container_name, + object_name, contents=new_contents) + self.assertNotEqual(new_contents.etag, old_contents.etag) + + # restore failed primary device + self.revive_drive(failed_primary_device_path) + + # sanity - failed node has old contents + req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)} + headers = direct_client.direct_head_object( + failed_primary, opart, self.account, container_name, + object_name, headers=req_headers) + self.assertEqual(headers['X-Object-Sysmeta-EC-Etag'], + old_contents.etag) + + # we have 1 primary with wrong old etag, and we should have 5 with + # new etag plus a handoff with the new etag, so killing 2 other + # primaries forces proxy to try to GET from all primaries plus handoff. 
+ other_nodes = [n for n in onodes if n != failed_primary] + random.shuffle(other_nodes) + for node in other_nodes[:2]: + self.kill_drive(self.device_dir('object', node)) + + # sanity, after taking out two primaries we should be down to + # only four primaries, one of which has the old etag - but we + # also have a handoff with the new etag out there + found_frags = defaultdict(int) + req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)} + for node in onodes + list(self.object_ring.get_more_nodes(opart)): + try: + headers = direct_client.direct_head_object( + node, opart, self.account, container_name, + object_name, headers=req_headers) + except Exception: + continue + found_frags[headers['X-Object-Sysmeta-EC-Etag']] += 1 + self.assertEqual(found_frags, { + new_contents.etag: 4, # this should be enough to rebuild! + old_contents.etag: 1, + }) + + # clear node error limiting + Manager(['proxy']).restart() + + resp_etag = self.get_object(container_name, object_name) + self.assertEqual(resp_etag, new_contents.etag) + if __name__ == '__main__': main() diff --git a/test/probe/test_reconstructor_durable.py b/test/probe/test_reconstructor_durable.py index cbb94163e9..ccd9e1c78c 100644 --- a/test/probe/test_reconstructor_durable.py +++ b/test/probe/test_reconstructor_durable.py @@ -95,7 +95,7 @@ class TestReconstructorPropDurable(ECProbeTest): if e.errno != errno.ENOENT: raise - # fire up reconstructor to propogate the .durable + # fire up reconstructor to propagate the .durable self.reconstructor.once() # fragment is still exactly as it was before! 
diff --git a/test/probe/test_reconstructor_revert.py b/test/probe/test_reconstructor_revert.py index 1daf7a3725..df4dc8beac 100755 --- a/test/probe/test_reconstructor_revert.py +++ b/test/probe/test_reconstructor_revert.py @@ -21,7 +21,7 @@ import random import shutil from collections import defaultdict -from test.probe.common import ECProbeTest +from test.probe.common import ECProbeTest, Body from swift.common import direct_client from swift.common.storage_policy import EC_POLICY @@ -31,32 +31,6 @@ from swift.obj import reconstructor from swiftclient import client -class Body(object): - - def __init__(self, total=3.5 * 2 ** 20): - self.total = total - self.hasher = md5() - self.size = 0 - self.chunk = 'test' * 16 * 2 ** 10 - - @property - def etag(self): - return self.hasher.hexdigest() - - def __iter__(self): - return self - - def next(self): - if self.size > self.total: - raise StopIteration() - self.size += len(self.chunk) - self.hasher.update(self.chunk) - return self.chunk - - def __next__(self): - return next(self) - - class TestReconstructorRevert(ECProbeTest): def setUp(self): @@ -159,7 +133,7 @@ class TestReconstructorRevert(ECProbeTest): hnode_id = (hnode['port'] - 6000) / 10 self.reconstructor.once(number=hnode_id) - # first threee primaries have data again + # first three primaries have data again for onode in (onodes[0], onodes[2]): self.direct_get(onode, opart) @@ -218,7 +192,7 @@ class TestReconstructorRevert(ECProbeTest): # enable the first node again self.revive_drive(p_dev2) - # propogate the delete... + # propagate the delete... 
# fire up reconstructor on handoff nodes only for hnode in hnodes: hnode_id = (hnode['port'] - 6000) / 10 diff --git a/test/probe/test_wsgi_servers.py b/test/probe/test_wsgi_servers.py new file mode 100644 index 0000000000..437912dcf8 --- /dev/null +++ b/test/probe/test_wsgi_servers.py @@ -0,0 +1,103 @@ +#!/usr/bin/python -u +# Copyright (c) 2010-2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import httplib +import random + +from swift.common.storage_policy import POLICIES +from swift.common.ring import Ring +from swift.common.manager import Manager + +from test.probe.common import resetswift + + +def putrequest(conn, method, path, headers): + + conn.putrequest(method, path, skip_host=(headers and 'Host' in headers)) + if headers: + for header, value in headers.items(): + conn.putheader(header, str(value)) + conn.endheaders() + + +class TestWSGIServerProcessHandling(unittest.TestCase): + + def setUp(self): + resetswift() + + def _check_reload(self, server_name, ip, port): + manager = Manager([server_name]) + manager.start() + + starting_pids = set(pid for server in manager.servers + for (_, pid) in server.iter_pid_files()) + + body = 'test' * 10 + conn = httplib.HTTPConnection('%s:%s' % (ip, port)) + + # sanity request + putrequest(conn, 'PUT', 'blah', + headers={'Content-Length': len(body)}) + conn.send(body) + resp = conn.getresponse() + self.assertEqual(resp.status // 100, 4) + resp.read() + + manager.reload() + + 
post_reload_pids = set(pid for server in manager.servers + for (_, pid) in server.iter_pid_files()) + + # none of the pids we started with are being tracked after reload + msg = 'expected all pids from %r to have died, but found %r' % ( + starting_pids, post_reload_pids) + self.assertFalse(starting_pids & post_reload_pids, msg) + + # ... and yet we can keep using the same connection! + putrequest(conn, 'PUT', 'blah', + headers={'Content-Length': len(body)}) + conn.send(body) + resp = conn.getresponse() + self.assertEqual(resp.status // 100, 4) + resp.read() + + # close our connection + conn.close() + + # sanity + post_close_pids = set(pid for server in manager.servers + for (_, pid) in server.iter_pid_files()) + self.assertEqual(post_reload_pids, post_close_pids) + + def test_proxy_reload(self): + self._check_reload('proxy-server', 'localhost', 8080) + + def test_object_reload(self): + policy = random.choice(list(POLICIES)) + policy.load_ring('/etc/swift') + node = random.choice(policy.object_ring.get_part_nodes(1)) + self._check_reload('object', node['ip'], node['port']) + + def test_account_container_reload(self): + for server in ('account', 'container'): + ring = Ring('/etc/swift', ring_name=server) + node = random.choice(ring.get_part_nodes(1)) + self._check_reload(server, node['ip'], node['port']) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/sample.conf b/test/sample.conf index d1aa030401..3cf3609e59 100644 --- a/test/sample.conf +++ b/test/sample.conf @@ -93,6 +93,7 @@ fake_syslog = False #max_meta_count = 90 #max_meta_overall_size = 4096 #max_header_size = 8192 +#extra_header_count = 0 #max_object_name_length = 1024 #container_listing_limit = 10000 #account_listing_limit = 10000 diff --git a/test/unit/__init__.py b/test/unit/__init__.py index a5d77062fe..ae9723a346 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -15,6 +15,7 @@ """ Swift tests """ +from __future__ import print_function import os import copy import 
logging @@ -37,11 +38,12 @@ from swift.common import swob, utils from swift.common.ring import Ring, RingData from hashlib import md5 import logging.handlers -from httplib import HTTPException + +from six.moves.http_client import HTTPException from swift.common import storage_policy from swift.common.storage_policy import StoragePolicy, ECStoragePolicy import functools -import cPickle as pickle +import six.moves.cPickle as pickle from gzip import GzipFile import mock as mocklib import inspect @@ -508,6 +510,8 @@ class FakeLogger(logging.Logger, object): self.lines_dict = {'critical': [], 'error': [], 'info': [], 'warning': [], 'debug': [], 'notice': []} + clear = _clear # this is a public interface + def get_lines_for_level(self, level): if level not in self.lines_dict: raise KeyError( @@ -571,8 +575,8 @@ class FakeLogger(logging.Logger, object): try: line = record.getMessage() except TypeError: - print 'WARNING: unable to format log message %r %% %r' % ( - record.msg, record.args) + print('WARNING: unable to format log message %r %% %r' % ( + record.msg, record.args)) raise self.lines_dict[record.levelname.lower()].append(line) @@ -596,7 +600,7 @@ class DebugLogger(FakeLogger): def handle(self, record): self._handle(record) - print self.formatter.format(record) + print(self.formatter.format(record)) class DebugLogAdapter(utils.LogAdapter): @@ -859,7 +863,9 @@ def fake_http_connect(*code_iter, **kwargs): headers = dict(self.expect_headers) if expect_status == 409: headers['X-Backend-Timestamp'] = self.timestamp - response = FakeConn(expect_status, headers=headers) + response = FakeConn(expect_status, + timestamp=self.timestamp, + headers=headers) response.status = expect_status return response diff --git a/test/unit/account/test_auditor.py b/test/unit/account/test_auditor.py index c79209bc09..e4cb382c23 100644 --- a/test/unit/account/test_auditor.py +++ b/test/unit/account/test_auditor.py @@ -252,9 +252,9 @@ class TestAuditorRealBroker(unittest.TestCase): 
error_lines = test_auditor.logger.get_lines_for_level('error') self.assertEqual(len(error_lines), 1) error_message = error_lines[0] - self.assert_(broker.db_file in error_message) - self.assert_('container_count' in error_message) - self.assert_('does not match' in error_message) + self.assertTrue(broker.db_file in error_message) + self.assertTrue('container_count' in error_message) + self.assertTrue('does not match' in error_message) self.assertEqual(test_auditor.logger.get_increment_counts(), {'failures': 1}) diff --git a/test/unit/account/test_backend.py b/test/unit/account/test_backend.py index 6598046487..8226195484 100644 --- a/test/unit/account/test_backend.py +++ b/test/unit/account/test_backend.py @@ -52,9 +52,9 @@ class TestAccountBroker(unittest.TestCase): pass except DatabaseConnectionError as e: self.assertTrue(hasattr(e, 'path')) - self.assertEquals(e.path, ':memory:') + self.assertEqual(e.path, ':memory:') self.assertTrue(hasattr(e, 'msg')) - self.assertEquals(e.msg, "DB doesn't exist") + self.assertEqual(e.msg, "DB doesn't exist") except Exception as e: self.fail("Unexpected exception raised: %r" % e) else: @@ -78,35 +78,35 @@ class TestAccountBroker(unittest.TestCase): raise Exception('OMG') except Exception: pass - self.assert_(broker.conn is None) + self.assertTrue(broker.conn is None) def test_empty(self): # Test AccountBroker.empty broker = AccountBroker(':memory:', account='a') broker.initialize(Timestamp('1').internal) - self.assert_(broker.empty()) + self.assertTrue(broker.empty()) broker.put_container('o', Timestamp(time()).internal, 0, 0, 0, POLICIES.default.idx) - self.assert_(not broker.empty()) + self.assertTrue(not broker.empty()) sleep(.00001) broker.put_container('o', 0, Timestamp(time()).internal, 0, 0, POLICIES.default.idx) - self.assert_(broker.empty()) + self.assertTrue(broker.empty()) def test_is_status_deleted(self): # Test AccountBroker.is_status_deleted broker1 = AccountBroker(':memory:', account='a') 
broker1.initialize(Timestamp(time()).internal) - self.assert_(not broker1.is_status_deleted()) + self.assertTrue(not broker1.is_status_deleted()) broker1.delete_db(Timestamp(time()).internal) - self.assert_(broker1.is_status_deleted()) + self.assertTrue(broker1.is_status_deleted()) broker2 = AccountBroker(':memory:', account='a') broker2.initialize(Timestamp(time()).internal) # Set delete_timestamp greater than put_timestamp broker2.merge_timestamps( time(), Timestamp(time()).internal, Timestamp(time() + 999).internal) - self.assert_(broker2.is_status_deleted()) + self.assertTrue(broker2.is_status_deleted()) def test_reclaim(self): broker = AccountBroker(':memory:', account='test_account') @@ -160,23 +160,9 @@ class TestAccountBroker(unittest.TestCase): broker.put_container('y', 0, 0, 0, 0, POLICIES.default.idx) broker.put_container('z', 0, 0, 0, 0, POLICIES.default.idx) broker.reclaim(Timestamp(time()).internal, time()) - # self.assertEqual(len(res), 2) - # self.assert_(isinstance(res, tuple)) - # containers, account_name = res - # self.assert_(containers is None) - # self.assert_(account_name is None) # Now delete the account broker.delete_db(Timestamp(time()).internal) broker.reclaim(Timestamp(time()).internal, time()) - # self.assertEqual(len(res), 2) - # self.assert_(isinstance(res, tuple)) - # containers, account_name = res - # self.assertEqual(account_name, 'test_account') - # self.assertEqual(len(containers), 3) - # self.assert_('x' in containers) - # self.assert_('y' in containers) - # self.assert_('z' in containers) - # self.assert_('a' not in containers) def test_delete_db_status(self): ts = (Timestamp(t).internal for t in itertools.count(int(time()))) @@ -185,7 +171,7 @@ class TestAccountBroker(unittest.TestCase): broker.initialize(start) info = broker.get_info() self.assertEqual(info['put_timestamp'], Timestamp(start).internal) - self.assert_(Timestamp(info['created_at']) >= start) + self.assertTrue(Timestamp(info['created_at']) >= start) 
self.assertEqual(info['delete_timestamp'], '0') if self.__class__ == TestAccountBrokerBeforeMetadata: self.assertEqual(info['status_changed_at'], '0') @@ -198,7 +184,7 @@ class TestAccountBroker(unittest.TestCase): broker.delete_db(delete_timestamp) info = broker.get_info() self.assertEqual(info['put_timestamp'], Timestamp(start).internal) - self.assert_(Timestamp(info['created_at']) >= start) + self.assertTrue(Timestamp(info['created_at']) >= start) self.assertEqual(info['delete_timestamp'], delete_timestamp) self.assertEqual(info['status_changed_at'], delete_timestamp) @@ -500,6 +486,11 @@ class TestAccountBroker(unittest.TestCase): POLICIES.default.idx) broker.put_container('a-b', Timestamp(time()).internal, 0, 0, 0, POLICIES.default.idx) + # NB: ord(".") == ord("-") + 1 + broker.put_container('a.', Timestamp(time()).internal, 0, 0, 0, + POLICIES.default.idx) + broker.put_container('a.b', Timestamp(time()).internal, 0, 0, 0, + POLICIES.default.idx) broker.put_container('b', Timestamp(time()).internal, 0, 0, 0, POLICIES.default.idx) broker.put_container('b-a', Timestamp(time()).internal, 0, 0, 0, @@ -509,20 +500,16 @@ class TestAccountBroker(unittest.TestCase): broker.put_container('c', Timestamp(time()).internal, 0, 0, 0, POLICIES.default.idx) listing = broker.list_containers_iter(15, None, None, None, None) - self.assertEqual(len(listing), 10) self.assertEqual([row[0] for row in listing], - ['a', 'a-', 'a-a', 'a-a-a', 'a-a-b', 'a-b', 'b', - 'b-a', 'b-b', 'c']) + ['a', 'a-', 'a-a', 'a-a-a', 'a-a-b', 'a-b', 'a.', + 'a.b', 'b', 'b-a', 'b-b', 'c']) listing = broker.list_containers_iter(15, None, None, '', '-') - self.assertEqual(len(listing), 5) self.assertEqual([row[0] for row in listing], - ['a', 'a-', 'b', 'b-', 'c']) + ['a', 'a-', 'a.', 'a.b', 'b', 'b-', 'c']) listing = broker.list_containers_iter(15, None, None, 'a-', '-') - self.assertEqual(len(listing), 4) self.assertEqual([row[0] for row in listing], ['a-', 'a-a', 'a-a-', 'a-b']) listing = 
broker.list_containers_iter(15, None, None, 'b-', '-') - self.assertEqual(len(listing), 2) self.assertEqual([row[0] for row in listing], ['b-a', 'b-b']) def test_chexor(self): @@ -598,8 +585,8 @@ class TestAccountBroker(unittest.TestCase): broker2.merge_items(json.loads(json.dumps(broker1.get_items_since( broker2.get_sync(id1), 1000))), id1) items = broker2.get_items_since(-1, 1000) - self.assertEquals(['b', snowman], - sorted([rec['name'] for rec in items])) + self.assertEqual(['b', snowman], + sorted([rec['name'] for rec in items])) items_by_name = dict((rec['name'], rec) for rec in items) self.assertEqual(items_by_name[snowman]['object_count'], 2) @@ -830,7 +817,7 @@ class TestAccountBrokerBeforeMetadata(TestAccountBroker): conn.execute('SELECT metadata FROM account_stat') except BaseException as err: exc = err - self.assert_('no such column: metadata' in str(exc)) + self.assertTrue('no such column: metadata' in str(exc)) def tearDown(self): AccountBroker.create_account_stat_table = \ @@ -917,12 +904,12 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker): conn.execute('SELECT storage_policy_index FROM container') except BaseException as err: exc = err - self.assert_('no such column: storage_policy_index' in str(exc)) + self.assertTrue('no such column: storage_policy_index' in str(exc)) with broker.get() as conn: try: conn.execute('SELECT * FROM policy_stat') except sqlite3.OperationalError as err: - self.assert_('no such table: policy_stat' in str(err)) + self.assertTrue('no such table: policy_stat' in str(err)) else: self.fail('database created with policy_stat table') @@ -949,7 +936,7 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker): ''').fetchone()[0] except sqlite3.OperationalError as err: # confirm that the table really isn't there - self.assert_('no such table: policy_stat' in str(err)) + self.assertTrue('no such table: policy_stat' in str(err)) else: self.fail('broker did not raise sqlite3.OperationalError ' 'trying to select from policy_stat 
table!') @@ -985,8 +972,8 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker): ''').fetchone()[0] except sqlite3.OperationalError as err: # confirm that the table doesn't have this column - self.assert_('no such column: storage_policy_index' in - str(err)) + self.assertTrue('no such column: storage_policy_index' in + str(err)) else: self.fail('broker did not raise sqlite3.OperationalError ' 'trying to select from storage_policy_index ' @@ -1097,7 +1084,7 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker): self.fail('mock exception was not raised') self.assertEqual(len(called), 1) - self.assert_('CREATE TABLE policy_stat' in called[0]) + self.assertTrue('CREATE TABLE policy_stat' in called[0]) # nothing was committed broker = AccountBroker(db_path, account='a') @@ -1105,7 +1092,7 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker): try: conn.execute('SELECT * FROM policy_stat') except sqlite3.OperationalError as err: - self.assert_('no such table: policy_stat' in str(err)) + self.assertTrue('no such table: policy_stat' in str(err)) else: self.fail('half upgraded database!') container_count = conn.execute( @@ -1269,7 +1256,7 @@ class AccountBrokerPreTrackContainerCountSetup(object): ''').fetchone()[0] except sqlite3.OperationalError as err: # confirm that the column really isn't there - self.assert_('no such column: container_count' in str(err)) + self.assertTrue('no such column: container_count' in str(err)) else: self.fail('broker did not raise sqlite3.OperationalError ' 'trying to select container_count from policy_stat!') diff --git a/test/unit/account/test_reaper.py b/test/unit/account/test_reaper.py index b413a646a1..658bdc02cf 100644 --- a/test/unit/account/test_reaper.py +++ b/test/unit/account/test_reaper.py @@ -77,6 +77,7 @@ class FakeBroker(object): class FakeAccountBroker(object): def __init__(self, containers): self.containers = containers + self.containers_yielded = [] def get_info(self): info = {'account': 'a', @@ -101,11 +102,11 @@ class 
FakeRing(object): 'port': 6002, 'device': None}, {'id': '2', - 'ip': '10.10.10.1', + 'ip': '10.10.10.2', 'port': 6002, 'device': None}, {'id': '3', - 'ip': '10.10.10.1', + 'ip': '10.10.10.3', 'port': 6002, 'device': None}, ] @@ -387,7 +388,7 @@ class TestReaper(unittest.TestCase): '1429117638.86767') # verify calls to direct_delete_container - self.assertEquals(mocks['direct_delete_container'].call_count, 3) + self.assertEqual(mocks['direct_delete_container'].call_count, 3) for i, call_args in enumerate( mocks['direct_delete_container'].call_args_list): anode = acc_nodes[i % len(acc_nodes)] @@ -504,24 +505,26 @@ class TestReaper(unittest.TestCase): self.called_amount = 0 self.r = r = self.init_reaper({}, fakelogger=True) r.start_time = time.time() - ctx = [patch('swift.account.reaper.AccountReaper.reap_container', - self.fake_reap_container), - patch('swift.account.reaper.AccountReaper.get_account_ring', - self.fake_account_ring)] - with nested(*ctx): + with patch('swift.account.reaper.AccountReaper.reap_container', + self.fake_reap_container), \ + patch('swift.account.reaper.AccountReaper.get_account_ring', + self.fake_account_ring): nodes = r.get_account_ring().get_part_nodes() - self.assertTrue(r.reap_account(broker, 'partition', nodes)) + for container_shard, node in enumerate(nodes): + self.assertTrue( + r.reap_account(broker, 'partition', nodes, + container_shard=container_shard)) self.assertEqual(self.called_amount, 4) info_lines = r.logger.get_lines_for_level('info') - self.assertEqual(len(info_lines), 2) - start_line, stat_line = info_lines - self.assertEqual(start_line, 'Beginning pass on account a') - self.assertTrue(stat_line.find('1 containers deleted')) - self.assertTrue(stat_line.find('1 objects deleted')) - self.assertTrue(stat_line.find('1 containers remaining')) - self.assertTrue(stat_line.find('1 objects remaining')) - self.assertTrue(stat_line.find('1 containers possibly remaining')) - self.assertTrue(stat_line.find('1 objects possibly 
remaining')) + self.assertEqual(len(info_lines), 6) + for start_line, stat_line in zip(*[iter(info_lines)] * 2): + self.assertEqual(start_line, 'Beginning pass on account a') + self.assertTrue(stat_line.find('1 containers deleted')) + self.assertTrue(stat_line.find('1 objects deleted')) + self.assertTrue(stat_line.find('1 containers remaining')) + self.assertTrue(stat_line.find('1 objects remaining')) + self.assertTrue(stat_line.find('1 containers possibly remaining')) + self.assertTrue(stat_line.find('1 objects possibly remaining')) def test_reap_account_no_container(self): broker = FakeAccountBroker(tuple()) @@ -584,6 +587,67 @@ class TestReaper(unittest.TestCase): r.reap_device('sda1') self.assertEqual(self.called_amount, 0) + def test_reap_device_with_sharding(self): + devices = self.prepare_data_dir() + conf = {'devices': devices} + r = self.init_reaper(conf, myips=['10.10.10.2']) + container_shard_used = [-1] + + def fake_reap_account(*args, **kwargs): + container_shard_used[0] = kwargs.get('container_shard') + + with patch('swift.account.reaper.AccountBroker', + FakeAccountBroker), \ + patch('swift.account.reaper.AccountReaper.get_account_ring', + self.fake_account_ring), \ + patch('swift.account.reaper.AccountReaper.reap_account', + fake_reap_account): + r.reap_device('sda1') + # 10.10.10.2 is second node from ring + self.assertEqual(container_shard_used[0], 1) + + def test_reap_account_with_sharding(self): + devices = self.prepare_data_dir() + self.called_amount = 0 + conf = {'devices': devices} + r = self.init_reaper(conf, myips=['10.10.10.2']) + + container_reaped = [0] + + def fake_list_containers_iter(self, *args): + for container in self.containers: + if container in self.containers_yielded: + continue + + yield container, None, None, None + self.containers_yielded.append(container) + + def fake_reap_container(self, account, account_partition, + account_nodes, container): + container_reaped[0] += 1 + + ctx = 
[patch('swift.account.reaper.AccountBroker', + FakeAccountBroker), + patch('swift.account.reaper.AccountBroker.list_containers_iter', + fake_list_containers_iter), + patch('swift.account.reaper.AccountReaper.reap_container', + fake_reap_container), ] + fake_ring = FakeRing() + with nested(*ctx): + fake_broker = FakeAccountBroker(['c', 'd', 'e']) + r.reap_account(fake_broker, 10, fake_ring.nodes, 0) + self.assertEqual(container_reaped[0], 1) + + fake_broker = FakeAccountBroker(['c', 'd', 'e']) + container_reaped[0] = 0 + r.reap_account(fake_broker, 10, fake_ring.nodes, 1) + self.assertEqual(container_reaped[0], 2) + + container_reaped[0] = 0 + fake_broker = FakeAccountBroker(['c', 'd', 'e']) + r.reap_account(fake_broker, 10, fake_ring.nodes, 2) + self.assertEqual(container_reaped[0], 0) + def test_run_once(self): def prepare_data_dir(): devices_path = tempfile.mkdtemp() diff --git a/test/unit/account/test_replicator.py b/test/unit/account/test_replicator.py index 43e3a4d725..34aad91184 100644 --- a/test/unit/account/test_replicator.py +++ b/test/unit/account/test_replicator.py @@ -77,7 +77,7 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): # add a row to "local" db broker.put_container('/a/c', time.time(), 0, 0, 0, POLICIES.default.idx) - #replicate + # replicate daemon = replicator.AccountReplicator({}) def _rsync_file(db_file, remote_file, **kwargs): diff --git a/test/unit/account/test_server.py b/test/unit/account/test_server.py index d3fbb90f41..5b7a9f0853 100644 --- a/test/unit/account/test_server.py +++ b/test/unit/account/test_server.py @@ -19,13 +19,14 @@ import mock import unittest from tempfile import mkdtemp from shutil import rmtree -from StringIO import StringIO from time import gmtime from test.unit import FakeLogger import itertools import random import simplejson +from six import BytesIO +from six import StringIO import xml.dom.minidom from swift import __version__ as swift_version @@ -62,13 +63,13 @@ class 
TestAccountController(unittest.TestCase): req = Request.blank('/sda1/p/a/c/o', {'REQUEST_METHOD': 'OPTIONS'}) req.content_length = 0 resp = server_handler.OPTIONS(req) - self.assertEquals(200, resp.status_int) + self.assertEqual(200, resp.status_int) for verb in 'OPTIONS GET POST PUT DELETE HEAD REPLICATE'.split(): self.assertTrue( verb in resp.headers['Allow'].split(', ')) - self.assertEquals(len(resp.headers['Allow'].split(', ')), 7) - self.assertEquals(resp.headers['Server'], - (server_handler.server_type + '/' + swift_version)) + self.assertEqual(len(resp.headers['Allow'].split(', ')), 7) + self.assertEqual(resp.headers['Server'], + (server_handler.server_type + '/' + swift_version)) def test_DELETE_not_found(self): req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE', @@ -394,7 +395,7 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 204) - self.assert_('x-account-meta-test' not in resp.headers) + self.assertTrue('x-account-meta-test' not in resp.headers) def test_PUT_GET_sys_metadata(self): prefix = get_sys_meta_prefix('account') @@ -455,7 +456,7 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 204) - self.assert_(hdr not in resp.headers) + self.assertTrue(hdr not in resp.headers) def test_PUT_invalid_partition(self): req = Request.blank('/sda1/./a', environ={'REQUEST_METHOD': 'PUT', @@ -519,7 +520,7 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 204) - self.assert_('x-account-meta-test' not in resp.headers) + self.assertTrue('x-account-meta-test' not in resp.headers) def test_POST_HEAD_sys_metadata(self): prefix = 
get_sys_meta_prefix('account') @@ -572,7 +573,7 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 204) - self.assert_(hdr not in resp.headers) + self.assertTrue(hdr not in resp.headers) def test_POST_invalid_partition(self): req = Request.blank('/sda1/./a', environ={'REQUEST_METHOD': 'POST', @@ -598,11 +599,11 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE', 'HTTP_X_TIMESTAMP': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'POST', 'HTTP_X_TIMESTAMP': '2'}) resp = req.get_response(self.controller) @@ -1094,7 +1095,7 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Put-Timestamp': '1', 'X-Delete-Timestamp': '0', @@ -1102,7 +1103,7 @@ class TestAccountController(unittest.TestCase): 'X-Bytes-Used': '0', 'X-Timestamp': normalize_timestamp(0)}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'}) req.accept = 'application/*' resp = req.get_response(self.controller) @@ -1358,7 +1359,7 @@ class TestAccountController(unittest.TestCase): self.assertEqual(resp.status_int, 507) 
def test_through_call(self): - inbuf = StringIO() + inbuf = BytesIO() errbuf = StringIO() outbuf = StringIO() @@ -1384,7 +1385,7 @@ class TestAccountController(unittest.TestCase): self.assertEqual(outbuf.getvalue()[:4], '404 ') def test_through_call_invalid_path(self): - inbuf = StringIO() + inbuf = BytesIO() errbuf = StringIO() outbuf = StringIO() @@ -1410,7 +1411,7 @@ class TestAccountController(unittest.TestCase): self.assertEqual(outbuf.getvalue()[:4], '400 ') def test_through_call_invalid_path_utf8(self): - inbuf = StringIO() + inbuf = BytesIO() errbuf = StringIO() outbuf = StringIO() @@ -1582,7 +1583,7 @@ class TestAccountController(unittest.TestCase): def test_correct_allowed_method(self): # Test correct work for allowed method using # swift.account.server.AccountController.__call__ - inbuf = StringIO() + inbuf = BytesIO() errbuf = StringIO() outbuf = StringIO() self.controller = AccountController( @@ -1621,7 +1622,7 @@ class TestAccountController(unittest.TestCase): def test_not_allowed_method(self): # Test correct work for NOT allowed method using # swift.account.server.AccountController.__call__ - inbuf = StringIO() + inbuf = BytesIO() errbuf = StringIO() outbuf = StringIO() self.controller = AccountController( @@ -1658,7 +1659,7 @@ class TestAccountController(unittest.TestCase): self.assertEqual(response, answer) def test_call_incorrect_replication_method(self): - inbuf = StringIO() + inbuf = BytesIO() errbuf = StringIO() outbuf = StringIO() self.controller = AccountController( @@ -1686,8 +1687,8 @@ class TestAccountController(unittest.TestCase): 'wsgi.multiprocess': False, 'wsgi.run_once': False} self.controller(env, start_response) - self.assertEquals(errbuf.getvalue(), '') - self.assertEquals(outbuf.getvalue()[:4], '405 ') + self.assertEqual(errbuf.getvalue(), '') + self.assertEqual(outbuf.getvalue()[:4], '405 ') def test_GET_log_requests_true(self): self.controller.logger = FakeLogger() @@ -1747,15 +1748,15 @@ class 
TestAccountController(unittest.TestCase): req = Request.blank('/sda1/p/a', method=method) resp = req.get_response(self.controller) self.assertEqual(resp.status_int // 100, 2) - self.assertEquals(resp.headers['X-Account-Object-Count'], '2') - self.assertEquals(resp.headers['X-Account-Bytes-Used'], '4') - self.assertEquals( + self.assertEqual(resp.headers['X-Account-Object-Count'], '2') + self.assertEqual(resp.headers['X-Account-Bytes-Used'], '4') + self.assertEqual( resp.headers['X-Account-Storage-Policy-%s-Object-Count' % POLICIES[0].name], '2') - self.assertEquals( + self.assertEqual( resp.headers['X-Account-Storage-Policy-%s-Bytes-Used' % POLICIES[0].name], '4') - self.assertEquals( + self.assertEqual( resp.headers['X-Account-Storage-Policy-%s-Container-Count' % POLICIES[0].name], '1') @@ -1785,15 +1786,15 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/sda1/p/a', method=method) resp = req.get_response(self.controller) self.assertEqual(resp.status_int // 100, 2) - self.assertEquals(resp.headers['X-Account-Object-Count'], '2') - self.assertEquals(resp.headers['X-Account-Bytes-Used'], '4') - self.assertEquals( + self.assertEqual(resp.headers['X-Account-Object-Count'], '2') + self.assertEqual(resp.headers['X-Account-Bytes-Used'], '4') + self.assertEqual( resp.headers['X-Account-Storage-Policy-%s-Object-Count' % policy.name], '2') - self.assertEquals( + self.assertEqual( resp.headers['X-Account-Storage-Policy-%s-Bytes-Used' % policy.name], '4') - self.assertEquals( + self.assertEqual( resp.headers['X-Account-Storage-Policy-%s-Container-Count' % policy.name], '1') @@ -1810,7 +1811,7 @@ class TestAccountController(unittest.TestCase): resp = req.get_response(self.controller) self.assertEqual(resp.status_int // 100, 2) for key in resp.headers: - self.assert_('storage-policy' not in key.lower()) + self.assertTrue('storage-policy' not in key.lower()) def test_empty_except_for_used_policies(self): ts = itertools.count() @@ -1826,7 +1827,7 @@ class 
TestAccountController(unittest.TestCase): resp = req.get_response(self.controller) self.assertEqual(resp.status_int // 100, 2) for key in resp.headers: - self.assert_('storage-policy' not in key.lower()) + self.assertTrue('storage-policy' not in key.lower()) # add a container policy = random.choice(POLICIES) @@ -1847,7 +1848,7 @@ class TestAccountController(unittest.TestCase): self.assertEqual(resp.status_int // 100, 2) for key in resp.headers: if 'storage-policy' in key.lower(): - self.assert_(policy.name.lower() in key.lower()) + self.assertTrue(policy.name.lower() in key.lower()) def test_multiple_policies_in_use(self): ts = itertools.count() diff --git a/test/unit/cli/test_form_signature.py b/test/unit/cli/test_form_signature.py index 3120f08cda..55276bcbb2 100644 --- a/test/unit/cli/test_form_signature.py +++ b/test/unit/cli/test_form_signature.py @@ -17,8 +17,8 @@ import hashlib import hmac import mock +from six import StringIO import unittest -from StringIO import StringIO from swift.cli import form_signature diff --git a/test/unit/cli/test_info.py b/test/unit/cli/test_info.py index c9452ae5ac..677f275d98 100644 --- a/test/unit/cli/test_info.py +++ b/test/unit/cli/test_info.py @@ -15,10 +15,10 @@ import os import unittest import mock -from cStringIO import StringIO from shutil import rmtree from tempfile import mkdtemp +from six.moves import cStringIO as StringIO from test.unit import patch_policies, write_fake_ring from swift.common import ring, utils @@ -128,8 +128,8 @@ Metadata: No system metadata found in db file User Metadata: {'mydata': 'swift'}''' - self.assertEquals(sorted(out.getvalue().strip().split('\n')), - sorted(exp_out.split('\n'))) + self.assertEqual(sorted(out.getvalue().strip().split('\n')), + sorted(exp_out.split('\n'))) info = dict( account='acct', @@ -175,8 +175,8 @@ Metadata: X-Container-Foo: bar System Metadata: {'mydata': 'swift'} No user metadata found in db file''' % POLICIES[0].name - 
self.assertEquals(sorted(out.getvalue().strip().split('\n')), - sorted(exp_out.split('\n'))) + self.assertEqual(sorted(out.getvalue().strip().split('\n')), + sorted(exp_out.split('\n'))) def test_print_ring_locations_invalid_args(self): self.assertRaises(ValueError, print_ring_locations, @@ -306,7 +306,7 @@ No user metadata found in db file''' % POLICIES[0].name if exp_raised: exp_out = 'Does not appear to be a DB of type "account":' \ ' ./d49d0ecbb53be1fcc49624f2f7c7ccae.db' - self.assertEquals(out.getvalue().strip(), exp_out) + self.assertEqual(out.getvalue().strip(), exp_out) else: self.fail("Expected an InfoSystemExit exception to be raised") @@ -334,8 +334,8 @@ class TestPrintObj(TestCliInfoBase): out = StringIO() with mock.patch('sys.stdout', out): self.assertRaises(InfoSystemExit, print_obj, datafile) - self.assertEquals(out.getvalue().strip(), - 'Invalid metadata') + self.assertEqual(out.getvalue().strip(), + 'Invalid metadata') def test_print_obj_valid(self): out = StringIO() @@ -386,6 +386,23 @@ class TestPrintObjFullMeta(TestCliInfoBase): print_obj(self.datafile, swift_dir=self.testdir) self.assertTrue('/objects-1/' in out.getvalue()) + def test_print_obj_policy_index(self): + # Check an output of policy index when current directory is in + # object-* directory + out = StringIO() + hash_dir = os.path.dirname(self.datafile) + file_name = os.path.basename(self.datafile) + + # Change working directory to object hash dir + cwd = os.getcwd() + try: + os.chdir(hash_dir) + with mock.patch('sys.stdout', out): + print_obj(file_name, swift_dir=self.testdir) + finally: + os.chdir(cwd) + self.assertTrue('X-Backend-Storage-Policy-Index: 1' in out.getvalue()) + def test_print_obj_meta_and_ts_files(self): # verify that print_obj will also read from meta and ts files base = os.path.splitext(self.datafile)[0] @@ -472,7 +489,7 @@ Other Metadata: No metadata found''' % ( utils.Timestamp(106.3).internal) - self.assertEquals(out.getvalue().strip(), exp_out) + 
self.assertEqual(out.getvalue().strip(), exp_out) metadata = get_metadata({ 'X-Object-Sysmeta-Mtime': '107.3', @@ -497,7 +514,7 @@ Other Metadata: No metadata found''' % ( utils.Timestamp(106.3).internal) - self.assertEquals(out.getvalue().strip(), exp_out) + self.assertEqual(out.getvalue().strip(), exp_out) metadata = get_metadata({ 'X-Object-Meta-Mtime': '107.3', @@ -522,7 +539,7 @@ Other Metadata: X-Object-Mtime: 107.3''' % ( utils.Timestamp(106.3).internal) - self.assertEquals(out.getvalue().strip(), exp_out) + self.assertEqual(out.getvalue().strip(), exp_out) metadata = get_metadata({}) out = StringIO() @@ -543,7 +560,7 @@ Other Metadata: No metadata found''' % ( utils.Timestamp(106.3).internal) - self.assertEquals(out.getvalue().strip(), exp_out) + self.assertEqual(out.getvalue().strip(), exp_out) metadata = get_metadata({'X-Object-Meta-Mtime': '107.3'}) metadata['name'] = '/a-s' @@ -566,7 +583,7 @@ Other Metadata: No metadata found''' % ( utils.Timestamp(106.3).internal) - self.assertEquals(out.getvalue().strip(), exp_out) + self.assertEqual(out.getvalue().strip(), exp_out) metadata = get_metadata({'X-Object-Meta-Mtime': '107.3'}) del metadata['Content-Type'] @@ -588,7 +605,7 @@ Other Metadata: No metadata found''' % ( utils.Timestamp(106.3).internal) - self.assertEquals(out.getvalue().strip(), exp_out) + self.assertEqual(out.getvalue().strip(), exp_out) metadata = get_metadata({'X-Object-Meta-Mtime': '107.3'}) del metadata['X-Timestamp'] @@ -609,4 +626,22 @@ User Metadata: Other Metadata: No metadata found''' - self.assertEquals(out.getvalue().strip(), exp_out) + self.assertEqual(out.getvalue().strip(), exp_out) + + +class TestPrintObjWeirdPath(TestPrintObjFullMeta): + def setUp(self): + super(TestPrintObjWeirdPath, self).setUp() + # device name is objects-0 instead of sda, this is weird. 
+ self.datafile = os.path.join(self.testdir, + 'objects-0', 'objects-1', + '1', 'ea8', + 'db4449e025aca992307c7c804a67eea8', + '1402017884.18202.data') + utils.mkdirs(os.path.dirname(self.datafile)) + with open(self.datafile, 'wb') as fp: + md = {'name': '/AUTH_admin/c/obj', + 'Content-Type': 'application/octet-stream', + 'ETag': 'd41d8cd98f00b204e9800998ecf8427e', + 'Content-Length': 0} + write_metadata(fp, md) diff --git a/test/unit/cli/test_recon.py b/test/unit/cli/test_recon.py index dd53ae9d54..345097c63f 100644 --- a/test/unit/cli/test_recon.py +++ b/test/unit/cli/test_recon.py @@ -20,13 +20,13 @@ import os import random import re import string -from StringIO import StringIO import tempfile import time import unittest import urlparse from eventlet.green import urllib2 +from six import StringIO from swift.cli import recon from swift.common import utils @@ -61,7 +61,7 @@ class TestScout(unittest.TestCase): @mock.patch('eventlet.green.urllib2.urlopen') def test_scout_ok(self, mock_urlopen): mock_urlopen.return_value.read = lambda: json.dumps([]) - url, content, status = self.scout_instance.scout( + url, content, status, ts_start, ts_end = self.scout_instance.scout( ("127.0.0.1", "8080")) self.assertEqual(url, self.url) self.assertEqual(content, []) @@ -70,7 +70,7 @@ class TestScout(unittest.TestCase): @mock.patch('eventlet.green.urllib2.urlopen') def test_scout_url_error(self, mock_urlopen): mock_urlopen.side_effect = urllib2.URLError("") - url, content, status = self.scout_instance.scout( + url, content, status, ts_start, ts_end = self.scout_instance.scout( ("127.0.0.1", "8080")) self.assertTrue(isinstance(content, urllib2.URLError)) self.assertEqual(url, self.url) @@ -80,7 +80,7 @@ class TestScout(unittest.TestCase): def test_scout_http_error(self, mock_urlopen): mock_urlopen.side_effect = urllib2.HTTPError( self.url, 404, "Internal error", None, None) - url, content, status = self.scout_instance.scout( + url, content, status, ts_start, ts_end = 
self.scout_instance.scout( ("127.0.0.1", "8080")) self.assertEqual(url, self.url) self.assertTrue(isinstance(content, urllib2.HTTPError)) @@ -218,7 +218,7 @@ class TestRecon(unittest.TestCase): '/etc/swift/object-1.ring.gz': empty_file_hash, } status = 200 - scout_instance.scout.return_value = (url, response, status) + scout_instance.scout.return_value = (url, response, status, 0, 0) mock_scout.return_value = scout_instance stdout = StringIO() mock_hash = mock.MagicMock() @@ -274,7 +274,7 @@ class TestRecon(unittest.TestCase): url = 'http://%s:%s/recon/quarantined' % host response = responses[host[1]] status = 200 - return url, response, status + return url, response, status, 0, 0 stdout = StringIO() patches = [ @@ -290,10 +290,10 @@ class TestRecon(unittest.TestCase): m = r.match(line) if m: ex = expected.pop(m.group(1)) - self.assertEquals(m.group(2), - " low: %s, high: %s, avg: %s, total: %s," - " Failed: %s%%, no_result: %s, reported: %s" - % ex) + self.assertEqual(m.group(2), + " low: %s, high: %s, avg: %s, total: %s," + " Failed: %s%%, no_result: %s, reported: %s" + % ex) self.assertFalse(expected) def test_drive_audit_check(self): @@ -311,7 +311,7 @@ class TestRecon(unittest.TestCase): url = 'http://%s:%s/recon/driveaudit' % host response = responses[host[1]] status = 200 - return url, response, status + return url, response, status, 0, 0 stdout = StringIO() patches = [ @@ -328,10 +328,10 @@ class TestRecon(unittest.TestCase): for line in lines: m = r.match(line) if m: - self.assertEquals(m.group(2), - " low: %s, high: %s, avg: %s, total: %s," - " Failed: %s%%, no_result: %s, reported: %s" - % expected) + self.assertEqual(m.group(2), + " low: %s, high: %s, avg: %s, total: %s," + " Failed: %s%%, no_result: %s, reported: %s" + % expected) class TestReconCommands(unittest.TestCase): @@ -387,7 +387,7 @@ class TestReconCommands(unittest.TestCase): res_account = 'Invalid: http://127.0.0.1:6012/ is account-server' valid = "1/1 hosts ok, 0 error[s] while checking 
hosts." - #Test for object server type - default + # Test for object server type - default with nested(*patches): self.recon.server_type_check(hosts) @@ -396,7 +396,7 @@ class TestReconCommands(unittest.TestCase): self.assertTrue(res_account in output.splitlines()) stdout.truncate(0) - #Test ok for object server type - default + # Test ok for object server type - default with nested(*patches): self.recon.server_type_check([hosts[0]]) @@ -404,7 +404,7 @@ class TestReconCommands(unittest.TestCase): self.assertTrue(valid in output.splitlines()) stdout.truncate(0) - #Test for account server type + # Test for account server type with nested(*patches): self.recon.server_type = 'account' self.recon.server_type_check(hosts) @@ -414,7 +414,7 @@ class TestReconCommands(unittest.TestCase): self.assertTrue(res_object in output.splitlines()) stdout.truncate(0) - #Test ok for account server type + # Test ok for account server type with nested(*patches): self.recon.server_type = 'account' self.recon.server_type_check([hosts[2]]) @@ -423,7 +423,7 @@ class TestReconCommands(unittest.TestCase): self.assertTrue(valid in output.splitlines()) stdout.truncate(0) - #Test for container server type + # Test for container server type with nested(*patches): self.recon.server_type = 'container' self.recon.server_type_check(hosts) @@ -433,7 +433,7 @@ class TestReconCommands(unittest.TestCase): self.assertTrue(res_object in output.splitlines()) stdout.truncate(0) - #Test ok for container server type + # Test ok for container server type with nested(*patches): self.recon.server_type = 'container' self.recon.server_type_check([hosts[1]]) @@ -491,7 +491,7 @@ class TestReconCommands(unittest.TestCase): return [('http://127.0.0.1:6010/recon/auditor/object', { 'object_auditor_stats_ALL': values, 'object_auditor_stats_ZBF': values, - }, 200)] + }, 200, 0, 0)] response = {} @@ -535,7 +535,9 @@ class TestReconCommands(unittest.TestCase): "avail": 15, "used": 85, "size": 100}, {"device": "sdd1", 
"mounted": True, "avail": 15, "used": 85, "size": 100}], - 200)] + 200, + 0, + 0)] cli = recon.SwiftRecon() cli.pool.imap = dummy_request @@ -576,40 +578,6 @@ class TestReconCommands(unittest.TestCase): cli.disk_usage([('127.0.0.1', 6010)], 5, 0) mock_print.assert_has_calls(expected_calls) - @mock.patch('__builtin__.print') - @mock.patch('time.time') - def test_object_replication_check(self, mock_now, mock_print): - now = 1430000000.0 - - def dummy_request(*args, **kwargs): - return [ - ('http://127.0.0.1:6010/recon/replication/object', - {"object_replication_time": 61, - "object_replication_last": now}, - 200), - ('http://127.0.0.1:6020/recon/replication/object', - {"object_replication_time": 23, - "object_replication_last": now}, - 200), - ] - - cli = recon.SwiftRecon() - cli.pool.imap = dummy_request - - default_calls = [ - mock.call('[replication_time] low: 23, high: 61, avg: 42.0, ' + - 'total: 84, Failed: 0.0%, no_result: 0, reported: 2'), - mock.call('Oldest completion was 2015-04-25 22:13:20 ' + - '(42 seconds ago) by 127.0.0.1:6010.'), - mock.call('Most recent completion was 2015-04-25 22:13:20 ' + - '(42 seconds ago) by 127.0.0.1:6010.'), - ] - - mock_now.return_value = now + 42 - cli.object_replication_check([('127.0.0.1', 6010), - ('127.0.0.1', 6020)]) - mock_print.assert_has_calls(default_calls) - @mock.patch('__builtin__.print') @mock.patch('time.time') def test_replication_check(self, mock_now, mock_print): @@ -625,7 +593,9 @@ class TestReconCommands(unittest.TestCase): "remote_merge": 0, "diff_capped": 0, "start": now, "hashmatch": 0, "diff": 0, "empty": 0}, "replication_time": 42}, - 200), + 200, + 0, + 0), ('http://127.0.0.1:6021/recon/replication/container', {"replication_last": now, "replication_stats": { @@ -634,7 +604,9 @@ class TestReconCommands(unittest.TestCase): "remote_merge": 0, "diff_capped": 0, "start": now, "hashmatch": 0, "diff": 0, "empty": 0}, "replication_time": 23}, - 200), + 200, + 0, + 0), ] cli = recon.SwiftRecon() @@ -671,11 
+643,15 @@ class TestReconCommands(unittest.TestCase): ('http://127.0.0.1:6010/recon/load', {"1m": 0.2, "5m": 0.4, "15m": 0.25, "processes": 10000, "tasks": "1/128"}, - 200), + 200, + 0, + 0), ('http://127.0.0.1:6020/recon/load', {"1m": 0.4, "5m": 0.8, "15m": 0.75, "processes": 9000, "tasks": "1/200"}, - 200), + 200, + 0, + 0), ] cli = recon.SwiftRecon() @@ -695,3 +671,75 @@ class TestReconCommands(unittest.TestCase): # We need any_order=True because the order of calls depends on the dict # that is returned from the recon middleware, thus can't rely on it mock_print.assert_has_calls(default_calls, any_order=True) + + @mock.patch('__builtin__.print') + @mock.patch('time.time') + def test_time_check(self, mock_now, mock_print): + now = 1430000000.0 + mock_now.return_value = now + + def dummy_request(*args, **kwargs): + return [ + ('http://127.0.0.1:6010/recon/load', + now, + 200, + now - 0.5, + now + 0.5), + ('http://127.0.0.1:6020/recon/load', + now, + 200, + now, + now), + ] + + cli = recon.SwiftRecon() + cli.pool.imap = dummy_request + + default_calls = [ + mock.call('2/2 hosts matched, 0 error[s] while checking hosts.') + ] + + cli.time_check([('127.0.0.1', 6010), ('127.0.0.1', 6020)]) + # We need any_order=True because the order of calls depends on the dict + # that is returned from the recon middleware, thus can't rely on it + mock_print.assert_has_calls(default_calls, any_order=True) + + @mock.patch('__builtin__.print') + @mock.patch('time.time') + def test_time_check_mismatch(self, mock_now, mock_print): + now = 1430000000.0 + mock_now.return_value = now + + def dummy_request(*args, **kwargs): + return [ + ('http://127.0.0.1:6010/recon/time', + now, + 200, + now + 0.5, + now + 1.3), + ('http://127.0.0.1:6020/recon/time', + now, + 200, + now, + now), + ] + + cli = recon.SwiftRecon() + cli.pool.imap = dummy_request + + default_calls = [ + mock.call("!! 
http://127.0.0.1:6010/recon/time current time is " + "2015-04-25 22:13:21, but remote is " + "2015-04-25 22:13:20, differs by 1.30 sec"), + mock.call('1/2 hosts matched, 0 error[s] while checking hosts.'), + ] + + def mock_localtime(*args, **kwargs): + return time.gmtime(*args, **kwargs) + + with mock.patch("time.localtime", mock_localtime): + cli.time_check([('127.0.0.1', 6010), ('127.0.0.1', 6020)]) + + # We need any_order=True because the order of calls depends on the dict + # that is returned from the recon middleware, thus can't rely on it + mock_print.assert_has_calls(default_calls, any_order=True) diff --git a/test/unit/cli/test_ring_builder_analyzer.py b/test/unit/cli/test_ring_builder_analyzer.py index 52ceb8e354..f69bfcef1b 100644 --- a/test/unit/cli/test_ring_builder_analyzer.py +++ b/test/unit/cli/test_ring_builder_analyzer.py @@ -14,22 +14,27 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os import json import mock import unittest from StringIO import StringIO +from test.unit import with_tempdir from swift.cli.ring_builder_analyzer import parse_scenario, run_scenario class TestRunScenario(unittest.TestCase): - def test_it_runs(self): + @with_tempdir + def test_it_runs(self, tempdir): + builder_path = os.path.join(tempdir, 'test.builder') scenario = { 'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0, 'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100], ['add', 'z2-3.4.5.6:7/sda9', 200]], [['set_weight', 0, 150]], - [['remove', 1]]]} + [['remove', 1]], + [['save', builder_path]]]} parsed = parse_scenario(json.dumps(scenario)) fake_stdout = StringIO() @@ -40,6 +45,7 @@ class TestRunScenario(unittest.TestCase): # this doesn't crash and produces output that resembles something # useful is good enough. 
self.assertTrue('Rebalance' in fake_stdout.getvalue()) + self.assertTrue(os.path.exists(builder_path)) class TestParseScenario(unittest.TestCase): @@ -62,8 +68,8 @@ class TestParseScenario(unittest.TestCase): 'meta': '', 'port': 7, 'region': 1, - 'replication_ip': None, - 'replication_port': None, + 'replication_ip': '3.4.5.6', + 'replication_port': 7, 'weight': 100.0, 'zone': 2}], ['add', {'device': u'sda9', @@ -71,8 +77,8 @@ class TestParseScenario(unittest.TestCase): 'meta': '', 'port': 7, 'region': 1, - 'replication_ip': None, - 'replication_port': None, + 'replication_ip': '3.4.5.6', + 'replication_port': 7, 'weight': 200.0, 'zone': 2}]], [['set_weight', 0, 150.0]], @@ -180,7 +186,14 @@ class TestParseScenario(unittest.TestCase): # can't parse busted = dict(base, rounds=[[['add', 'not a good value', 100]]]) - self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + # N.B. the ValueError's coming out of ring.utils.parse_add_value + # are already pretty good + expected = "Invalid device specifier (round 0, command 0): " \ + "Invalid add value: not a good value" + try: + parse_scenario(json.dumps(busted)) + except ValueError as err: + self.assertEqual(str(err), expected) # negative weight busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6000/d7', -1]]]) @@ -216,7 +229,12 @@ class TestParseScenario(unittest.TestCase): # bad dev id busted = dict(base, rounds=[[['set_weight', 'not an int', 90]]]) - self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + expected = "Invalid device ID in set_weight (round 0, command 0): " \ + "invalid literal for int() with base 10: 'not an int'" + try: + parse_scenario(json.dumps(busted)) + except ValueError as e: + self.assertEqual(str(e), expected) # negative weight busted = dict(base, rounds=[[['set_weight', 1, -1]]]) @@ -225,3 +243,11 @@ class TestParseScenario(unittest.TestCase): # bogus weight busted = dict(base, rounds=[[['set_weight', 1, 'bogus']]]) self.assertRaises(ValueError, parse_scenario, 
json.dumps(busted)) + + def test_bad_save(self): + base = { + 'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0} + + # no builder name + busted = dict(base, rounds=[[['save']]]) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) diff --git a/test/unit/cli/test_ringbuilder.py b/test/unit/cli/test_ringbuilder.py index f3df11dc1f..fac1391cc3 100644 --- a/test/unit/cli/test_ringbuilder.py +++ b/test/unit/cli/test_ringbuilder.py @@ -16,10 +16,11 @@ import logging import mock import os -import StringIO +import six import tempfile import unittest import uuid +import shlex from swift.cli import ringbuilder from swift.common import exceptions @@ -29,8 +30,11 @@ from swift.common.ring import RingBuilder class RunSwiftRingBuilderMixin(object): def run_srb(self, *argv): - mock_stdout = StringIO.StringIO() - mock_stderr = StringIO.StringIO() + if len(argv) == 1 and isinstance(argv[0], basestring): + # convert a single string to a list + argv = shlex.split(argv[0]) + mock_stdout = six.StringIO() + mock_stderr = six.StringIO() srb_args = ["", self.tempfile] + [str(s) for s in argv] @@ -40,7 +44,10 @@ class RunSwiftRingBuilderMixin(object): ringbuilder.main(srb_args) except SystemExit as err: if err.code not in (0, 1): # (success, warning) - raise + msg = 'Unexpected exit status %s\n' % err.code + msg += 'STDOUT:\n%s\nSTDERR:\n%s\n' % ( + mock_stdout.getvalue(), mock_stderr.getvalue()) + self.fail(msg) return (mock_stdout.getvalue(), mock_stderr.getvalue()) @@ -140,7 +147,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder._parse_search_values(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_find_parts(self): rb = RingBuilder(8, 3, 0) @@ -178,7 +185,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder._parse_list_parts_values(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) 
def test_parse_add_values_number_of_arguments(self): # Test Number of arguments abnormal @@ -188,7 +195,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder._parse_add_values(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_set_weight_values_no_devices(self): # Test no devices @@ -197,7 +204,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder._set_weight_values([], 100) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_parse_set_weight_values_number_of_arguments(self): # Test Number of arguments abnormal @@ -207,7 +214,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder._parse_set_weight_values(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) argv = ["--region", "2"] err = None @@ -215,7 +222,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder._parse_set_weight_values(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_set_info_values_no_devices(self): # Test no devices @@ -224,7 +231,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder._set_info_values([], 100) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_parse_set_info_values_number_of_arguments(self): # Test Number of arguments abnormal @@ -234,7 +241,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder._parse_set_info_values(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_parse_remove_values_number_of_arguments(self): # Test Number of arguments abnormal @@ -244,7 +251,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder._parse_remove_values(argv) except 
SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_create_ring(self): argv = ["", self.tmpfile, "create", "6", "3.14159265359", "1"] @@ -398,7 +405,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_add_device_already_exists(self): # Test Add a device that already exists @@ -409,7 +416,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_remove_device(self): for search_value in self.search_values: @@ -685,7 +692,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_remove_device_no_matching(self): self.create_sample_ring() @@ -697,7 +704,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_set_weight(self): for search_value in self.search_values: @@ -896,7 +903,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_set_weight_no_matching(self): self.create_sample_ring() @@ -908,7 +915,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_set_info(self): for search_value in self.search_values: @@ -1188,7 +1195,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 
2) + self.assertEqual(err.code, 2) def test_set_info_no_matching(self): self.create_sample_ring() @@ -1200,7 +1207,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_set_info_already_exists(self): self.create_sample_ring() @@ -1223,7 +1230,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_set_min_part_hours(self): self.create_sample_ring() @@ -1240,7 +1247,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_set_replicas(self): self.create_sample_ring() @@ -1314,7 +1321,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_set_replicas_invalid_value(self): # Test not a valid number @@ -1324,7 +1331,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) # Test new replicas is 0 argv = ["", self.tmpfile, "set_replicas", "0"] @@ -1333,7 +1340,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_validate(self): self.create_sample_ring() @@ -1351,7 +1358,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_validate_corrupted_file(self): self.create_sample_ring() @@ -1369,7 
+1376,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_validate_non_existent_file(self): rand_file = '%s/%s' % ('/tmp', str(uuid.uuid4())) @@ -1379,7 +1386,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_validate_non_accessible_file(self): with mock.patch.object( @@ -1391,7 +1398,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_validate_generic_error(self): with mock.patch.object( @@ -1403,7 +1410,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_search_device_ipv4_old_format(self): self.create_sample_ring() @@ -1503,7 +1510,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_search_device_no_matching(self): self.create_sample_ring() @@ -1515,7 +1522,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_list_parts_ipv4_old_format(self): self.create_sample_ring() @@ -1615,7 +1622,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_list_parts_no_matching(self): self.create_sample_ring() @@ -1627,7 +1634,7 @@ class TestCommands(unittest.TestCase, 
RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_unknown(self): argv = ["", self.tmpfile, "unknown"] @@ -1636,7 +1643,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_default(self): self.create_sample_ring() @@ -1662,7 +1669,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 1) + self.assertEqual(err.code, 1) def test_rebalance_no_devices(self): # Test no devices @@ -1674,7 +1681,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_write_ring(self): self.create_sample_ring() @@ -1695,7 +1702,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) def test_warn_at_risk(self): self.create_sample_ring() @@ -1708,7 +1715,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as e: err = e - self.assertEquals(err.code, 1) + self.assertEqual(err.code, 1) def test_invalid_device_name(self): self.create_sample_ring() @@ -1724,7 +1731,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as exc: err = exc - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) argv = ["", self.tmpfile, @@ -1739,7 +1746,31 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ringbuilder.main(argv) except SystemExit as exc: err = exc - self.assertEquals(err.code, 2) + self.assertEqual(err.code, 2) + + def 
test_dispersion_command(self): + self.create_sample_ring() + self.run_srb('rebalance') + out, err = self.run_srb('dispersion -v') + self.assertIn('dispersion', out.lower()) + self.assertFalse(err) + + def test_use_ringfile_as_builderfile(self): + mock_stdout = six.StringIO() + mock_stderr = six.StringIO() + + argv = ["", "object.ring.gz"] + + try: + with mock.patch("sys.stdout", mock_stdout): + with mock.patch("sys.stderr", mock_stderr): + ringbuilder.main(argv) + except SystemExit: + pass + expected = "Note: using object.builder instead of object.ring.gz " \ + "as builder file\n" \ + "Ring Builder file does not exist: object.builder\n" + self.assertEqual(expected, mock_stdout.getvalue()) class TestRebalanceCommand(unittest.TestCase, RunSwiftRingBuilderMixin): @@ -1756,8 +1787,8 @@ class TestRebalanceCommand(unittest.TestCase, RunSwiftRingBuilderMixin): pass def run_srb(self, *argv): - mock_stdout = StringIO.StringIO() - mock_stderr = StringIO.StringIO() + mock_stdout = six.StringIO() + mock_stderr = six.StringIO() srb_args = ["", self.tempfile] + [str(s) for s in argv] diff --git a/test/unit/common/middleware/helpers.py b/test/unit/common/middleware/helpers.py index 7c1b45571e..bc6ad50fdd 100644 --- a/test/unit/common/middleware/helpers.py +++ b/test/unit/common/middleware/helpers.py @@ -76,7 +76,7 @@ class FakeSwift(object): path += '?' 
+ env['QUERY_STRING'] if 'swift.authorize' in env: - resp = env['swift.authorize']() + resp = env['swift.authorize'](swob.Request(env)) if resp: return resp(env, start_response) diff --git a/test/unit/common/middleware/test_account_quotas.py b/test/unit/common/middleware/test_account_quotas.py index e8c2563c5a..e8e461fd53 100644 --- a/test/unit/common/middleware/test_account_quotas.py +++ b/test/unit/common/middleware/test_account_quotas.py @@ -96,7 +96,7 @@ class TestAccountQuota(unittest.TestCase): environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache}) res = req.get_response(app) - #Response code of 200 because authentication itself is not done here + # Response code of 200 because authentication itself is not done here self.assertEquals(res.status_int, 200) def test_no_quotas(self): @@ -253,7 +253,7 @@ class TestAccountQuota(unittest.TestCase): cache = FakeCache(None) req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT', - 'swift.cache': cache}, + 'swift.cache': cache}, headers={'x-copy-from': '/c2/o2'}) res = req.get_response(app) self.assertEquals(res.status_int, 413) @@ -267,7 +267,7 @@ class TestAccountQuota(unittest.TestCase): cache = FakeCache(None) req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'COPY', - 'swift.cache': cache}, + 'swift.cache': cache}, headers={'Destination': '/c/o'}) res = req.get_response(app) self.assertEquals(res.status_int, 413) @@ -281,7 +281,7 @@ class TestAccountQuota(unittest.TestCase): cache = FakeCache(None) req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT', - 'swift.cache': cache}, + 'swift.cache': cache}, headers={'x-copy-from': '/c2/o2'}) res = req.get_response(app) self.assertEquals(res.status_int, 200) @@ -294,7 +294,7 @@ class TestAccountQuota(unittest.TestCase): cache = FakeCache(None) req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'COPY', - 'swift.cache': cache}, + 'swift.cache': cache}, headers={'Destination': '/c/o'}) res = req.get_response(app) 
self.assertEquals(res.status_int, 200) @@ -306,7 +306,7 @@ class TestAccountQuota(unittest.TestCase): cache = FakeCache(None) req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT', - 'swift.cache': cache}, + 'swift.cache': cache}, headers={'x-copy-from': '/c2/o3'}) res = req.get_response(app) self.assertEquals(res.status_int, 200) @@ -318,7 +318,7 @@ class TestAccountQuota(unittest.TestCase): cache = FakeCache(None) req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT', - 'swift.cache': cache}, + 'swift.cache': cache}, headers={'x-copy-from': 'bad_path'}) res = req.get_response(app) self.assertEquals(res.status_int, 412) diff --git a/test/unit/common/middleware/test_acl.py b/test/unit/common/middleware/test_acl.py index e74044d405..aa100bc4db 100644 --- a/test/unit/common/middleware/test_acl.py +++ b/test/unit/common/middleware/test_acl.py @@ -159,48 +159,49 @@ class TestACL(unittest.TestCase): (args, result, expected)) def test_referrer_allowed(self): - self.assert_(not acl.referrer_allowed('host', None)) - self.assert_(not acl.referrer_allowed('host', [])) - self.assert_(acl.referrer_allowed(None, ['*'])) - self.assert_(acl.referrer_allowed('', ['*'])) - self.assert_(not acl.referrer_allowed(None, ['specific.host'])) - self.assert_(not acl.referrer_allowed('', ['specific.host'])) - self.assert_(acl.referrer_allowed('http://www.example.com/index.html', - ['.example.com'])) - self.assert_(acl.referrer_allowed( + self.assertTrue(not acl.referrer_allowed('host', None)) + self.assertTrue(not acl.referrer_allowed('host', [])) + self.assertTrue(acl.referrer_allowed(None, ['*'])) + self.assertTrue(acl.referrer_allowed('', ['*'])) + self.assertTrue(not acl.referrer_allowed(None, ['specific.host'])) + self.assertTrue(not acl.referrer_allowed('', ['specific.host'])) + self.assertTrue( + acl.referrer_allowed('http://www.example.com/index.html', + ['.example.com'])) + self.assertTrue(acl.referrer_allowed( 'http://user@www.example.com/index.html', 
['.example.com'])) - self.assert_(acl.referrer_allowed( + self.assertTrue(acl.referrer_allowed( 'http://user:pass@www.example.com/index.html', ['.example.com'])) - self.assert_(acl.referrer_allowed( + self.assertTrue(acl.referrer_allowed( 'http://www.example.com:8080/index.html', ['.example.com'])) - self.assert_(acl.referrer_allowed( + self.assertTrue(acl.referrer_allowed( 'http://user@www.example.com:8080/index.html', ['.example.com'])) - self.assert_(acl.referrer_allowed( + self.assertTrue(acl.referrer_allowed( 'http://user:pass@www.example.com:8080/index.html', ['.example.com'])) - self.assert_(acl.referrer_allowed( + self.assertTrue(acl.referrer_allowed( 'http://user:pass@www.example.com:8080', ['.example.com'])) - self.assert_(acl.referrer_allowed('http://www.example.com', - ['.example.com'])) - self.assert_(not acl.referrer_allowed( + self.assertTrue(acl.referrer_allowed('http://www.example.com', + ['.example.com'])) + self.assertTrue(not acl.referrer_allowed( 'http://thief.example.com', ['.example.com', '-thief.example.com'])) - self.assert_(not acl.referrer_allowed( + self.assertTrue(not acl.referrer_allowed( 'http://thief.example.com', ['*', '-thief.example.com'])) - self.assert_(acl.referrer_allowed( + self.assertTrue(acl.referrer_allowed( 'http://www.example.com', ['.other.com', 'www.example.com'])) - self.assert_(acl.referrer_allowed( + self.assertTrue(acl.referrer_allowed( 'http://www.example.com', ['-.example.com', 'www.example.com'])) # This is considered a relative uri to the request uri, a mode not # currently supported. 
- self.assert_(not acl.referrer_allowed('www.example.com', - ['.example.com'])) - self.assert_(not acl.referrer_allowed('../index.html', - ['.example.com'])) - self.assert_(acl.referrer_allowed('www.example.com', ['*'])) + self.assertTrue(not acl.referrer_allowed('www.example.com', + ['.example.com'])) + self.assertTrue(not acl.referrer_allowed('../index.html', + ['.example.com'])) + self.assertTrue(acl.referrer_allowed('www.example.com', ['*'])) if __name__ == '__main__': diff --git a/test/unit/common/middleware/test_bulk.py b/test/unit/common/middleware/test_bulk.py index d637c2a55b..0c6845cd8e 100644 --- a/test/unit/common/middleware/test_bulk.py +++ b/test/unit/common/middleware/test_bulk.py @@ -21,9 +21,9 @@ import tarfile import urllib import zlib import mock +from six import BytesIO from shutil import rmtree from tempfile import mkdtemp -from StringIO import StringIO from eventlet import sleep from mock import patch, call from test.unit.common.middleware.helpers import FakeSwift @@ -155,7 +155,7 @@ class TestUntarMetadata(unittest.TestCase): with open(os.path.join(self.testdir, "obj2"), "w") as fh2: fh2.write("obj2 contents\n") - tar_ball = StringIO() + tar_ball = BytesIO() tar_file = tarfile.TarFile.open(fileobj=tar_ball, mode="w", format=tarfile.PAX_FORMAT) @@ -297,10 +297,12 @@ class TestUntar(unittest.TestCase): req.headers['transfer-encoding'] = 'chunked' resp_body = self.handle_extract_and_iter( req, compress_format, 'application/xml') - self.assert_('201 Created' in - resp_body) - self.assert_('6' in - resp_body) + self.assertTrue( + '201 Created' in + resp_body) + self.assertTrue( + '6' in + resp_body) # test out nonexistent format req = Request.blank('/tar_works/acc/cont/?extract-archive=tar', @@ -316,7 +318,7 @@ class TestUntar(unittest.TestCase): app_iter = self.bulk(req.environ, fake_start_response) resp_body = ''.join([i for i in app_iter]) - self.assert_('Response Status: 406' in resp_body) + self.assertTrue('Response Status: 406' in resp_body) 
def test_extract_call(self): base_name = 'base_works_gz' @@ -600,10 +602,10 @@ class TestUntar(unittest.TestCase): def test_get_response_body(self): txt_body = bulk.get_response_body( 'bad_formay', {'hey': 'there'}, [['json > xml', '202 Accepted']]) - self.assert_('hey: there' in txt_body) + self.assertTrue('hey: there' in txt_body) xml_body = bulk.get_response_body( 'text/xml', {'hey': 'there'}, [['json > xml', '202 Accepted']]) - self.assert_('>' in xml_body) + self.assertTrue('>' in xml_body) class TestDelete(unittest.TestCase): @@ -678,7 +680,7 @@ class TestDelete(unittest.TestCase): headers={'Accept': 'application/json', 'Content-Type': 'text/xml'}) req.method = 'POST' - req.environ['wsgi.input'] = StringIO('/c/f\n/c/f404') + req.environ['wsgi.input'] = BytesIO(b'/c/f\n/c/f404') resp_body = self.handle_delete_and_iter(req) resp_data = utils.json.loads(resp_body) self.assertEquals(resp_data['Response Status'], '406 Not Acceptable') @@ -691,7 +693,7 @@ class TestDelete(unittest.TestCase): req.method = 'POST' req.headers['Transfer-Encoding'] = 'chunked' req.headers['Accept'] = 'application/json' - req.environ['wsgi.input'] = StringIO('/c/f%20') + req.environ['wsgi.input'] = BytesIO(b'/c/f%20') list(self.bulk(req.environ, fake_start_response)) # iterate over resp self.assertEquals( self.app.delete_paths, ['/delete_works/AUTH_Acc/c/f ']) @@ -706,7 +708,7 @@ class TestDelete(unittest.TestCase): with patch.object(self.bulk, 'max_path_length', 2): results = [] - req.environ['wsgi.input'] = StringIO('1\n2\n3') + req.environ['wsgi.input'] = BytesIO(b'1\n2\n3') results = self.bulk.get_objs_to_delete(req) self.assertEquals(results, [{'name': '1'}, {'name': '2'}, {'name': '3'}]) @@ -737,8 +739,8 @@ class TestDelete(unittest.TestCase): def test_bulk_delete_too_many_newlines(self): req = Request.blank('/delete_works/AUTH_Acc') req.method = 'POST' - data = '\n\n' * self.bulk.max_deletes_per_request - req.environ['wsgi.input'] = StringIO(data) + data = b'\n\n' * 
self.bulk.max_deletes_per_request + req.environ['wsgi.input'] = BytesIO(data) req.content_length = len(data) resp_body = self.handle_delete_and_iter(req) self.assertTrue('413 Request Entity Too Large' in resp_body) @@ -857,8 +859,8 @@ class TestDelete(unittest.TestCase): headers={'Accept': 'application/json'}) req.method = 'POST' bad_file = 'c/' + ('1' * self.bulk.max_path_length) - data = '/c/f\n' + bad_file + '\n/c/f' - req.environ['wsgi.input'] = StringIO(data) + data = b'/c/f\n' + bad_file.encode('ascii') + b'\n/c/f' + req.environ['wsgi.input'] = BytesIO(data) req.headers['Transfer-Encoding'] = 'chunked' resp_body = self.handle_delete_and_iter(req) resp_data = utils.json.loads(resp_body) diff --git a/test/unit/common/middleware/test_dlo.py b/test/unit/common/middleware/test_dlo.py index 119e4aba55..702eb2432d 100644 --- a/test/unit/common/middleware/test_dlo.py +++ b/test/unit/common/middleware/test_dlo.py @@ -1,4 +1,4 @@ -#-*- coding:utf-8 -*- +# coding: utf-8 # Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -793,7 +793,7 @@ class TestDloGetManifest(DloTestCase): def test_get_with_auth_overridden(self): auth_got_called = [0] - def my_auth(): + def my_auth(req): auth_got_called[0] += 1 return None diff --git a/test/unit/common/middleware/test_except.py b/test/unit/common/middleware/test_except.py index 227a6b883d..dd2dd667ab 100644 --- a/test/unit/common/middleware/test_except.py +++ b/test/unit/common/middleware/test_except.py @@ -69,7 +69,7 @@ class TestCatchErrors(unittest.TestCase): self.assertEquals(self.logger.txn_id, None) def start_response(status, headers, exc_info=None): - self.assert_('X-Trans-Id' in (x[0] for x in headers)) + self.assertTrue('X-Trans-Id' in (x[0] for x in headers)) app = catch_errors.CatchErrorMiddleware(FakeApp(), {}) req = Request.blank('/v1/a/c/o') app(req.environ, start_response) @@ -79,7 +79,7 @@ class TestCatchErrors(unittest.TestCase): 
self.assertEquals(self.logger.txn_id, None) def start_response(status, headers, exc_info=None): - self.assert_('X-Trans-Id' in (x[0] for x in headers)) + self.assertTrue('X-Trans-Id' in (x[0] for x in headers)) app = catch_errors.CatchErrorMiddleware(FakeApp(True), {}) req = Request.blank('/v1/a/c/o') app(req.environ, start_response) @@ -96,7 +96,7 @@ class TestCatchErrors(unittest.TestCase): self.assertEquals(self.logger.txn_id, None) def start_response(status, headers, exc_info=None): - self.assert_('X-Trans-Id' in (x[0] for x in headers)) + self.assertTrue('X-Trans-Id' in (x[0] for x in headers)) app = catch_errors.CatchErrorMiddleware( FakeApp(), {'trans_id_suffix': '-stuff'}) req = Request.blank('/v1/a/c/o') @@ -107,7 +107,7 @@ class TestCatchErrors(unittest.TestCase): self.assertEquals(self.logger.txn_id, None) def start_response(status, headers, exc_info=None): - self.assert_('X-Trans-Id' in (x[0] for x in headers)) + self.assertTrue('X-Trans-Id' in (x[0] for x in headers)) app = catch_errors.CatchErrorMiddleware( FakeApp(), {'trans_id_suffix': '-fromconf'}) req = Request.blank('/v1/a/c/o', @@ -119,7 +119,7 @@ class TestCatchErrors(unittest.TestCase): self.assertEquals(self.logger.txn_id, None) def start_response(status, headers, exc_info=None): - self.assert_('X-Trans-Id' in (x[0] for x in headers)) + self.assertTrue('X-Trans-Id' in (x[0] for x in headers)) app = catch_errors.CatchErrorMiddleware( FakeApp(), {'trans_id_suffix': '-fromconf'}) req = Request.blank('/v1/a/c/o', @@ -132,7 +132,7 @@ class TestCatchErrors(unittest.TestCase): self.assertEquals(self.logger.txn_id, None) def start_response(status, headers, exc_info=None): - self.assert_('X-Trans-Id' in (x[0] for x in headers)) + self.assertTrue('X-Trans-Id' in (x[0] for x in headers)) app = catch_errors.CatchErrorMiddleware(FakeApp(), {}) req = Request.blank('/v1/a/c/o', headers={'X-Trans-Id-Extra': 'xan than"gum'}) diff --git a/test/unit/common/middleware/test_formpost.py 
b/test/unit/common/middleware/test_formpost.py index abc11edb85..6b7ecead17 100644 --- a/test/unit/common/middleware/test_formpost.py +++ b/test/unit/common/middleware/test_formpost.py @@ -16,9 +16,11 @@ import hmac import unittest from hashlib import sha1 -from StringIO import StringIO from time import time +import six +from six import BytesIO + from swift.common.swob import Request, Response from swift.common.middleware import tempauth, formpost from swift.common.utils import split_path @@ -42,13 +44,13 @@ class FakeApp(object): if self.check_no_query_string and env.get('QUERY_STRING'): raise Exception('Query string %s should have been discarded!' % env['QUERY_STRING']) - body = '' + body = b'' while True: chunk = env['wsgi.input'].read() if not chunk: break body += chunk - env['wsgi.input'] = StringIO(body) + env['wsgi.input'] = BytesIO(body) self.requests.append(Request.blank('', environ=env)) if env.get('swift.authorize_override') and \ env.get('REMOTE_USER') != '.wsgi.pre_authed': @@ -72,39 +74,40 @@ class TestCappedFileLikeObject(unittest.TestCase): def test_whole(self): self.assertEquals( - formpost._CappedFileLikeObject(StringIO('abc'), 10).read(), 'abc') + formpost._CappedFileLikeObject(BytesIO(b'abc'), 10).read(), + b'abc') def test_exceeded(self): exc = None try: - formpost._CappedFileLikeObject(StringIO('abc'), 2).read() + formpost._CappedFileLikeObject(BytesIO(b'abc'), 2).read() except EOFError as err: exc = err self.assertEquals(str(exc), 'max_file_size exceeded') def test_whole_readline(self): - fp = formpost._CappedFileLikeObject(StringIO('abc\ndef'), 10) - self.assertEquals(fp.readline(), 'abc\n') - self.assertEquals(fp.readline(), 'def') - self.assertEquals(fp.readline(), '') + fp = formpost._CappedFileLikeObject(BytesIO(b'abc\ndef'), 10) + self.assertEquals(fp.readline(), b'abc\n') + self.assertEquals(fp.readline(), b'def') + self.assertEquals(fp.readline(), b'') def test_exceeded_readline(self): - fp = 
formpost._CappedFileLikeObject(StringIO('abc\ndef'), 5) - self.assertEquals(fp.readline(), 'abc\n') + fp = formpost._CappedFileLikeObject(BytesIO(b'abc\ndef'), 5) + self.assertEquals(fp.readline(), b'abc\n') exc = None try: - self.assertEquals(fp.readline(), 'def') + self.assertEquals(fp.readline(), b'def') except EOFError as err: exc = err self.assertEquals(str(exc), 'max_file_size exceeded') def test_read_sized(self): - fp = formpost._CappedFileLikeObject(StringIO('abcdefg'), 10) - self.assertEquals(fp.read(2), 'ab') - self.assertEquals(fp.read(2), 'cd') - self.assertEquals(fp.read(2), 'ef') - self.assertEquals(fp.read(2), 'g') - self.assertEquals(fp.read(2), '') + fp = formpost._CappedFileLikeObject(BytesIO(b'abcdefg'), 10) + self.assertEquals(fp.read(2), b'ab') + self.assertEquals(fp.read(2), b'cd') + self.assertEquals(fp.read(2), b'ef') + self.assertEquals(fp.read(2), b'g') + self.assertEquals(fp.read(2), b'') class TestFormPost(unittest.TestCase): @@ -195,7 +198,9 @@ class TestFormPost(unittest.TestCase): '------WebKitFormBoundaryNcxTqxSlX7t4TDkR--', '', ] - wsgi_errors = StringIO() + if six.PY3: + body = [line.encode('utf-8') for line in body] + wsgi_errors = six.StringIO() env = { 'CONTENT_TYPE': 'multipart/form-data; ' 'boundary=----WebKitFormBoundaryNcxTqxSlX7t4TDkR', @@ -241,7 +246,7 @@ class TestFormPost(unittest.TestCase): key = 'abc' sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() - 10), key) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) self.app = FakeApp(iter([('201 Created', {}, ''), @@ -281,7 +286,7 @@ class TestFormPost(unittest.TestCase): '%s\n%s\n%s\n%s\n%s' % ( path, redirect, max_file_size, max_file_count, expires), sha1).hexdigest() - wsgi_input = StringIO('\r\n'.join([ + wsgi_input = '\r\n'.join([ '------WebKitFormBoundaryNcxTqxSlX7t4TDkR', 'Content-Disposition: 
form-data; name="redirect"', '', @@ -321,8 +326,11 @@ class TestFormPost(unittest.TestCase): '', '------WebKitFormBoundaryNcxTqxSlX7t4TDkR--', '', - ])) - wsgi_errors = StringIO() + ]) + if six.PY3: + wsgi_input = wsgi_input.encode('utf-8') + wsgi_input = BytesIO(wsgi_input) + wsgi_errors = six.StringIO() env = { 'CONTENT_TYPE': 'multipart/form-data; ' 'boundary=----WebKitFormBoundaryNcxTqxSlX7t4TDkR', @@ -395,7 +403,7 @@ class TestFormPost(unittest.TestCase): '%s\n%s\n%s\n%s\n%s' % ( path, redirect, max_file_size, max_file_count, expires), sha1).hexdigest() - wsgi_input = StringIO('\r\n'.join([ + wsgi_input = '\r\n'.join([ '-----------------------------168072824752491622650073', 'Content-Disposition: form-data; name="redirect"', '', @@ -435,8 +443,11 @@ class TestFormPost(unittest.TestCase): '', '-----------------------------168072824752491622650073--', '' - ])) - wsgi_errors = StringIO() + ]) + if six.PY3: + wsgi_input = wsgi_input.encode('utf-8') + wsgi_input = BytesIO(wsgi_input) + wsgi_errors = six.StringIO() env = { 'CONTENT_TYPE': 'multipart/form-data; ' 'boundary=---------------------------168072824752491622650073', @@ -508,7 +519,7 @@ class TestFormPost(unittest.TestCase): '%s\n%s\n%s\n%s\n%s' % ( path, redirect, max_file_size, max_file_count, expires), sha1).hexdigest() - wsgi_input = StringIO('\r\n'.join([ + wsgi_input = '\r\n'.join([ '------WebKitFormBoundaryq3CFxUjfsDMu8XsA', 'Content-Disposition: form-data; name="redirect"', '', @@ -548,8 +559,11 @@ class TestFormPost(unittest.TestCase): '', '------WebKitFormBoundaryq3CFxUjfsDMu8XsA--', '' - ])) - wsgi_errors = StringIO() + ]) + if six.PY3: + wsgi_input = wsgi_input.encode('utf-8') + wsgi_input = BytesIO(wsgi_input) + wsgi_errors = six.StringIO() env = { 'CONTENT_TYPE': 'multipart/form-data; ' 'boundary=----WebKitFormBoundaryq3CFxUjfsDMu8XsA', @@ -624,7 +638,7 @@ class TestFormPost(unittest.TestCase): '%s\n%s\n%s\n%s\n%s' % ( path, redirect, max_file_size, max_file_count, expires), sha1).hexdigest() - 
wsgi_input = StringIO('\r\n'.join([ + wsgi_input = '\r\n'.join([ '-----------------------------7db20d93017c', 'Content-Disposition: form-data; name="redirect"', '', @@ -664,8 +678,11 @@ class TestFormPost(unittest.TestCase): '', '-----------------------------7db20d93017c--', '' - ])) - wsgi_errors = StringIO() + ]) + if six.PY3: + wsgi_input = wsgi_input.encode('utf-8') + wsgi_input = BytesIO(wsgi_input) + wsgi_errors = six.StringIO() env = { 'CONTENT_TYPE': 'multipart/form-data; ' 'boundary=---------------------------7db20d93017c', @@ -729,7 +746,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', 'http://brim.net', 5, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('XX' + '\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'XX' + b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) env['swift.container/AUTH_test/container'] = {'meta': {}} @@ -765,7 +782,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', 'http://brim.net', 5, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) env['swift.container/AUTH_test/container'] = {'meta': {}} @@ -796,7 +813,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', 'http://brim.net', 1024, 1, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) env['swift.container/AUTH_test/container'] = {'meta': {}} @@ -837,7 +854,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) env['QUERY_STRING'] = 
'this=should¬=get&passed' - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) env['swift.container/AUTH_test/container'] = {'meta': {}} @@ -872,7 +889,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', 'http://brim.net', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) env['swift.container/AUTH_test/container'] = {'meta': {}} @@ -915,7 +932,7 @@ class TestFormPost(unittest.TestCase): # Tack on an extra char to redirect, but shouldn't matter since it # should get truncated off on read. redirect += 'b' - env['wsgi.input'] = StringIO('\r\n'.join([ + wsgi_input = '\r\n'.join([ '------WebKitFormBoundaryNcxTqxSlX7t4TDkR', 'Content-Disposition: form-data; name="redirect"', '', @@ -955,7 +972,10 @@ class TestFormPost(unittest.TestCase): '', '------WebKitFormBoundaryNcxTqxSlX7t4TDkR--', '', - ])) + ]) + if six.PY3: + wsgi_input = wsgi_input.encode('utf-8') + env['wsgi.input'] = BytesIO(wsgi_input) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) env['swift.container/AUTH_test/container'] = {'meta': {}} @@ -1000,7 +1020,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', redirect, max_file_size, max_file_count, expires, key) - env['wsgi.input'] = StringIO('\r\n'.join([ + wsgi_input = '\r\n'.join([ '------WebKitFormBoundaryNcxTqxSlX7t4TDkR', 'Content-Disposition: form-data; name="redirect"', '', @@ -1023,7 +1043,10 @@ class TestFormPost(unittest.TestCase): sig, '------WebKitFormBoundaryNcxTqxSlX7t4TDkR--', '', - ])) + ]) + if six.PY3: + wsgi_input = wsgi_input.encode('utf-8') + env['wsgi.input'] = BytesIO(wsgi_input) env['swift.account/AUTH_test'] = 
self._fake_cache_env( 'AUTH_test', [key]) env['swift.container/AUTH_test/container'] = {'meta': {}} @@ -1063,7 +1086,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', 'http://redirect', 1024, 10, int(time() + 86400), key, user_agent=False) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) env['swift.container/AUTH_test/container'] = {'meta': {}} @@ -1084,7 +1107,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', 'http://redirect', 1024, 10, int(time() + 86400), key, user_agent=False) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) env['swift.container/AUTH_test/container'] = {'meta': {}} @@ -1112,7 +1135,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', 'http://redirect', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) # Stick it in X-Account-Meta-Temp-URL-Key-2 and make sure we get it env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', ['bert', key]) @@ -1149,7 +1172,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', 'http://redirect', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env('AUTH_test') # Stick it in X-Container-Meta-Temp-URL-Key-2 and ensure we get it env['swift.container/AUTH_test/container'] = {'meta': meta} @@ -1175,7 +1198,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( 
'/v1/AUTH_test/container', 'http://redirect', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) env['swift.container/AUTH_test/container'] = {'meta': {}} @@ -1213,7 +1236,7 @@ class TestFormPost(unittest.TestCase): sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', 'http://redirect?one=two', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) env['swift.container/AUTH_test/container'] = {'meta': {}} @@ -1251,7 +1274,7 @@ class TestFormPost(unittest.TestCase): key = 'abc' sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) env['swift.container/AUTH_test/container'] = {'meta': {}} @@ -1288,7 +1311,7 @@ class TestFormPost(unittest.TestCase): key = 'abc' sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() - 10), key) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) self.app = FakeApp(iter([('201 Created', {}, ''), @@ -1321,7 +1344,7 @@ class TestFormPost(unittest.TestCase): key = 'abc' sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) # Change key to invalidate sig env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key + ' is bogus now']) @@ -1355,7 +1378,7 @@ class 
TestFormPost(unittest.TestCase): key = 'abc' sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('XX' + '\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'XX' + b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) self.app = FakeApp(iter([('201 Created', {}, ''), @@ -1388,7 +1411,7 @@ class TestFormPost(unittest.TestCase): key = 'abc' sig, env, body = self._make_sig_env_body( '/v2/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) self.app = FakeApp(iter([('201 Created', {}, ''), @@ -1421,7 +1444,7 @@ class TestFormPost(unittest.TestCase): key = 'abc' sig, env, body = self._make_sig_env_body( '//AUTH_test/container', '', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) self.app = FakeApp(iter([('201 Created', {}, ''), @@ -1454,7 +1477,7 @@ class TestFormPost(unittest.TestCase): key = 'abc' sig, env, body = self._make_sig_env_body( '/v1//container', '', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) self.app = FakeApp(iter([('201 Created', {}, ''), @@ -1487,7 +1510,7 @@ class TestFormPost(unittest.TestCase): key = 'abc' sig, env, body = self._make_sig_env_body( '/v1/AUTH_tst/container', '', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) self.app = FakeApp(iter([ @@ -1522,7 
+1545,7 @@ class TestFormPost(unittest.TestCase): key = 'abc' sig, env, body = self._make_sig_env_body( '/v1/AUTH_test', '', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) self.app = FakeApp(iter([('201 Created', {}, ''), @@ -1560,7 +1583,7 @@ class TestFormPost(unittest.TestCase): if v == str(expires): body[i] = 'badvalue' break - env['wsgi.input'] = StringIO('\r\n'.join(body)) + env['wsgi.input'] = BytesIO(b'\r\n'.join(body)) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) self.app = FakeApp(iter([('201 Created', {}, ''), @@ -1600,7 +1623,8 @@ class TestFormPost(unittest.TestCase): key = 'abc' sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(x_delete_body_part + body)) + wsgi_input = b'\r\n'.join(x_delete_body_part + body) + env['wsgi.input'] = BytesIO(wsgi_input) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) env['swift.container/AUTH_test/container'] = {'meta': {}} @@ -1642,7 +1666,8 @@ class TestFormPost(unittest.TestCase): key = 'abc' sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(x_delete_body_part + body)) + wsgi_input = b'\r\n'.join(x_delete_body_part + body) + env['wsgi.input'] = BytesIO(wsgi_input) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) self.app = FakeApp(iter([('201 Created', {}, ''), @@ -1676,7 +1701,8 @@ class TestFormPost(unittest.TestCase): key = 'abc' sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(x_delete_body_part + body)) + wsgi_input = b'\r\n'.join(x_delete_body_part + body) + 
env['wsgi.input'] = BytesIO(wsgi_input) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) env['swift.container/AUTH_test/container'] = {'meta': {}} @@ -1718,7 +1744,8 @@ class TestFormPost(unittest.TestCase): key = 'abc' sig, env, body = self._make_sig_env_body( '/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key) - env['wsgi.input'] = StringIO('\r\n'.join(x_delete_body_part + body)) + wsgi_input = b'\r\n'.join(x_delete_body_part + body) + env['wsgi.input'] = BytesIO(wsgi_input) env['swift.account/AUTH_test'] = self._fake_cache_env( 'AUTH_test', [key]) self.app = FakeApp(iter([('201 Created', {}, ''), diff --git a/test/unit/common/middleware/test_list_endpoints.py b/test/unit/common/middleware/test_list_endpoints.py index 2537d0ffdc..00dd426b93 100644 --- a/test/unit/common/middleware/test_list_endpoints.py +++ b/test/unit/common/middleware/test_list_endpoints.py @@ -105,7 +105,8 @@ class TestListEndpoints(unittest.TestCase): def FakeGetInfo(self, env, app, swift_source=None): info = {'status': 0, 'sync_key': None, 'meta': {}, 'cors': {'allow_origin': None, 'expose_headers': None, - 'max_age': None}, 'sysmeta': {}, 'read_acl': None, + 'max_age': None}, + 'sysmeta': {}, 'read_acl': None, 'object_count': None, 'write_acl': None, 'versions': None, 'bytes': None} info['storage_policy'] = self.policy_to_test diff --git a/test/unit/common/middleware/test_memcache.py b/test/unit/common/middleware/test_memcache.py index d45fca7e89..44c4fc3987 100644 --- a/test/unit/common/middleware/test_memcache.py +++ b/test/unit/common/middleware/test_memcache.py @@ -16,9 +16,9 @@ import os from textwrap import dedent import unittest -from ConfigParser import NoSectionError, NoOptionError import mock +from six.moves.configparser import NoSectionError, NoOptionError from swift.common.middleware import memcache from swift.common.memcached import MemcacheRing diff --git a/test/unit/common/middleware/test_proxy_logging.py 
b/test/unit/common/middleware/test_proxy_logging.py index 3b2f973a96..047a4b6550 100644 --- a/test/unit/common/middleware/test_proxy_logging.py +++ b/test/unit/common/middleware/test_proxy_logging.py @@ -15,9 +15,10 @@ import unittest from urllib import unquote -import cStringIO as StringIO from logging.handlers import SysLogHandler + import mock +from six import BytesIO from test.unit import FakeLogger from swift.common.utils import get_logger @@ -194,7 +195,7 @@ class TestProxyLogging(unittest.TestCase): app.access_logger = FakeLogger() req = Request.blank(path, environ={ 'REQUEST_METHOD': 'GET', - 'wsgi.input': StringIO.StringIO('4321')}) + 'wsgi.input': BytesIO(b'4321')}) stub_times = [18.0, 20.71828182846] iter_response = app(req.environ, lambda *_: None) self.assertEqual('7654321', ''.join(iter_response)) @@ -213,7 +214,7 @@ class TestProxyLogging(unittest.TestCase): req = Request.blank(path, environ={ 'REQUEST_METHOD': 'GET', 'swift.proxy_access_log_made': True, - 'wsgi.input': StringIO.StringIO('4321')}) + 'wsgi.input': BytesIO(b'4321')}) stub_times = [18.0, 20.71828182846] iter_response = app(req.environ, lambda *_: None) self.assertEqual('7654321', ''.join(iter_response)) @@ -229,7 +230,7 @@ class TestProxyLogging(unittest.TestCase): app.access_logger = FakeLogger() req = Request.blank(path, environ={ 'REQUEST_METHOD': 'PUT', - 'wsgi.input': StringIO.StringIO('654321')}) + 'wsgi.input': BytesIO(b'654321')}) # (it's not a GET, so time() doesn't have a 2nd call) stub_times = [58.2, 58.2 + 7.3321] iter_response = app(req.environ, lambda *_: None) @@ -348,7 +349,7 @@ class TestProxyLogging(unittest.TestCase): [x for x in resp] log_parts = self._log_parts(app) headers = unquote(log_parts[14]).split('\n') - self.assert_('Host: localhost:80' in headers) + self.assertTrue('Host: localhost:80' in headers) def test_access_log_headers_only(self): app = proxy_logging.ProxyLoggingMiddleware( @@ -365,10 +366,10 @@ class TestProxyLogging(unittest.TestCase): [x for x in 
resp] log_parts = self._log_parts(app) headers = unquote(log_parts[14]).split('\n') - self.assert_('First: 1' in headers) - self.assert_('Second: 2' in headers) - self.assert_('Third: 3' not in headers) - self.assert_('Host: localhost:80' not in headers) + self.assertTrue('First: 1' in headers) + self.assertTrue('Second: 2' in headers) + self.assertTrue('Third: 3' not in headers) + self.assertTrue('Host: localhost:80' not in headers) def test_upload_size(self): app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), @@ -377,7 +378,7 @@ class TestProxyLogging(unittest.TestCase): req = Request.blank( '/v1/a/c/o/foo', environ={'REQUEST_METHOD': 'PUT', - 'wsgi.input': StringIO.StringIO('some stuff')}) + 'wsgi.input': BytesIO(b'some stuff')}) resp = app(req.environ, start_response) # exhaust generator [x for x in resp] @@ -395,8 +396,7 @@ class TestProxyLogging(unittest.TestCase): req = Request.blank( '/v1/a/c', environ={'REQUEST_METHOD': 'POST', - 'wsgi.input': StringIO.StringIO( - 'some stuff\nsome other stuff\n')}) + 'wsgi.input': BytesIO(b'some stuff\nsome other stuff\n')}) resp = app(req.environ, start_response) # exhaust generator [x for x in resp] @@ -488,8 +488,8 @@ class TestProxyLogging(unittest.TestCase): def test_filter(self): factory = proxy_logging.filter_factory({}) - self.assert_(callable(factory)) - self.assert_(callable(factory(FakeApp()))) + self.assertTrue(callable(factory)) + self.assertTrue(callable(factory(FakeApp()))) def test_unread_body(self): app = proxy_logging.ProxyLoggingMiddleware( diff --git a/test/unit/common/middleware/test_quotas.py b/test/unit/common/middleware/test_quotas.py index 56a7780e2e..85211257de 100644 --- a/test/unit/common/middleware/test_quotas.py +++ b/test/unit/common/middleware/test_quotas.py @@ -101,8 +101,8 @@ class TestContainerQuotas(unittest.TestCase): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT', - 'swift.object/a/c2/o2': {'length': 10}, - 'swift.cache': cache}, + 'swift.object/a/c2/o2': 
{'length': 10}, + 'swift.cache': cache}, headers={'x-copy-from': '/c2/o2'}) res = req.get_response(app) self.assertEquals(res.status_int, 413) @@ -114,8 +114,8 @@ class TestContainerQuotas(unittest.TestCase): req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'COPY', - 'swift.object/a/c2/o2': {'length': 10}, - 'swift.cache': cache}, + 'swift.object/a/c2/o2': {'length': 10}, + 'swift.cache': cache}, headers={'Destination': '/c/o'}) res = req.get_response(app) self.assertEquals(res.status_int, 413) @@ -136,8 +136,8 @@ class TestContainerQuotas(unittest.TestCase): cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}}) req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT', - 'swift.object/a/c2/o2': {'length': 10}, - 'swift.cache': cache}, + 'swift.object/a/c2/o2': {'length': 10}, + 'swift.cache': cache}, headers={'x-copy-from': '/c2/o2'}) res = req.get_response(app) self.assertEquals(res.status_int, 200) @@ -147,8 +147,8 @@ class TestContainerQuotas(unittest.TestCase): cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}}) req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'COPY', - 'swift.object/a/c2/o2': {'length': 10}, - 'swift.cache': cache}, + 'swift.object/a/c2/o2': {'length': 10}, + 'swift.cache': cache}, headers={'Destination': '/c/o'}) res = req.get_response(app) self.assertEquals(res.status_int, 200) @@ -158,8 +158,8 @@ class TestContainerQuotas(unittest.TestCase): cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}}) req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT', - 'swift.object/a/c2/o2': {'length': 10}, - 'swift.cache': cache}, + 'swift.object/a/c2/o2': {'length': 10}, + 'swift.cache': cache}, headers={'x-copy-from': '/c2/o3'}) res = req.get_response(app) self.assertEquals(res.status_int, 200) @@ -169,7 +169,7 @@ class TestContainerQuotas(unittest.TestCase): cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}}) req = Request.blank('/v1/a/c/o', 
environ={'REQUEST_METHOD': 'PUT', - 'swift.cache': cache}, + 'swift.cache': cache}, headers={'x-copy-from': 'bad_path'}) res = req.get_response(app) self.assertEquals(res.status_int, 412) @@ -179,8 +179,8 @@ class TestContainerQuotas(unittest.TestCase): cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}}) req = Request.blank('/v1/a/c2/o3', environ={'REQUEST_METHOD': 'COPY', - 'swift.object/a/c2/o2': {'length': 10}, - 'swift.cache': cache}, + 'swift.object/a/c2/o2': {'length': 10}, + 'swift.cache': cache}, headers={'Destination': '/c/o'}) res = req.get_response(app) self.assertEquals(res.status_int, 200) @@ -201,8 +201,8 @@ class TestContainerQuotas(unittest.TestCase): cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}}) req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT', - 'swift.object/a/c2/o2': {'length': 10}, - 'swift.cache': cache}, + 'swift.object/a/c2/o2': {'length': 10}, + 'swift.cache': cache}, headers={'x-copy-from': '/c2/o2'}) res = req.get_response(app) self.assertEquals(res.status_int, 413) @@ -213,7 +213,7 @@ class TestContainerQuotas(unittest.TestCase): cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}}) req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'COPY', - 'swift.cache': cache}, + 'swift.cache': cache}, headers={'Destination': '/c/o'}) res = req.get_response(app) self.assertEquals(res.status_int, 413) @@ -227,10 +227,10 @@ class TestContainerQuotas(unittest.TestCase): 'status': 200, 'object_count': 1} req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'COPY', - 'swift.container/a/c': a_c_cache, - 'swift.container/a2/c': a2_c_cache}, + 'swift.container/a/c': a_c_cache, + 'swift.container/a2/c': a2_c_cache}, headers={'Destination': '/c/o', - 'Destination-Account': 'a2'}) + 'Destination-Account': 'a2'}) res = req.get_response(app) self.assertEquals(res.status_int, 413) self.assertEquals(res.body, 'Upload exceeds quota.') @@ -243,10 +243,10 @@ class 
TestContainerQuotas(unittest.TestCase): 'status': 200, 'object_count': 1} req = Request.blank('/v1/a2/c/o', environ={'REQUEST_METHOD': 'PUT', - 'swift.container/a/c': a_c_cache, - 'swift.container/a2/c': a2_c_cache}, + 'swift.container/a/c': a_c_cache, + 'swift.container/a2/c': a2_c_cache}, headers={'X-Copy-From': '/c2/o2', - 'X-Copy-From-Account': 'a'}) + 'X-Copy-From-Account': 'a'}) res = req.get_response(app) self.assertEquals(res.status_int, 413) self.assertEquals(res.body, 'Upload exceeds quota.') @@ -266,7 +266,7 @@ class TestContainerQuotas(unittest.TestCase): cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}}) req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT', - 'swift.cache': cache}, + 'swift.cache': cache}, headers={'x-copy-from': '/c2/o2'}) res = req.get_response(app) self.assertEquals(res.status_int, 200) @@ -276,7 +276,7 @@ class TestContainerQuotas(unittest.TestCase): cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}}) req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'COPY', - 'swift.cache': cache}, + 'swift.cache': cache}, headers={'Destination': '/c/o'}) res = req.get_response(app) self.assertEquals(res.status_int, 200) diff --git a/test/unit/common/middleware/test_ratelimit.py b/test/unit/common/middleware/test_ratelimit.py index 1f9bfc3994..eb9f5e41ac 100644 --- a/test/unit/common/middleware/test_ratelimit.py +++ b/test/unit/common/middleware/test_ratelimit.py @@ -548,7 +548,7 @@ class TestRateLimit(unittest.TestCase): def __call__(self, *args, **kwargs): pass resp = rate_mid.__call__(env, a_callable()) - self.assert_('fake_app' == resp[0]) + self.assertTrue('fake_app' == resp[0]) def test_no_memcache(self): current_rate = 13 diff --git a/test/unit/common/middleware/test_recon.py b/test/unit/common/middleware/test_recon.py index 05a11ce859..8ea659dcaf 100644 --- a/test/unit/common/middleware/test_recon.py +++ b/test/unit/common/middleware/test_recon.py @@ -175,6 +175,9 @@ class 
FakeRecon(object): def fake_driveaudit(self): return {'driveaudittest': "1"} + def fake_time(self): + return {'timetest': "1"} + def nocontent(self): return None @@ -503,6 +506,9 @@ class TestReconSuccess(TestCase): "attempted": 1, "diff": 0, "diff_capped": 0, "empty": 0, "failure": 0, "hashmatch": 0, + "failure_nodes": { + "192.168.0.1": 0, + "192.168.0.2": 0}, "no_change": 2, "remote_merge": 0, "remove": 0, "rsync": 0, "start": 1333044050.855202, @@ -520,6 +526,9 @@ class TestReconSuccess(TestCase): "attempted": 1, "diff": 0, "diff_capped": 0, "empty": 0, "failure": 0, "hashmatch": 0, + "failure_nodes": { + "192.168.0.1": 0, + "192.168.0.2": 0}, "no_change": 2, "remote_merge": 0, "remove": 0, "rsync": 0, "start": 1333044050.855202, @@ -534,6 +543,9 @@ class TestReconSuccess(TestCase): "attempted": 179, "diff": 0, "diff_capped": 0, "empty": 0, "failure": 0, "hashmatch": 0, + "failure_nodes": { + "192.168.0.1": 0, + "192.168.0.2": 0}, "no_change": 358, "remote_merge": 0, "remove": 0, "rsync": 0, "start": 5.5, "success": 358, @@ -552,6 +564,9 @@ class TestReconSuccess(TestCase): "attempted": 179, "diff": 0, "diff_capped": 0, "empty": 0, "failure": 0, "hashmatch": 0, + "failure_nodes": { + "192.168.0.1": 0, + "192.168.0.2": 0}, "no_change": 358, "remote_merge": 0, "remove": 0, "rsync": 0, "start": 5.5, "success": 358, @@ -559,17 +574,40 @@ class TestReconSuccess(TestCase): "replication_last": 1357969645.25}) def test_get_replication_object(self): - from_cache_response = {"object_replication_time": 200.0, - "object_replication_last": 1357962809.15} + from_cache_response = { + "replication_time": 0.2615511417388916, + "replication_stats": { + "attempted": 179, + "failure": 0, "hashmatch": 0, + "failure_nodes": { + "192.168.0.1": 0, + "192.168.0.2": 0}, + "remove": 0, "rsync": 0, + "start": 1333044050.855202, "success": 358}, + "replication_last": 1357969645.25, + "object_replication_time": 0.2615511417388916, + "object_replication_last": 1357969645.25} 
self.fakecache.fakeout_calls = [] self.fakecache.fakeout = from_cache_response rv = self.app.get_replication_info('object') self.assertEquals(self.fakecache.fakeout_calls, - [((['object_replication_time', + [((['replication_time', 'replication_stats', + 'replication_last', 'object_replication_time', 'object_replication_last'], '/var/cache/swift/object.recon'), {})]) - self.assertEquals(rv, {'object_replication_time': 200.0, - 'object_replication_last': 1357962809.15}) + self.assertEquals(rv, { + "replication_time": 0.2615511417388916, + "replication_stats": { + "attempted": 179, + "failure": 0, "hashmatch": 0, + "failure_nodes": { + "192.168.0.1": 0, + "192.168.0.2": 0}, + "remove": 0, "rsync": 0, + "start": 1333044050.855202, "success": 358}, + "replication_last": 1357969645.25, + "object_replication_time": 0.2615511417388916, + "object_replication_last": 1357969645.25}) def test_get_updater_info_container(self): from_cache_response = {"container_updater_sweep": 18.476239919662476} @@ -774,11 +812,11 @@ class TestReconSuccess(TestCase): self.assertEquals(rv, unmounted_resp) def test_get_diskusage(self): - #posix.statvfs_result(f_bsize=4096, f_frsize=4096, f_blocks=1963185, - # f_bfree=1113075, f_bavail=1013351, - # f_files=498736, - # f_ffree=397839, f_favail=397839, f_flag=0, - # f_namemax=255) + # posix.statvfs_result(f_bsize=4096, f_frsize=4096, f_blocks=1963185, + # f_bfree=1113075, f_bavail=1013351, + # f_files=498736, + # f_ffree=397839, f_favail=397839, f_flag=0, + # f_namemax=255) statvfs_content = (4096, 4096, 1963185, 1113075, 1013351, 498736, 397839, 397839, 0, 255) du_resp = [{'device': 'canhazdrive1', 'avail': 4150685696, @@ -815,9 +853,9 @@ class TestReconSuccess(TestCase): self.mockos.ismount_output = True def fake_lstat(*args, **kwargs): - #posix.lstat_result(st_mode=1, st_ino=2, st_dev=3, st_nlink=4, - # st_uid=5, st_gid=6, st_size=7, st_atime=8, - # st_mtime=9, st_ctime=10) + # posix.lstat_result(st_mode=1, st_ino=2, st_dev=3, st_nlink=4, + # 
st_uid=5, st_gid=6, st_size=7, st_atime=8, + # st_mtime=9, st_ctime=10) return stat_result((1, 2, 3, 4, 5, 6, 7, 8, 9, 10)) def fake_exists(*args, **kwargs): @@ -855,6 +893,15 @@ class TestReconSuccess(TestCase): '/var/cache/swift/drive.recon'), {})]) self.assertEquals(rv, {'drive_audit_errors': 7}) + def test_get_time(self): + def fake_time(): + return 1430000000.0 + + with mock.patch("time.time", fake_time): + now = fake_time() + rv = self.app.get_time() + self.assertEquals(rv, now) + class TestReconMiddleware(unittest.TestCase): @@ -867,7 +914,7 @@ class TestReconMiddleware(unittest.TestCase): os.listdir = self.fake_list self.app = recon.ReconMiddleware(FakeApp(), {'object_recon': "true"}) os.listdir = self.real_listdir - #self.app.object_recon = True + # self.app.object_recon = True self.app.get_mem = self.frecon.fake_mem self.app.get_load = self.frecon.fake_load self.app.get_async_info = self.frecon.fake_async @@ -884,6 +931,7 @@ class TestReconMiddleware(unittest.TestCase): self.app.get_quarantine_count = self.frecon.fake_quarantined self.app.get_socket_info = self.frecon.fake_sockstat self.app.get_driveaudit_error = self.frecon.fake_driveaudit + self.app.get_time = self.frecon.fake_time def test_recon_get_mem(self): get_mem_resp = ['{"memtest": "1"}'] @@ -927,21 +975,21 @@ class TestReconMiddleware(unittest.TestCase): def test_recon_get_replication_all(self): get_replication_resp = ['{"replicationtest": "1"}'] - #test account + # test account req = Request.blank('/recon/replication/account', environ={'REQUEST_METHOD': 'GET'}) resp = self.app(req.environ, start_response) self.assertEquals(resp, get_replication_resp) self.assertEquals(self.frecon.fake_replication_rtype, 'account') self.frecon.fake_replication_rtype = None - #test container + # test container req = Request.blank('/recon/replication/container', environ={'REQUEST_METHOD': 'GET'}) resp = self.app(req.environ, start_response) self.assertEquals(resp, get_replication_resp) 
self.assertEquals(self.frecon.fake_replication_rtype, 'container') self.frecon.fake_replication_rtype = None - #test object + # test object req = Request.blank('/recon/replication/object', environ={'REQUEST_METHOD': 'GET'}) resp = self.app(req.environ, start_response) @@ -1118,5 +1166,12 @@ class TestReconMiddleware(unittest.TestCase): resp = self.app(req.environ, start_response) self.assertEquals(resp, get_driveaudit_resp) + def test_recon_get_time(self): + get_time_resp = ['{"timetest": "1"}'] + req = Request.blank('/recon/time', + environ={'REQUEST_METHOD': 'GET'}) + resp = self.app(req.environ, start_response) + self.assertEquals(resp, get_time_resp) + if __name__ == '__main__': unittest.main() diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py index 4a79d085ee..f0315c8ed3 100644 --- a/test/unit/common/middleware/test_slo.py +++ b/test/unit/common/middleware/test_slo.py @@ -116,7 +116,7 @@ class TestSloMiddleware(SloTestCase): '/v1/a/c/o', headers={'x-static-large-object': "true"}, environ={'REQUEST_METHOD': 'PUT'}) resp = ''.join(self.slo(req.environ, fake_start_response)) - self.assert_( + self.assertTrue( resp.startswith('X-Static-Large-Object is a reserved header')) def test_parse_input(self): @@ -370,12 +370,13 @@ class TestSloPutManifest(SloTestCase): status, headers, body = self.call_app(req) headers = dict(headers) manifest_data = json.loads(body) - self.assert_(headers['Content-Type'].endswith(';swift_bytes=3')) + self.assertTrue(headers['Content-Type'].endswith(';swift_bytes=3')) self.assertEquals(len(manifest_data), 2) self.assertEquals(manifest_data[0]['hash'], 'a') self.assertEquals(manifest_data[0]['bytes'], 1) - self.assert_(not manifest_data[0]['last_modified'].startswith('2012')) - self.assert_(manifest_data[1]['last_modified'].startswith('2012')) + self.assertTrue( + not manifest_data[0]['last_modified'].startswith('2012')) + self.assertTrue(manifest_data[1]['last_modified'].startswith('2012')) def 
test_handle_multipart_put_check_data_bad(self): bad_data = json.dumps( diff --git a/test/unit/common/middleware/test_staticweb.py b/test/unit/common/middleware/test_staticweb.py index fe361fde7a..c0836a341b 100644 --- a/test/unit/common/middleware/test_staticweb.py +++ b/test/unit/common/middleware/test_staticweb.py @@ -487,7 +487,7 @@ class TestStaticWeb(unittest.TestCase): def test_container3indexhtml(self): resp = Request.blank('/v1/a/c3/').get_response(self.test_staticweb) self.assertEquals(resp.status_int, 200) - self.assert_('Test main index.html file.' in resp.body) + self.assertTrue('Test main index.html file.' in resp.body) def test_container3subsubdir(self): resp = Request.blank( @@ -504,10 +504,10 @@ class TestStaticWeb(unittest.TestCase): resp = Request.blank( '/v1/a/c3/subdir/').get_response(self.test_staticweb) self.assertEquals(resp.status_int, 200) - self.assert_('Listing of /v1/a/c3/subdir/' in resp.body) - self.assert_('' in resp.body) - self.assert_('' in resp.body) + self.assertTrue('' not in resp.body) - self.assert_('' not in resp.body) + self.assertTrue('c11 subdir index' in resp.body) + self.assertTrue('

c11 subdir index

' in resp.body) def test_container11subdirmarkermatchdirtype(self): resp = Request.blank('/v1/a/c11a/subdir/').get_response( self.test_staticweb) self.assertEquals(resp.status_int, 404) - self.assert_('Index File Not Found' in resp.body) + self.assertTrue('Index File Not Found' in resp.body) def test_container11subdirmarkeraltdirtype(self): resp = Request.blank('/v1/a/c11a/subdir2/').get_response( @@ -678,20 +678,20 @@ class TestStaticWeb(unittest.TestCase): resp = Request.blank('/v1/a/c12/').get_response( self.test_staticweb) self.assertEquals(resp.status_int, 200) - self.assert_('index file' in resp.body) + self.assertTrue('index file' in resp.body) def test_container_404_has_css(self): resp = Request.blank('/v1/a/c13/').get_response( self.test_staticweb) self.assertEquals(resp.status_int, 404) - self.assert_('listing.css' in resp.body) + self.assertTrue('listing.css' in resp.body) def test_container_404_has_no_css(self): resp = Request.blank('/v1/a/c7/').get_response( self.test_staticweb) self.assertEquals(resp.status_int, 404) - self.assert_('listing.css' not in resp.body) - self.assert_(' 0) + self.assertTrue(p.stats is not None) + self.assertTrue(len(p.stats.keys()) > 0) class TestProfileMiddleware(unittest.TestCase): @@ -109,9 +110,9 @@ class TestProfileMiddleware(unittest.TestCase): self.headers = headers def test_combine_body_qs(self): - body = "profile=all&sort=time&limit=-1&fulldirs=1&nfl_filter=__call__"\ - + "&query=query&metric=nc&format=default" - wsgi_input = StringIO.StringIO(body) + body = (b"profile=all&sort=time&limit=-1&fulldirs=1" + b"&nfl_filter=__call__&query=query&metric=nc&format=default") + wsgi_input = BytesIO(body) environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'profile=all&format=json', 'wsgi.input': wsgi_input} @@ -127,19 +128,18 @@ class TestProfileMiddleware(unittest.TestCase): self.assertEqual(query_dict['format'], ['default']) def test_call(self): - body = "sort=time&limit=-1&fulldirs=1&nfl_filter="\ - + "&metric=nc" - 
wsgi_input = StringIO.StringIO(body + '&query=query') + body = b"sort=time&limit=-1&fulldirs=1&nfl_filter=&metric=nc" + wsgi_input = BytesIO(body + b'&query=query') environ = {'HTTP_HOST': 'localhost:8080', 'PATH_INFO': '/__profile__', 'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'profile=all&format=json', 'wsgi.input': wsgi_input} resp = self.app(environ, self.start_response) - self.assert_(resp[0].find('') > 0, resp) + self.assertTrue(resp[0].find('') > 0, resp) self.assertEqual(self.got_statuses, ['200 OK']) self.assertEqual(self.headers, [('content-type', 'text/html')]) - wsgi_input = StringIO.StringIO(body + '&plot=plot') + wsgi_input = BytesIO(body + b'&plot=plot') environ['wsgi.input'] = wsgi_input if PLOTLIB_INSTALLED: resp = self.app(environ, self.start_response) @@ -148,29 +148,28 @@ class TestProfileMiddleware(unittest.TestCase): else: resp = self.app(environ, self.start_response) self.assertEqual(self.got_statuses, ['500 Internal Server Error']) - wsgi_input = StringIO.StringIO(body + - '&download=download&format=default') + wsgi_input = BytesIO(body + '&download=download&format=default') environ['wsgi.input'] = wsgi_input resp = self.app(environ, self.start_response) self.assertEqual(self.headers, [('content-type', HTMLViewer.format_dict['default'])]) - wsgi_input = StringIO.StringIO(body + '&download=download&format=json') + wsgi_input = BytesIO(body + '&download=download&format=json') environ['wsgi.input'] = wsgi_input resp = self.app(environ, self.start_response) - self.assert_(self.headers == [('content-type', - HTMLViewer.format_dict['json'])]) + self.assertTrue(self.headers == [('content-type', + HTMLViewer.format_dict['json'])]) env2 = environ.copy() env2['REQUEST_METHOD'] = 'DELETE' resp = self.app(env2, self.start_response) self.assertEqual(self.got_statuses, ['405 Method Not Allowed'], resp) # use a totally bogus profile identifier - wsgi_input = StringIO.StringIO(body + '&profile=ABC&download=download') + wsgi_input = BytesIO(body + 
b'&profile=ABC&download=download') environ['wsgi.input'] = wsgi_input resp = self.app(environ, self.start_response) self.assertEqual(self.got_statuses, ['404 Not Found'], resp) - wsgi_input = StringIO.StringIO(body + '&download=download&format=ods') + wsgi_input = BytesIO(body + b'&download=download&format=ods') environ['wsgi.input'] = wsgi_input resp = self.app(environ, self.start_response) if ODFLIB_INSTALLED: @@ -181,13 +180,13 @@ class TestProfileMiddleware(unittest.TestCase): def test_dump_checkpoint(self): self.app.dump_checkpoint() - self.assert_(self.app.last_dump_at is not None) + self.assertTrue(self.app.last_dump_at is not None) def test_renew_profile(self): old_profiler = self.app.profiler self.app.renew_profile() new_profiler = self.app.profiler - self.assert_(old_profiler != new_profiler) + self.assertTrue(old_profiler != new_profiler) class Test_profile_log(unittest.TestCase): @@ -224,7 +223,7 @@ class Test_profile_log(unittest.TestCase): self.assertEquals(self.profile_log1.get_all_pids(), sorted(self.pids1, reverse=True)) for pid in self.profile_log2.get_all_pids(): - self.assert_(pid.split('-')[0] in self.pids2) + self.assertTrue(pid.split('-')[0] in self.pids2) def test_clear(self): self.profile_log1.clear('123') @@ -262,22 +261,22 @@ class Test_profile_log(unittest.TestCase): self.assertEquals(len(log_files), len(self.pids2)) log_files = self.profile_log2.get_logfiles('current') self.assertEqual(len(log_files), 1) - self.assert_(log_files[0].find(self.log_filename_prefix2 + - str(os.getpid())) > -1) + self.assertTrue(log_files[0].find(self.log_filename_prefix2 + + str(os.getpid())) > -1) log_files = self.profile_log2.get_logfiles(self.pids2[0]) self.assertEqual(len(log_files), 1) - self.assert_(log_files[0].find(self.log_filename_prefix2 + - self.pids2[0]) > -1) + self.assertTrue(log_files[0].find(self.log_filename_prefix2 + + self.pids2[0]) > -1) def test_dump_profile(self): prof = xprofile.get_profiler('eventlet.green.profile') 
prof.runctx('import os;os.getcwd();', globals(), locals()) prof.create_stats() pfn = self.profile_log1.dump_profile(prof, os.getpid()) - self.assert_(os.path.exists(pfn)) + self.assertTrue(os.path.exists(pfn)) os.remove(pfn) pfn = self.profile_log2.dump_profile(prof, os.getpid()) - self.assert_(os.path.exists(pfn)) + self.assertTrue(os.path.exists(pfn)) os.remove(pfn) @@ -298,9 +297,9 @@ class Test_html_viewer(unittest.TestCase): self.log_files.append(self.profile_log.dump_profile(profiler, pid)) self.viewer = HTMLViewer('__profile__', 'eventlet.green.profile', self.profile_log) - body = "profile=123&profile=456&sort=time&sort=nc&limit=10"\ - + "&fulldirs=1&nfl_filter=getcwd&query=query&metric=nc" - wsgi_input = StringIO.StringIO(body) + body = (b"profile=123&profile=456&sort=time&sort=nc&limit=10" + b"&fulldirs=1&nfl_filter=getcwd&query=query&metric=nc") + wsgi_input = BytesIO(body) environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'profile=all', 'wsgi.input': wsgi_input} @@ -345,12 +344,12 @@ class Test_html_viewer(unittest.TestCase): content, headers = self.viewer.render(url, 'GET', path_entries[0], self.query_dict, None) - self.assert_(content is not None) + self.assertTrue(content is not None) self.assertEqual(headers, [('content-type', 'text/html')]) content, headers = self.viewer.render(url, 'POST', path_entries[0], self.query_dict, None) - self.assert_(content is not None) + self.assertTrue(content is not None) self.assertEqual(headers, [('content-type', 'text/html')]) plot_dict = self.query_dict.copy() @@ -375,12 +374,12 @@ class Test_html_viewer(unittest.TestCase): download_dict['download'] = ['download'] content, headers = self.viewer.render(url, 'POST', path_entries[0], download_dict, None) - self.assert_(headers == [('content-type', - self.viewer.format_dict['default'])]) + self.assertTrue(headers == [('content-type', + self.viewer.format_dict['default'])]) content, headers = self.viewer.render(url, 'GET', path_entries[1], self.query_dict, None) - 
self.assert_(isinstance(json.loads(content), dict)) + self.assertTrue(isinstance(json.loads(content), dict)) for method in ['HEAD', 'PUT', 'DELETE', 'XYZMethod']: self.assertRaises(MethodNotAllowed, self.viewer.render, url, @@ -390,34 +389,35 @@ class Test_html_viewer(unittest.TestCase): download_dict['format'] = 'default' content, headers = self.viewer.render(url, 'GET', entry, download_dict, None) - self.assert_(('content-type', self.viewer.format_dict['default']) - in headers, entry) + self.assertTrue( + ('content-type', self.viewer.format_dict['default']) + in headers, entry) download_dict['format'] = 'json' content, headers = self.viewer.render(url, 'GET', entry, download_dict, None) - self.assert_(isinstance(json.loads(content), dict)) + self.assertTrue(isinstance(json.loads(content), dict)) def test_index(self): content, headers = self.viewer.index_page(self.log_files[0:1], profile_id='current') - self.assert_(content.find('') > -1) - self.assert_(headers == [('content-type', 'text/html')]) + self.assertTrue(content.find('') > -1) + self.assertTrue(headers == [('content-type', 'text/html')]) def test_index_all(self): content, headers = self.viewer.index_page(self.log_files, profile_id='all') for f in self.log_files: - self.assert_(content.find(f) > 0, content) - self.assert_(headers == [('content-type', 'text/html')]) + self.assertTrue(content.find(f) > 0, content) + self.assertTrue(headers == [('content-type', 'text/html')]) def test_download(self): content, headers = self.viewer.download(self.log_files) - self.assert_(content is not None) + self.assertTrue(content is not None) self.assertEqual(headers, [('content-type', self.viewer.format_dict['default'])]) content, headers = self.viewer.download(self.log_files, sort='calls', limit=10, nfl_filter='os') - self.assert_(content is not None) + self.assertTrue(content is not None) self.assertEqual(headers, [('content-type', self.viewer.format_dict['default'])]) content, headers = 
self.viewer.download(self.log_files, @@ -426,7 +426,7 @@ class Test_html_viewer(unittest.TestCase): self.viewer.format_dict['default'])]) content, headers = self.viewer.download(self.log_files, output_format='json') - self.assert_(isinstance(json.loads(content), dict)) + self.assertTrue(isinstance(json.loads(content), dict)) self.assertEqual(headers, [('content-type', self.viewer.format_dict['json'])]) content, headers = self.viewer.download(self.log_files, @@ -450,7 +450,7 @@ class Test_html_viewer(unittest.TestCase): def test_plot(self): if PLOTLIB_INSTALLED: content, headers = self.viewer.plot(self.log_files) - self.assert_(content is not None) + self.assertTrue(content is not None) self.assertEqual(headers, [('content-type', 'image/jpg')]) self.assertRaises(NotFoundException, self.viewer.plot, []) else: @@ -459,16 +459,16 @@ class Test_html_viewer(unittest.TestCase): def test_format_source_code(self): nfl_os = '%s:%d(%s)' % (os.__file__[:-1], 136, 'makedirs') - self.assert_('makedirs' in self.viewer.format_source_code(nfl_os)) + self.assertTrue('makedirs' in self.viewer.format_source_code(nfl_os)) self.assertFalse('makedirsXYZ' in self.viewer.format_source_code(nfl_os)) nfl_illegal = '%s:136(makedirs)' % os.__file__ - self.assert_(_('The file type are forbidden to access!') in - self.viewer.format_source_code(nfl_illegal)) + self.assertTrue(_('The file type are forbidden to access!') in + self.viewer.format_source_code(nfl_illegal)) nfl_not_exist = '%s.py:136(makedirs)' % os.__file__ expected_msg = _('Can not access the file %s.') % os.__file__ - self.assert_(expected_msg in - self.viewer.format_source_code(nfl_not_exist)) + self.assertTrue(expected_msg in + self.viewer.format_source_code(nfl_not_exist)) class TestStats2(unittest.TestCase): @@ -500,19 +500,19 @@ class TestStats2(unittest.TestCase): def test_to_json(self): for selection in self.selections: js = self.stats2.to_json(selection) - self.assert_(isinstance(json.loads(js), dict)) - 
self.assert_(json.loads(js)['stats'] is not None) - self.assert_(json.loads(js)['stats'][0] is not None) + self.assertTrue(isinstance(json.loads(js), dict)) + self.assertTrue(json.loads(js)['stats'] is not None) + self.assertTrue(json.loads(js)['stats'][0] is not None) def test_to_ods(self): if ODFLIB_INSTALLED: for selection in self.selections: - self.assert_(self.stats2.to_ods(selection) is not None) + self.assertTrue(self.stats2.to_ods(selection) is not None) def test_to_csv(self): for selection in self.selections: - self.assert_(self.stats2.to_csv(selection) is not None) - self.assert_('function calls' in self.stats2.to_csv(selection)) + self.assertTrue(self.stats2.to_csv(selection) is not None) + self.assertTrue('function calls' in self.stats2.to_csv(selection)) if __name__ == '__main__': diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py index 86a03f8abc..367ea05239 100644 --- a/test/unit/common/ring/test_builder.py +++ b/test/unit/common/ring/test_builder.py @@ -19,7 +19,7 @@ import mock import operator import os import unittest -import cPickle as pickle +import six.moves.cPickle as pickle from array import array from collections import defaultdict from math import ceil @@ -120,14 +120,14 @@ class TestRingBuilder(unittest.TestCase): rb.remove_dev(1) rb.rebalance() r = rb.get_ring() - self.assert_(isinstance(r, ring.RingData)) + self.assertTrue(isinstance(r, ring.RingData)) r2 = rb.get_ring() - self.assert_(r is r2) + self.assertTrue(r is r2) rb.rebalance() r3 = rb.get_ring() - self.assert_(r3 is not r2) + self.assertTrue(r3 is not r2) r4 = rb.get_ring() - self.assert_(r3 is r4) + self.assertTrue(r3 is r4) def test_rebalance_with_seed(self): devs = [(0, 10000), (1, 10001), (2, 10002), (1, 10003)] @@ -358,7 +358,7 @@ class TestRingBuilder(unittest.TestCase): _, balance = rb.rebalance(seed=2) # maybe not *perfect*, but should be close - self.assert_(balance <= 1) + self.assertTrue(balance <= 1) def 
test_multitier_partial(self): # Multitier test, nothing full @@ -1520,8 +1520,8 @@ class TestRingBuilder(unittest.TestCase): self.assertEquals(counts, {0: 128, 1: 128, 2: 256, 3: 256}) dev_usage, worst = rb.validate() - self.assert_(dev_usage is None) - self.assert_(worst is None) + self.assertTrue(dev_usage is None) + self.assertTrue(worst is None) dev_usage, worst = rb.validate(stats=True) self.assertEquals(list(dev_usage), [128, 128, 256, 256]) diff --git a/test/unit/common/ring/test_ring.py b/test/unit/common/ring/test_ring.py index 0e0cfe567c..3fbdd2538e 100644 --- a/test/unit/common/ring/test_ring.py +++ b/test/unit/common/ring/test_ring.py @@ -14,7 +14,7 @@ # limitations under the License. import array -import cPickle as pickle +import six.moves.cPickle as pickle import os import sys import unittest @@ -313,8 +313,8 @@ class TestRing(TestRingBase): def test_reload_old_style_pickled_ring(self): devs = [{'id': 0, 'zone': 0, - 'weight': 1.0, 'ip': '10.1.1.1', - 'port': 6000}, + 'weight': 1.0, 'ip': '10.1.1.1', + 'port': 6000}, {'id': 1, 'zone': 0, 'weight': 1.0, 'ip': '10.1.1.1', 'port': 6000}, @@ -382,72 +382,72 @@ class TestRing(TestRingBase): self.assertEquals(part, 0) self.assertEquals(nodes, [dict(node, index=i) for i, node in enumerate([self.intended_devs[0], - self.intended_devs[3]])]) + self.intended_devs[3]])]) part, nodes = self.ring.get_nodes('a1') self.assertEquals(part, 0) self.assertEquals(nodes, [dict(node, index=i) for i, node in enumerate([self.intended_devs[0], - self.intended_devs[3]])]) + self.intended_devs[3]])]) part, nodes = self.ring.get_nodes('a4') self.assertEquals(part, 1) self.assertEquals(nodes, [dict(node, index=i) for i, node in enumerate([self.intended_devs[1], - self.intended_devs[4]])]) + self.intended_devs[4]])]) part, nodes = self.ring.get_nodes('aa') self.assertEquals(part, 1) self.assertEquals(nodes, [dict(node, index=i) for i, node in enumerate([self.intended_devs[1], - self.intended_devs[4]])]) + self.intended_devs[4]])]) 
part, nodes = self.ring.get_nodes('a', 'c1') self.assertEquals(part, 0) self.assertEquals(nodes, [dict(node, index=i) for i, node in enumerate([self.intended_devs[0], - self.intended_devs[3]])]) + self.intended_devs[3]])]) part, nodes = self.ring.get_nodes('a', 'c0') self.assertEquals(part, 3) self.assertEquals(nodes, [dict(node, index=i) for i, node in enumerate([self.intended_devs[1], - self.intended_devs[4]])]) + self.intended_devs[4]])]) part, nodes = self.ring.get_nodes('a', 'c3') self.assertEquals(part, 2) self.assertEquals(nodes, [dict(node, index=i) for i, node in enumerate([self.intended_devs[0], - self.intended_devs[3]])]) + self.intended_devs[3]])]) part, nodes = self.ring.get_nodes('a', 'c2') self.assertEquals(nodes, [dict(node, index=i) for i, node in enumerate([self.intended_devs[0], - self.intended_devs[3]])]) + self.intended_devs[3]])]) part, nodes = self.ring.get_nodes('a', 'c', 'o1') self.assertEquals(part, 1) self.assertEquals(nodes, [dict(node, index=i) for i, node in enumerate([self.intended_devs[1], - self.intended_devs[4]])]) + self.intended_devs[4]])]) part, nodes = self.ring.get_nodes('a', 'c', 'o5') self.assertEquals(part, 0) self.assertEquals(nodes, [dict(node, index=i) for i, node in enumerate([self.intended_devs[0], - self.intended_devs[3]])]) + self.intended_devs[3]])]) part, nodes = self.ring.get_nodes('a', 'c', 'o0') self.assertEquals(part, 0) self.assertEquals(nodes, [dict(node, index=i) for i, node in enumerate([self.intended_devs[0], - self.intended_devs[3]])]) + self.intended_devs[3]])]) part, nodes = self.ring.get_nodes('a', 'c', 'o2') self.assertEquals(part, 2) self.assertEquals(nodes, [dict(node, index=i) for i, node in enumerate([self.intended_devs[0], - self.intended_devs[3]])]) + self.intended_devs[3]])]) def add_dev_to_ring(self, new_dev): self.ring.devs.append(new_dev) diff --git a/test/unit/common/test_constraints.py b/test/unit/common/test_constraints.py index 0cca36d8e6..1fd3411ad2 100644 --- 
a/test/unit/common/test_constraints.py +++ b/test/unit/common/test_constraints.py @@ -49,7 +49,7 @@ class TestConstraints(unittest.TestCase): def test_check_metadata_empty_name(self): headers = {'X-Object-Meta-': 'Value'} - self.assert_(constraints.check_metadata(Request.blank( + self.assertTrue(constraints.check_metadata(Request.blank( '/', headers=headers), 'object'), HTTPBadRequest) def test_check_metadata_name_length(self): @@ -215,7 +215,7 @@ class TestConstraints(unittest.TestCase): resp = constraints.check_object_creation( Request.blank('/', headers=headers), 'object_name') self.assertEquals(resp.status_int, HTTP_BAD_REQUEST) - self.assert_('Content-Type' in resp.body) + self.assertTrue('Content-Type' in resp.body) def test_check_object_creation_bad_delete_headers(self): headers = {'Transfer-Encoding': 'chunked', @@ -224,7 +224,7 @@ class TestConstraints(unittest.TestCase): resp = constraints.check_object_creation( Request.blank('/', headers=headers), 'object_name') self.assertEquals(resp.status_int, HTTP_BAD_REQUEST) - self.assert_('Non-integer X-Delete-After' in resp.body) + self.assertTrue('Non-integer X-Delete-After' in resp.body) t = str(int(time.time() - 60)) headers = {'Transfer-Encoding': 'chunked', @@ -233,7 +233,7 @@ class TestConstraints(unittest.TestCase): resp = constraints.check_object_creation( Request.blank('/', headers=headers), 'object_name') self.assertEquals(resp.status_int, HTTP_BAD_REQUEST) - self.assert_('X-Delete-At in past' in resp.body) + self.assertTrue('X-Delete-At in past' in resp.body) def test_check_delete_headers(self): @@ -515,6 +515,24 @@ class TestConstraints(unittest.TestCase): constraints.check_account_format, req, req.headers['X-Copy-From-Account']) + def test_check_container_format(self): + invalid_versions_locations = ( + 'container/with/slashes', + '', # empty + ) + for versions_location in invalid_versions_locations: + req = Request.blank( + '/v/a/c/o', headers={ + 'X-Versions-Location': versions_location}) + try: + 
constraints.check_container_format( + req, req.headers['X-Versions-Location']) + except HTTPException as e: + self.assertTrue(e.body.startswith('Container name cannot')) + else: + self.fail('check_container_format did not raise error for %r' % + req.headers['X-Versions-Location']) + class TestConstraintsConfig(unittest.TestCase): diff --git a/test/unit/common/test_daemon.py b/test/unit/common/test_daemon.py index e20fcde62e..cab9d4f2de 100644 --- a/test/unit/common/test_daemon.py +++ b/test/unit/common/test_daemon.py @@ -16,10 +16,10 @@ # TODO(clayg): Test kill_children signal handlers import os +from six import StringIO import unittest from getpass import getuser import logging -from StringIO import StringIO from test.unit import tmpfile from mock import patch @@ -52,7 +52,7 @@ class TestDaemon(unittest.TestCase): def test_create(self): d = daemon.Daemon({}) self.assertEquals(d.conf, {}) - self.assert_(isinstance(d.logger, utils.LogAdapter)) + self.assertTrue(isinstance(d.logger, utils.LogAdapter)) def test_stubs(self): d = daemon.Daemon({}) @@ -88,7 +88,7 @@ class TestRunDaemon(unittest.TestCase): with patch.dict('os.environ', {'TZ': ''}): daemon.run_daemon(MyDaemon, conf_file) self.assertEquals(MyDaemon.forever_called, True) - self.assert_(os.environ['TZ'] is not '') + self.assertTrue(os.environ['TZ'] is not '') daemon.run_daemon(MyDaemon, conf_file, once=True) self.assertEquals(MyDaemon.once_called, True) @@ -104,7 +104,7 @@ class TestRunDaemon(unittest.TestCase): logger.addHandler(logging.StreamHandler(sio)) logger = utils.get_logger(None, 'server', log_route='server') daemon.run_daemon(MyDaemon, conf_file, logger=logger) - self.assert_('user quit' in sio.getvalue().lower()) + self.assertTrue('user quit' in sio.getvalue().lower()) if __name__ == '__main__': diff --git a/test/unit/common/test_db.py b/test/unit/common/test_db.py index 6e0606e79d..cc65d40800 100644 --- a/test/unit/common/test_db.py +++ b/test/unit/common/test_db.py @@ -21,7 +21,7 @@ import 
unittest from tempfile import mkdtemp from shutil import rmtree, copy from uuid import uuid4 -import cPickle as pickle +import six.moves.cPickle as pickle import simplejson import sqlite3 @@ -51,14 +51,14 @@ class TestDatabaseConnectionError(unittest.TestCase): def test_str(self): err = \ DatabaseConnectionError(':memory:', 'No valid database connection') - self.assert_(':memory:' in str(err)) - self.assert_('No valid database connection' in str(err)) + self.assertTrue(':memory:' in str(err)) + self.assertTrue('No valid database connection' in str(err)) err = DatabaseConnectionError(':memory:', 'No valid database connection', timeout=1357) - self.assert_(':memory:' in str(err)) - self.assert_('No valid database connection' in str(err)) - self.assert_('1357' in str(err)) + self.assertTrue(':memory:' in str(err)) + self.assertTrue('No valid database connection' in str(err)) + self.assertTrue('1357' in str(err)) class TestDictFactory(unittest.TestCase): @@ -157,7 +157,7 @@ class TestGetDBConnection(unittest.TestCase): def test_normal_case(self): conn = get_db_connection(':memory:') - self.assert_(hasattr(conn, 'execute')) + self.assertTrue(hasattr(conn, 'execute')) def test_invalid_path(self): self.assertRaises(DatabaseConnectionError, get_db_connection, @@ -323,12 +323,12 @@ class TestExampleBroker(unittest.TestCase): # delete delete_timestamp = next(self.ts) broker.merge_timestamps(created_at, put_timestamp, delete_timestamp) - self.assert_(broker.is_deleted()) + self.assertTrue(broker.is_deleted()) info = broker.get_info() self.assertEqual(info['created_at'], created_at) self.assertEqual(info['put_timestamp'], put_timestamp) self.assertEqual(info['delete_timestamp'], delete_timestamp) - self.assert_(info['status_changed_at'] > Timestamp(put_timestamp)) + self.assertTrue(info['status_changed_at'] > Timestamp(put_timestamp)) def put_item(self, broker, timestamp): broker.put_test('test', timestamp) @@ -365,7 +365,7 @@ class TestExampleBroker(unittest.TestCase): 
self.delete_item(broker, next(self.ts)) self.assertEqual(broker.get_info()[ '%s_count' % broker.db_contains_type], 0) - self.assert_(broker.is_deleted()) + self.assertTrue(broker.is_deleted()) def test_merge_timestamps_simple_recreate(self): put_timestamp = next(self.ts) @@ -375,14 +375,14 @@ class TestExampleBroker(unittest.TestCase): created_at = broker.get_info()['created_at'] delete_timestamp = next(self.ts) broker.merge_timestamps(created_at, put_timestamp, delete_timestamp) - self.assert_(broker.is_deleted()) + self.assertTrue(broker.is_deleted()) info = broker.get_info() self.assertEqual(info['created_at'], created_at) self.assertEqual(info['put_timestamp'], put_timestamp) self.assertEqual(info['delete_timestamp'], delete_timestamp) orig_status_changed_at = info['status_changed_at'] - self.assert_(orig_status_changed_at > - Timestamp(virgin_status_changed_at)) + self.assertTrue(orig_status_changed_at > + Timestamp(virgin_status_changed_at)) # recreate recreate_timestamp = next(self.ts) status_changed_at = time.time() @@ -393,7 +393,7 @@ class TestExampleBroker(unittest.TestCase): self.assertEqual(info['created_at'], created_at) self.assertEqual(info['put_timestamp'], recreate_timestamp) self.assertEqual(info['delete_timestamp'], delete_timestamp) - self.assert_(info['status_changed_at'], status_changed_at) + self.assertTrue(info['status_changed_at'], status_changed_at) def test_merge_timestamps_recreate_with_objects(self): put_timestamp = next(self.ts) @@ -403,14 +403,14 @@ class TestExampleBroker(unittest.TestCase): # delete delete_timestamp = next(self.ts) broker.merge_timestamps(created_at, put_timestamp, delete_timestamp) - self.assert_(broker.is_deleted()) + self.assertTrue(broker.is_deleted()) info = broker.get_info() self.assertEqual(info['created_at'], created_at) self.assertEqual(info['put_timestamp'], put_timestamp) self.assertEqual(info['delete_timestamp'], delete_timestamp) orig_status_changed_at = info['status_changed_at'] - 
self.assert_(Timestamp(orig_status_changed_at) >= - Timestamp(put_timestamp)) + self.assertTrue(Timestamp(orig_status_changed_at) >= + Timestamp(put_timestamp)) # add object self.put_item(broker, next(self.ts)) count_key = '%s_count' % broker.db_contains_type @@ -607,7 +607,7 @@ class TestDatabaseBroker(unittest.TestCase): db_file = os.path.join(self.testdir, '1.db') broker = DatabaseBroker(db_file) self.assertEqual(broker.db_file, db_file) - self.assert_(broker.conn is None) + self.assertTrue(broker.conn is None) def test_disk_preallocate(self): test_size = [-1] @@ -641,7 +641,7 @@ class TestDatabaseBroker(unittest.TestCase): broker = DatabaseBroker(':memory:') broker._initialize = stub broker.initialize(normalize_timestamp('1')) - self.assert_(hasattr(stub_dict['args'][0], 'execute')) + self.assertTrue(hasattr(stub_dict['args'][0], 'execute')) self.assertEquals(stub_dict['args'][1], '0000000001.00000') with broker.get() as conn: conn.execute('SELECT * FROM outgoing_sync') @@ -649,7 +649,7 @@ class TestDatabaseBroker(unittest.TestCase): broker = DatabaseBroker(os.path.join(self.testdir, '1.db')) broker._initialize = stub broker.initialize(normalize_timestamp('1')) - self.assert_(hasattr(stub_dict['args'][0], 'execute')) + self.assertTrue(hasattr(stub_dict['args'][0], 'execute')) self.assertEquals(stub_dict['args'][1], '0000000001.00000') with broker.get() as conn: conn.execute('SELECT * FROM outgoing_sync') @@ -676,11 +676,11 @@ class TestDatabaseBroker(unittest.TestCase): broker._initialize = init_stub # Initializes a good broker for us broker.initialize(normalize_timestamp('1')) - self.assert_(broker.conn is not None) + self.assertTrue(broker.conn is not None) broker._delete_db = delete_stub stub_called[0] = False broker.delete_db('2') - self.assert_(stub_called[0]) + self.assertTrue(stub_called[0]) broker = DatabaseBroker(os.path.join(self.testdir, '1.db')) broker.db_type = 'test' broker._initialize = init_stub @@ -688,12 +688,12 @@ class 
TestDatabaseBroker(unittest.TestCase): broker._delete_db = delete_stub stub_called[0] = False broker.delete_db('2') - self.assert_(stub_called[0]) + self.assertTrue(stub_called[0]) # ensure that metadata was cleared m2 = broker.metadata - self.assert_(not any(v[0] for v in m2.itervalues())) - self.assert_(all(v[1] == normalize_timestamp('2') - for v in m2.itervalues())) + self.assertTrue(not any(v[0] for v in m2.itervalues())) + self.assertTrue(all(v[1] == normalize_timestamp('2') + for v in m2.itervalues())) def test_get(self): broker = DatabaseBroker(':memory:') @@ -710,7 +710,7 @@ class TestDatabaseBroker(unittest.TestCase): conn.execute('SELECT 1') except Exception: got_exc = True - self.assert_(got_exc) + self.assertTrue(got_exc) def stub(*args, **kwargs): pass @@ -783,7 +783,7 @@ class TestDatabaseBroker(unittest.TestCase): pass except Exception: got_exc = True - self.assert_(got_exc) + self.assertTrue(got_exc) def stub(*args, **kwargs): pass @@ -803,7 +803,7 @@ class TestDatabaseBroker(unittest.TestCase): pass except LockTimeout: got_exc = True - self.assert_(got_exc) + self.assertTrue(got_exc) try: with broker.lock(): raise Exception('test') @@ -1061,61 +1061,61 @@ class TestDatabaseBroker(unittest.TestCase): first_timestamp = normalize_timestamp(1) first_value = '1' broker.update_metadata({'First': [first_value, first_timestamp]}) - self.assert_('First' in broker.metadata) + self.assertTrue('First' in broker.metadata) self.assertEquals(broker.metadata['First'], [first_value, first_timestamp]) # Add our second item second_timestamp = normalize_timestamp(2) second_value = '2' broker.update_metadata({'Second': [second_value, second_timestamp]}) - self.assert_('First' in broker.metadata) + self.assertTrue('First' in broker.metadata) self.assertEquals(broker.metadata['First'], [first_value, first_timestamp]) - self.assert_('Second' in broker.metadata) + self.assertTrue('Second' in broker.metadata) self.assertEquals(broker.metadata['Second'], [second_value, 
second_timestamp]) # Update our first item first_timestamp = normalize_timestamp(3) first_value = '1b' broker.update_metadata({'First': [first_value, first_timestamp]}) - self.assert_('First' in broker.metadata) + self.assertTrue('First' in broker.metadata) self.assertEquals(broker.metadata['First'], [first_value, first_timestamp]) - self.assert_('Second' in broker.metadata) + self.assertTrue('Second' in broker.metadata) self.assertEquals(broker.metadata['Second'], [second_value, second_timestamp]) # Delete our second item (by setting to empty string) second_timestamp = normalize_timestamp(4) second_value = '' broker.update_metadata({'Second': [second_value, second_timestamp]}) - self.assert_('First' in broker.metadata) + self.assertTrue('First' in broker.metadata) self.assertEquals(broker.metadata['First'], [first_value, first_timestamp]) - self.assert_('Second' in broker.metadata) + self.assertTrue('Second' in broker.metadata) self.assertEquals(broker.metadata['Second'], [second_value, second_timestamp]) # Reclaim at point before second item was deleted reclaim(broker, normalize_timestamp(3)) - self.assert_('First' in broker.metadata) + self.assertTrue('First' in broker.metadata) self.assertEquals(broker.metadata['First'], [first_value, first_timestamp]) - self.assert_('Second' in broker.metadata) + self.assertTrue('Second' in broker.metadata) self.assertEquals(broker.metadata['Second'], [second_value, second_timestamp]) # Reclaim at point second item was deleted reclaim(broker, normalize_timestamp(4)) - self.assert_('First' in broker.metadata) + self.assertTrue('First' in broker.metadata) self.assertEquals(broker.metadata['First'], [first_value, first_timestamp]) - self.assert_('Second' in broker.metadata) + self.assertTrue('Second' in broker.metadata) self.assertEquals(broker.metadata['Second'], [second_value, second_timestamp]) # Reclaim after point second item was deleted reclaim(broker, normalize_timestamp(5)) - self.assert_('First' in broker.metadata) + 
self.assertTrue('First' in broker.metadata) self.assertEquals(broker.metadata['First'], [first_value, first_timestamp]) - self.assert_('Second' not in broker.metadata) + self.assertTrue('Second' not in broker.metadata) @patch.object(DatabaseBroker, 'validate_metadata') def test_validate_metadata_is_called_from_update_metadata(self, mock): diff --git a/test/unit/common/test_db_replicator.py b/test/unit/common/test_db_replicator.py index 6bdf6f0cdf..91a5adfbfd 100644 --- a/test/unit/common/test_db_replicator.py +++ b/test/unit/common/test_db_replicator.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import print_function import unittest from contextlib import contextmanager import os @@ -385,7 +386,7 @@ class TestDBReplicator(unittest.TestCase): remote_file = 'rsync_ip(127.0.0.1)::container/sda1/tmp/abcd' replicator = MyTestReplicator(broker.db_file, remote_file) replicator._rsync_db(broker, fake_device, ReplHttp(), 'abcd') - self.assert_(replicator._rsync_file_called) + self.assertTrue(replicator._rsync_file_called) with patch('swift.common.db_replicator.rsync_ip', mock_rsync_ip): broker = FakeBroker() @@ -393,7 +394,7 @@ class TestDBReplicator(unittest.TestCase): replicator = MyTestReplicator(broker.db_file, remote_file) replicator.vm_test_mode = True replicator._rsync_db(broker, fake_device, ReplHttp(), 'abcd') - self.assert_(replicator._rsync_file_called) + self.assertTrue(replicator._rsync_file_called) def test_rsync_db_rsync_file_failure(self): class MyTestReplicator(TestReplicator): @@ -588,7 +589,7 @@ class TestDBReplicator(unittest.TestCase): raise OSError(errno.EEXIST, "File already exists") self.assertEquals('/a/b/c/d/e', was) if '-' in new: - self.assert_( + self.assertTrue( new.startswith('/a/quarantined/containers/e-')) else: self.assertEquals('/a/quarantined/containers/e', new) @@ -1304,7 +1305,7 @@ def attach_fake_replication_rpc(rpc, replicate_hook=None): 
self.host = node['replication_ip'] def replicate(self, op, *sync_args): - print 'REPLICATE: %s, %s, %r' % (self.path, op, sync_args) + print('REPLICATE: %s, %s, %r' % (self.path, op, sync_args)) replicate_args = self.path.lstrip('/').split('/') args = [op] + list(sync_args) swob_response = rpc.dispatch(replicate_args, args) diff --git a/test/unit/common/test_direct_client.py b/test/unit/common/test_direct_client.py index 145ac83c08..38accd014d 100644 --- a/test/unit/common/test_direct_client.py +++ b/test/unit/common/test_direct_client.py @@ -18,11 +18,11 @@ import unittest import os import urllib from contextlib import contextmanager -import StringIO from hashlib import md5 import time import mock +import six from swift.common import direct_client from swift.common.exceptions import ClientException @@ -117,7 +117,7 @@ class TestDirectClient(unittest.TestCase): now = time.time() headers = direct_client.gen_headers(add_ts=True) self.assertEqual(headers['user-agent'], stub_user_agent) - self.assert_(now - 1 < Timestamp(headers['x-timestamp']) < now + 1) + self.assertTrue(now - 1 < Timestamp(headers['x-timestamp']) < now + 1) self.assertEqual(headers['x-timestamp'], Timestamp(headers['x-timestamp']).internal) self.assertEqual(2, len(headers)) @@ -146,7 +146,7 @@ class TestDirectClient(unittest.TestCase): self.assertEqual( headers['x-timestamp'], Timestamp(headers['x-timestamp']).internal) - self.assert_( + self.assertTrue( now - 1 < Timestamp(headers['x-timestamp']) < now + 1) self.assertEqual(expected_header_count, len(headers)) @@ -194,7 +194,8 @@ class TestDirectClient(unittest.TestCase): 'status 500', ) for item in expected_err_msg_parts: - self.assert_(item in str(err), '%r was not in "%s"' % (item, err)) + self.assertTrue( + item in str(err), '%r was not in "%s"' % (item, err)) self.assertEqual(err.http_host, self.node['ip']) self.assertEqual(err.http_port, self.node['port']) self.assertEqual(err.http_device, self.node['device']) @@ -231,7 +232,7 @@ class 
TestDirectClient(unittest.TestCase): self.assertEqual(conn.method, 'GET') self.assertEqual(conn.path, self.account_path) self.assertEqual(err.http_status, 500) - self.assert_('GET' in str(err)) + self.assertTrue('GET' in str(err)) def test_direct_delete_account(self): node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'} @@ -248,7 +249,7 @@ class TestDirectClient(unittest.TestCase): path = args[3] self.assertEqual('/sda/0/a', path) headers = args[4] - self.assert_('X-Timestamp' in headers) + self.assertTrue('X-Timestamp' in headers) def test_direct_head_container(self): headers = HeaderKeyDict(key='value') @@ -281,7 +282,7 @@ class TestDirectClient(unittest.TestCase): self.assertEqual(conn.req_headers['user-agent'], self.user_agent) self.assertEqual(err.http_status, 503) self.assertEqual(err.http_headers, headers) - self.assert_('HEAD' in str(err)) + self.assertTrue('HEAD' in str(err)) def test_direct_head_container_deleted(self): important_timestamp = Timestamp(time.time()).internal @@ -293,7 +294,7 @@ class TestDirectClient(unittest.TestCase): direct_client.direct_head_container( self.node, self.part, self.account, self.container) except Exception as err: - self.assert_(isinstance(err, ClientException)) + self.assertTrue(isinstance(err, ClientException)) else: self.fail('ClientException not raised') self.assertEqual(conn.method, 'HEAD') @@ -369,7 +370,7 @@ class TestDirectClient(unittest.TestCase): self.assertEqual(conn.path, self.container_path) self.assertEqual(err.http_status, 500) - self.assert_('DELETE' in str(err)) + self.assertTrue('DELETE' in str(err)) def test_direct_put_container_object(self): headers = {'x-foo': 'bar'} @@ -380,7 +381,7 @@ class TestDirectClient(unittest.TestCase): headers=headers) self.assertEqual(conn.method, 'PUT') self.assertEqual(conn.path, self.obj_path) - self.assert_('x-timestamp' in conn.req_headers) + self.assertTrue('x-timestamp' in conn.req_headers) self.assertEqual('bar', conn.req_headers.get('x-foo')) 
self.assertEqual(rv, None) @@ -400,7 +401,7 @@ class TestDirectClient(unittest.TestCase): self.assertEqual(conn.path, self.obj_path) self.assertEqual(err.http_status, 500) - self.assert_('PUT' in str(err)) + self.assertTrue('PUT' in str(err)) def test_direct_delete_container_object(self): with mocked_http_conn(204) as conn: @@ -426,7 +427,7 @@ class TestDirectClient(unittest.TestCase): self.assertEqual(conn.path, self.obj_path) self.assertEqual(err.http_status, 500) - self.assert_('DELETE' in str(err)) + self.assertTrue('DELETE' in str(err)) def test_direct_head_object(self): headers = HeaderKeyDict({'x-foo': 'bar'}) @@ -440,8 +441,8 @@ class TestDirectClient(unittest.TestCase): self.assertEqual(conn.req_headers['user-agent'], self.user_agent) self.assertEqual('bar', conn.req_headers.get('x-foo')) - self.assert_('x-timestamp' not in conn.req_headers, - 'x-timestamp was in HEAD request headers') + self.assertTrue('x-timestamp' not in conn.req_headers, + 'x-timestamp was in HEAD request headers') self.assertEqual(headers, resp) def test_direct_head_object_error(self): @@ -458,7 +459,7 @@ class TestDirectClient(unittest.TestCase): self.assertEqual(conn.path, self.obj_path) self.assertEqual(err.http_status, 500) - self.assert_('HEAD' in str(err)) + self.assertTrue('HEAD' in str(err)) def test_direct_head_object_not_found(self): important_timestamp = Timestamp(time.time()).internal @@ -480,7 +481,7 @@ class TestDirectClient(unittest.TestCase): important_timestamp) def test_direct_get_object(self): - contents = StringIO.StringIO('123456') + contents = six.StringIO('123456') with mocked_http_conn(200, body=contents) as conn: resp_header, obj_body = direct_client.direct_get_object( @@ -503,7 +504,7 @@ class TestDirectClient(unittest.TestCase): self.assertEqual(conn.path, self.obj_path) self.assertEqual(err.http_status, 500) - self.assert_('GET' in str(err)) + self.assertTrue('GET' in str(err)) def test_direct_post_object(self): headers = {'Key': 'value'} @@ -537,10 +538,10 
@@ class TestDirectClient(unittest.TestCase): for header in headers: self.assertEqual(conn.req_headers[header], headers[header]) self.assertEqual(conn.req_headers['user-agent'], self.user_agent) - self.assert_('x-timestamp' in conn.req_headers) + self.assertTrue('x-timestamp' in conn.req_headers) self.assertEqual(err.http_status, 500) - self.assert_('POST' in str(err)) + self.assertTrue('POST' in str(err)) def test_direct_delete_object(self): with mocked_http_conn(200) as conn: @@ -576,10 +577,10 @@ class TestDirectClient(unittest.TestCase): self.assertEqual(conn.method, 'DELETE') self.assertEqual(conn.path, self.obj_path) self.assertEqual(err.http_status, 503) - self.assert_('DELETE' in str(err)) + self.assertTrue('DELETE' in str(err)) def test_direct_put_object_with_content_length(self): - contents = StringIO.StringIO('123456') + contents = six.StringIO('123456') with mocked_http_conn(200) as conn: resp = direct_client.direct_put_object( @@ -590,7 +591,7 @@ class TestDirectClient(unittest.TestCase): self.assertEqual(md5('123456').hexdigest(), resp) def test_direct_put_object_fail(self): - contents = StringIO.StringIO('123456') + contents = six.StringIO('123456') with mocked_http_conn(500) as conn: try: @@ -606,7 +607,7 @@ class TestDirectClient(unittest.TestCase): self.assertEqual(err.http_status, 500) def test_direct_put_object_chunked(self): - contents = StringIO.StringIO('123456') + contents = six.StringIO('123456') with mocked_http_conn(200) as conn: resp = direct_client.direct_put_object( diff --git a/test/unit/common/test_internal_client.py b/test/unit/common/test_internal_client.py index 4c931af549..3c817fbc9b 100644 --- a/test/unit/common/test_internal_client.py +++ b/test/unit/common/test_internal_client.py @@ -15,7 +15,6 @@ import json import mock -from StringIO import StringIO import unittest from urllib import quote import zlib @@ -23,9 +22,9 @@ from textwrap import dedent import os import six +from six import StringIO from six.moves import range from 
test.unit import FakeLogger -import eventlet from eventlet.green import urllib2 from swift.common import internal_client from swift.common import swob @@ -1265,48 +1264,109 @@ class TestSimpleClient(unittest.TestCase): self.assertEqual(mock_urlopen.call_count, 2) self.assertEqual([None, None], retval) + @mock.patch('eventlet.green.urllib2.urlopen') + def test_request_with_retries_with_HTTPError(self, mock_urlopen): + mock_response = mock.MagicMock() + mock_response.read.return_value = '' + c = internal_client.SimpleClient(url='http://127.0.0.1', token='token') + self.assertEqual(c.retries, 5) + + for request_method in 'GET PUT POST DELETE HEAD COPY'.split(): + mock_urlopen.reset_mock() + mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5) + with mock.patch('swift.common.internal_client.sleep') \ + as mock_sleep: + self.assertRaises(urllib2.HTTPError, + c.retry_request, request_method, retries=1) + self.assertEqual(mock_sleep.call_count, 1) + self.assertEqual(mock_urlopen.call_count, 2) + + @mock.patch('eventlet.green.urllib2.urlopen') + def test_request_container_with_retries_with_HTTPError(self, + mock_urlopen): + mock_response = mock.MagicMock() + mock_response.read.return_value = '' + c = internal_client.SimpleClient(url='http://127.0.0.1', token='token') + self.assertEqual(c.retries, 5) + + for request_method in 'GET PUT POST DELETE HEAD COPY'.split(): + mock_urlopen.reset_mock() + mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5) + with mock.patch('swift.common.internal_client.sleep') \ + as mock_sleep: + self.assertRaises(urllib2.HTTPError, + c.retry_request, request_method, + container='con', retries=1) + self.assertEqual(mock_sleep.call_count, 1) + self.assertEqual(mock_urlopen.call_count, 2) + + @mock.patch('eventlet.green.urllib2.urlopen') + def test_request_object_with_retries_with_HTTPError(self, + mock_urlopen): + mock_response = mock.MagicMock() + mock_response.read.return_value = '' + c = 
internal_client.SimpleClient(url='http://127.0.0.1', token='token') + self.assertEqual(c.retries, 5) + + for request_method in 'GET PUT POST DELETE HEAD COPY'.split(): + mock_urlopen.reset_mock() + mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5) + with mock.patch('swift.common.internal_client.sleep') \ + as mock_sleep: + self.assertRaises(urllib2.HTTPError, + c.retry_request, request_method, + container='con', name='obj', retries=1) + self.assertEqual(mock_sleep.call_count, 1) + self.assertEqual(mock_urlopen.call_count, 2) + def test_proxy(self): - running = True - - def handle(sock): - while running: - try: - with eventlet.Timeout(0.1): - (conn, addr) = sock.accept() - except eventlet.Timeout: - continue - else: - conn.send('HTTP/1.1 503 Server Error') - conn.close() - sock.close() - - sock = eventlet.listen(('', 0)) - port = sock.getsockname()[1] - proxy = 'http://127.0.0.1:%s' % port + # check that proxy arg is passed through to the urllib Request + scheme = 'http' + proxy_host = '127.0.0.1:80' + proxy = '%s://%s' % (scheme, proxy_host) url = 'https://127.0.0.1:1/a' - server = eventlet.spawn(handle, sock) - try: - headers = {'Content-Length': '0'} - with mock.patch('swift.common.internal_client.sleep'): - try: - internal_client.put_object( - url, container='c', name='o1', headers=headers, - contents='', proxy=proxy, timeout=0.1, retries=0) - except urllib2.HTTPError as e: - self.assertEqual(e.code, 503) - except urllib2.URLError as e: - if 'ECONNREFUSED' in str(e): - self.fail( - "Got %s which probably means the http proxy " - "settings were not used" % e) - else: - raise e - else: - self.fail('Unexpected successful response') - finally: - running = False - server.wait() + class FakeConn(object): + def read(self): + return 'irrelevant' + + mocked = 'swift.common.internal_client.urllib2.urlopen' + + # module level methods + for func in (internal_client.put_object, + internal_client.delete_object): + with mock.patch(mocked) as mock_urlopen: + 
mock_urlopen.return_value = FakeConn() + func(url, container='c', name='o1', contents='', proxy=proxy, + timeout=0.1, retries=0) + self.assertEqual(1, mock_urlopen.call_count) + args, kwargs = mock_urlopen.call_args + self.assertEqual(1, len(args)) + self.assertEqual(1, len(kwargs)) + self.assertEqual(0.1, kwargs['timeout']) + self.assertTrue(isinstance(args[0], urllib2.Request)) + self.assertEqual(proxy_host, args[0].host) + self.assertEqual(scheme, args[0].type) + + # class methods + content = mock.MagicMock() + cl = internal_client.SimpleClient(url) + scenarios = ((cl.get_account, []), + (cl.get_container, ['c']), + (cl.put_container, ['c']), + (cl.put_object, ['c', 'o', content])) + for scenario in scenarios: + with mock.patch(mocked) as mock_urlopen: + mock_urlopen.return_value = FakeConn() + scenario[0](*scenario[1], proxy=proxy, timeout=0.1) + self.assertEqual(1, mock_urlopen.call_count) + args, kwargs = mock_urlopen.call_args + self.assertEqual(1, len(args)) + self.assertEqual(1, len(kwargs)) + self.assertEqual(0.1, kwargs['timeout']) + self.assertTrue(isinstance(args[0], urllib2.Request)) + self.assertEqual(proxy_host, args[0].host) + self.assertEqual(scheme, args[0].type) if __name__ == '__main__': unittest.main() diff --git a/test/unit/common/test_manager.py b/test/unit/common/test_manager.py index 66b020f8df..ebdab67d26 100644 --- a/test/unit/common/test_manager.py +++ b/test/unit/common/test_manager.py @@ -12,7 +12,7 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. 
- +from __future__ import print_function import unittest from test.unit import temptree @@ -140,8 +140,8 @@ class TestManagerModule(unittest.TestCase): self.assertEquals(myfunc(0), 0) self.assertEquals(myfunc(True), 1) self.assertEquals(myfunc(False), 0) - self.assert_(hasattr(myfunc, 'publicly_accessible')) - self.assert_(myfunc.publicly_accessible) + self.assertTrue(hasattr(myfunc, 'publicly_accessible')) + self.assertTrue(myfunc.publicly_accessible) def test_watch_server_pids(self): class MockOs(object): @@ -275,7 +275,7 @@ class TestManagerModule(unittest.TestCase): manager.safe_kill(4, signal.SIGHUP, 'same-procname') def test_exc(self): - self.assert_(issubclass(manager.UnknownCommandError, Exception)) + self.assertTrue(issubclass(manager.UnknownCommandError, Exception)) class TestServer(unittest.TestCase): @@ -307,8 +307,8 @@ class TestServer(unittest.TestCase): def test_server_repr(self): server = manager.Server('proxy') - self.assert_(server.__class__.__name__ in repr(server)) - self.assert_(str(server) in repr(server)) + self.assertTrue(server.__class__.__name__ in repr(server)) + self.assertTrue(str(server) in repr(server)) def test_server_equality(self): server1 = manager.Server('Proxy') @@ -407,7 +407,7 @@ class TestServer(unittest.TestCase): c3 = self.join_swift_dir('object-server/object3.conf') c4 = self.join_swift_dir('object-server/conf/server4.conf') for c in [c1, c2, c3, c4]: - self.assert_(c in conf_files) + self.assertTrue(c in conf_files) # test configs returned sorted sorted_confs = sorted([c1, c2, c3, c4]) self.assertEquals(conf_files, sorted_confs) @@ -462,8 +462,8 @@ class TestServer(unittest.TestCase): # check warn "unable to locate" conf_files = server.conf_files() self.assertFalse(conf_files) - self.assert_('unable to locate config for auth' - in pop_stream(f).lower()) + self.assertTrue('unable to locate config for auth' + in pop_stream(f).lower()) # check quiet will silence warning conf_files = server.conf_files(verbose=True, 
quiet=True) self.assertEquals(pop_stream(f), '') @@ -473,13 +473,13 @@ class TestServer(unittest.TestCase): self.assertEquals(pop_stream(f), '') # check missing config number warn "unable to locate" conf_files = server.conf_files(number=2) - self.assert_( + self.assertTrue( 'unable to locate config number 2 for ' + 'container-auditor' in pop_stream(f).lower()) # check verbose lists configs conf_files = server.conf_files(number=2, verbose=True) c1 = self.join_swift_dir('container-server/1.conf') - self.assert_(c1 in pop_stream(f)) + self.assertTrue(c1 in pop_stream(f)) finally: sys.stdout = old_stdout @@ -533,7 +533,7 @@ class TestServer(unittest.TestCase): for named_conf in ('server', 'replication'): conf_dir = self.join_swift_dir( 'object-server/object-%s.conf.d' % named_conf) - self.assert_(conf_dir in conf_dirs) + self.assertTrue(conf_dir in conf_dirs) def test_conf_dir(self): conf_files = ( @@ -557,7 +557,7 @@ class TestServer(unittest.TestCase): c3 = self.join_swift_dir('object-server/3.conf.d') c4 = self.join_swift_dir('object-server/4.conf.d') for c in [c1, c2, c3, c4]: - self.assert_(c in conf_dirs) + self.assertTrue(c in conf_dirs) # test configs returned sorted sorted_confs = sorted([c1, c2, c3, c4]) self.assertEquals(conf_dirs, sorted_confs) @@ -583,7 +583,7 @@ class TestServer(unittest.TestCase): for named_pid in ('server', 'replication'): pid_file = self.join_run_dir( 'object-server/object-%s.pid.d' % named_pid) - self.assert_(pid_file in pid_files) + self.assertTrue(pid_file in pid_files) def test_iter_pid_files(self): """ @@ -715,7 +715,7 @@ class TestServer(unittest.TestCase): server = manager.Server('proxy', run_dir=manager.RUN_DIR) pids = server.signal_pids(DUMMY_SIG) self.assertEquals(len(pids), 1) - self.assert_(1 in pids) + self.assertTrue(1 in pids) self.assertEquals(manager.os.pid_sigs[1], [DUMMY_SIG]) # make sure other process not signaled self.assertFalse(2 in pids) @@ -728,8 +728,8 @@ class TestServer(unittest.TestCase): # test print 
details pids = server.signal_pids(DUMMY_SIG) output = pop_stream(f) - self.assert_('pid: %s' % 1 in output) - self.assert_('signal: %s' % DUMMY_SIG in output) + self.assertTrue('pid: %s' % 1 in output) + self.assertTrue('signal: %s' % DUMMY_SIG in output) # test no details on signal.SIG_DFL pids = server.signal_pids(signal.SIG_DFL) self.assertEquals(pop_stream(f), '') @@ -737,8 +737,8 @@ class TestServer(unittest.TestCase): manager.os = MockOs([2]) # test pid not running pids = server.signal_pids(signal.SIG_DFL) - self.assert_(1 not in pids) - self.assert_(1 not in manager.os.pid_sigs) + self.assertTrue(1 not in pids) + self.assertTrue(1 not in manager.os.pid_sigs) # test remove stale pid file self.assertFalse(os.path.exists( self.join_run_dir('proxy-server.pid'))) @@ -748,9 +748,9 @@ class TestServer(unittest.TestCase): # test verbose warns on removing stale pid file pids = server.signal_pids(signal.SIG_DFL, verbose=True) output = pop_stream(f) - self.assert_('stale pid' in output.lower()) + self.assertTrue('stale pid' in output.lower()) auth_pid = self.join_run_dir('auth-server.pid') - self.assert_(auth_pid in output) + self.assertTrue(auth_pid in output) # reset mock os so only the third server is running manager.os = MockOs([3]) server = manager.Server('one', run_dir=manager.RUN_DIR) @@ -758,9 +758,9 @@ class TestServer(unittest.TestCase): pids = server.signal_pids(signal.SIG_DFL, verbose=True) output = pop_stream(f) old_stdout.write('output %s' % output) - self.assert_('removing pid file' in output.lower()) + self.assertTrue('removing pid file' in output.lower()) one_pid = self.join_run_dir('one-server.pid') - self.assert_(one_pid in output) + self.assertTrue(one_pid in output) server = manager.Server('zero', run_dir=manager.RUN_DIR) self.assertTrue(os.path.exists( @@ -769,7 +769,7 @@ class TestServer(unittest.TestCase): pids = server.signal_pids(signal.SIG_DFL, verbose=True) output = pop_stream(f) old_stdout.write('output %s' % output) - self.assert_('with 
invalid pid' in output.lower()) + self.assertTrue('with invalid pid' in output.lower()) self.assertFalse(os.path.exists( self.join_run_dir('zero-server.pid'))) server = manager.Server('invalid-server', @@ -780,7 +780,7 @@ class TestServer(unittest.TestCase): pids = server.signal_pids(signal.SIG_DFL, verbose=True) output = pop_stream(f) old_stdout.write('output %s' % output) - self.assert_('with invalid pid' in output.lower()) + self.assertTrue('with invalid pid' in output.lower()) self.assertFalse(os.path.exists( self.join_run_dir('invalid-server.pid'))) @@ -790,8 +790,8 @@ class TestServer(unittest.TestCase): server = manager.Server('object', run_dir=manager.RUN_DIR) pids = server.signal_pids(manager.os.RAISE_EPERM_SIG) output = pop_stream(f) - self.assert_('no permission to signal pid 4' in - output.lower(), output) + self.assertTrue('no permission to signal pid 4' in + output.lower(), output) finally: sys.stdout = old_stdout @@ -813,11 +813,11 @@ class TestServer(unittest.TestCase): manager.os = MockOs([1, 3]) running_pids = server.get_running_pids() self.assertEquals(len(running_pids), 1) - self.assert_(1 in running_pids) - self.assert_(2 not in running_pids) - self.assert_(3 not in running_pids) + self.assertTrue(1 in running_pids) + self.assertTrue(2 not in running_pids) + self.assertTrue(3 not in running_pids) # test persistent running pid files - self.assert_(os.path.exists( + self.assertTrue(os.path.exists( os.path.join(manager.RUN_DIR, 'test-server1.pid'))) # test clean up stale pids pid_two = self.join_swift_dir('test-server2.pid') @@ -850,16 +850,16 @@ class TestServer(unittest.TestCase): running_pids = server.get_running_pids() # only thing-doer.pid, 1 self.assertEquals(len(running_pids), 1) - self.assert_(1 in running_pids) + self.assertTrue(1 in running_pids) # no other pids returned for n in (2, 3, 4): - self.assert_(n not in running_pids) + self.assertTrue(n not in running_pids) # assert stale pids for other servers ignored manager.os = MockOs([1]) 
# only thing-doer is running running_pids = server.get_running_pids() for f in ('thing-sayer.pid', 'other-doer.pid', 'other-sayer.pid'): # other server pid files persist - self.assert_(os.path.exists, os.path.join(t, f)) + self.assertTrue(os.path.exists, os.path.join(t, f)) # verify that servers are in fact not running for server_name in ('thing-sayer', 'other-doer', 'other-sayer'): server = manager.Server(server_name, run_dir=t) @@ -868,7 +868,7 @@ class TestServer(unittest.TestCase): # and now all OTHER pid files are cleaned out all_pids = os.listdir(t) self.assertEquals(len(all_pids), 1) - self.assert_(os.path.exists(os.path.join(t, 'thing-doer.pid'))) + self.assertTrue(os.path.exists(os.path.join(t, 'thing-doer.pid'))) def test_kill_running_pids(self): pid_files = ( @@ -894,16 +894,16 @@ class TestServer(unittest.TestCase): # test kill one pid pids = server.kill_running_pids() self.assertEquals(len(pids), 1) - self.assert_(1 in pids) + self.assertTrue(1 in pids) self.assertEquals(manager.os.pid_sigs[1], [signal.SIGTERM]) # reset os mock manager.os = MockOs([1]) # test shutdown - self.assert_('object-server' in - manager.GRACEFUL_SHUTDOWN_SERVERS) + self.assertTrue('object-server' in + manager.GRACEFUL_SHUTDOWN_SERVERS) pids = server.kill_running_pids(graceful=True) self.assertEquals(len(pids), 1) - self.assert_(1 in pids) + self.assertTrue(1 in pids) self.assertEquals(manager.os.pid_sigs[1], [signal.SIGHUP]) # start up other servers manager.os = MockOs([11, 12]) @@ -914,11 +914,11 @@ class TestServer(unittest.TestCase): pids = server.kill_running_pids(graceful=True) self.assertEquals(len(pids), 2) for pid in (11, 12): - self.assert_(pid in pids) + self.assertTrue(pid in pids) self.assertEquals(manager.os.pid_sigs[pid], [signal.SIGTERM]) # and the other pid is of course not signaled - self.assert_(1 not in manager.os.pid_sigs) + self.assertTrue(1 not in manager.os.pid_sigs) def test_status(self): conf_files = ( @@ -962,7 +962,7 @@ class 
TestServer(unittest.TestCase): output = pop_stream(f).strip().splitlines() self.assertEquals(len(output), 4) for line in output: - self.assert_('test-server running' in line) + self.assertTrue('test-server running' in line) # test get single server by number with temptree([], []) as t: manager.PROC_DIR = t @@ -970,9 +970,9 @@ class TestServer(unittest.TestCase): output = pop_stream(f).strip().splitlines() self.assertEquals(len(output), 1) line = output[0] - self.assert_('test-server running' in line) + self.assertTrue('test-server running' in line) conf_four = self.join_swift_dir(conf_files[3]) - self.assert_('4 - %s' % conf_four in line) + self.assertTrue('4 - %s' % conf_four in line) # test some servers not running manager.os = MockOs([1, 2, 3]) proc_files = ( @@ -987,7 +987,7 @@ class TestServer(unittest.TestCase): output = pop_stream(f).strip().splitlines() self.assertEquals(len(output), 3) for line in output: - self.assert_('test-server running' in line) + self.assertTrue('test-server running' in line) # test single server not running manager.os = MockOs([1, 2]) proc_files = ( @@ -1001,16 +1001,16 @@ class TestServer(unittest.TestCase): output = pop_stream(f).strip().splitlines() self.assertEquals(len(output), 1) line = output[0] - self.assert_('not running' in line) + self.assertTrue('not running' in line) conf_three = self.join_swift_dir(conf_files[2]) - self.assert_(conf_three in line) + self.assertTrue(conf_three in line) # test no running pids manager.os = MockOs([]) with temptree([], []) as t: manager.PROC_DIR = t self.assertEquals(server.status(), 1) output = pop_stream(f).lower() - self.assert_('no test-server running' in output) + self.assertTrue('no test-server running' in output) # test use provided pids pids = { 1: '1.pid', @@ -1028,7 +1028,7 @@ class TestServer(unittest.TestCase): output = pop_stream(f).strip().splitlines() self.assertEquals(len(output), 2) for line in output: - self.assert_('test-server running' in line) + 
self.assertTrue('test-server running' in line) finally: sys.stdout = old_stdout @@ -1078,11 +1078,11 @@ class TestServer(unittest.TestCase): server.spawn(conf_file) # test pid file pid_file = self.join_run_dir('test-server.pid') - self.assert_(os.path.exists(pid_file)) + self.assertTrue(os.path.exists(pid_file)) pid_on_disk = int(open(pid_file).read().strip()) self.assertEquals(pid_on_disk, 1) # assert procs args - self.assert_(server.procs) + self.assertTrue(server.procs) self.assertEquals(len(server.procs), 1) proc = server.procs[0] expected_args = [ @@ -1102,7 +1102,7 @@ class TestServer(unittest.TestCase): server = manager.Server('test', run_dir=t) # test server run once server.spawn(conf1, once=True) - self.assert_(server.procs) + self.assertTrue(server.procs) self.assertEquals(len(server.procs), 1) proc = server.procs[0] expected_args = ['swift-test-server', conf1, 'once'] @@ -1111,7 +1111,7 @@ class TestServer(unittest.TestCase): self.assertEquals(proc.stderr, proc.stdout) # test server not daemon server.spawn(conf2, daemon=False) - self.assert_(server.procs) + self.assertTrue(server.procs) self.assertEquals(len(server.procs), 2) proc = server.procs[1] expected_args = ['swift-test-server', conf2, 'verbose'] @@ -1121,17 +1121,17 @@ class TestServer(unittest.TestCase): self.assertEquals(proc.stderr, None) # test server wait server.spawn(conf3, wait=False) - self.assert_(server.procs) + self.assertTrue(server.procs) self.assertEquals(len(server.procs), 3) proc = server.procs[2] # assert stdout is /dev/null - self.assert_(isinstance(proc.stdout, file)) + self.assertTrue(isinstance(proc.stdout, file)) self.assertEquals(proc.stdout.name, os.devnull) self.assertEquals(proc.stdout.mode, 'w+b') self.assertEquals(proc.stderr, proc.stdout) # test not daemon over-rides wait server.spawn(conf4, wait=False, daemon=False, once=True) - self.assert_(server.procs) + self.assertTrue(server.procs) self.assertEquals(len(server.procs), 4) proc = server.procs[3] expected_args = 
['swift-test-server', conf4, 'once', @@ -1188,9 +1188,9 @@ class TestServer(unittest.TestCase): pass def fail(self): - print >>self._stdout, 'mock process started' + print('mock process started', file=self._stdout) sleep(self.delay) # perform setup processing - print >>self._stdout, 'mock process failed to start' + print('mock process failed to start', file=self._stdout) self.close_stdout() def poll(self): @@ -1198,12 +1198,12 @@ class TestServer(unittest.TestCase): return self.returncode or None def run(self): - print >>self._stdout, 'mock process started' + print('mock process started', file=self._stdout) sleep(self.delay) # perform setup processing - print >>self._stdout, 'setup complete!' + print('setup complete!', file=self._stdout) self.close_stdout() sleep(self.delay) # do some more processing - print >>self._stdout, 'mock process finished' + print('mock process finished', file=self._stdout) self.finished = True class MockTime(object): @@ -1230,24 +1230,24 @@ class TestServer(unittest.TestCase): status = server.wait() self.assertEquals(status, 0) # wait should return before process exits - self.assert_(proc.isAlive()) + self.assertTrue(proc.isAlive()) self.assertFalse(proc.finished) - self.assert_(proc.finished) # make sure it did finish... 
+ self.assertTrue(proc.finished) # make sure it did finish # test output kwarg prints subprocess output with MockProcess() as proc: server.procs = [proc] status = server.wait(output=True) output = pop_stream(f) - self.assert_('mock process started' in output) - self.assert_('setup complete' in output) + self.assertTrue('mock process started' in output) + self.assertTrue('setup complete' in output) # make sure we don't get prints after stdout was closed - self.assert_('mock process finished' not in output) + self.assertTrue('mock process finished' not in output) # test process which fails to start with MockProcess(fail_to_start=True) as proc: server.procs = [proc] status = server.wait() self.assertEquals(status, 1) - self.assert_('failed' in pop_stream(f)) + self.assertTrue('failed' in pop_stream(f)) # test multiple procs procs = [MockProcess(delay=.5) for i in range(3)] for proc in procs: @@ -1256,7 +1256,7 @@ class TestServer(unittest.TestCase): status = server.wait() self.assertEquals(status, 0) for proc in procs: - self.assert_(proc.isAlive()) + self.assertTrue(proc.isAlive()) for proc in procs: proc.join() finally: @@ -1287,7 +1287,7 @@ class TestServer(unittest.TestCase): for fail in (False, True, True): procs.append(MockProcess(fail=fail)) server.procs = procs - self.assert_(server.interact() > 0) + self.assertTrue(server.interact() > 0) def test_launch(self): # stubs @@ -1352,13 +1352,13 @@ class TestServer(unittest.TestCase): # can't start server if it's already running self.assertFalse(server.launch()) output = pop_stream(f) - self.assert_('running' in output) + self.assertTrue('running' in output) conf_file = self.join_swift_dir( 'proxy-server.conf') - self.assert_(conf_file in output) + self.assertTrue(conf_file in output) pid_file = self.join_run_dir('proxy-server/2.pid') - self.assert_(pid_file in output) - self.assert_('already started' in output) + self.assertTrue(pid_file in output) + self.assertTrue('already started' in output) # no running pids 
manager.os = MockOs([]) with temptree([], []) as proc_dir: @@ -1380,8 +1380,8 @@ class TestServer(unittest.TestCase): } self.assertEquals(mock_spawn.kwargs, [expected]) output = pop_stream(f) - self.assert_('Starting' in output) - self.assert_('once' not in output) + self.assertTrue('Starting' in output) + self.assertTrue('once' not in output) # test multi-server kwarg once server = manager.Server('object-replicator') with temptree([], []) as proc_dir: @@ -1430,8 +1430,9 @@ class TestServer(unittest.TestCase): 'blah')]) server.spawn = mock_spawn self.assertEquals(server.launch(), {}) - self.assert_('swift-auth-server does not exist' in - pop_stream(f)) + self.assertTrue( + 'swift-auth-server does not exist' in + pop_stream(f)) finally: sys.stdout = old_stdout @@ -1461,7 +1462,7 @@ class TestServer(unittest.TestCase): pids = server.stop() self.assertEquals(len(pids), 4) for pid in (1, 2, 3, 4): - self.assert_(pid in pids) + self.assertTrue(pid in pids) self.assertEquals(manager.os.pid_sigs[pid], [signal.SIGTERM]) conf1 = self.join_swift_dir('account-reaper/1.conf') @@ -1473,7 +1474,7 @@ class TestServer(unittest.TestCase): pids = server.stop() self.assertEquals(len(pids), 2) for pid in (3, 4): - self.assert_(pid in pids) + self.assertTrue(pid in pids) self.assertEquals(manager.os.pid_sigs[pid], [signal.SIGTERM]) self.assertFalse(os.path.exists(conf1)) @@ -1485,7 +1486,7 @@ class TestServer(unittest.TestCase): expected = { 3: conf3, } - self.assert_(pids, expected) + self.assertTrue(pids, expected) self.assertEquals(manager.os.pid_sigs[3], [signal.SIGTERM]) self.assertFalse(os.path.exists(conf4)) self.assertFalse(os.path.exists(conf3)) @@ -1497,58 +1498,58 @@ class TestManager(unittest.TestCase): m = manager.Manager(['test']) self.assertEquals(len(m.servers), 1) server = m.servers.pop() - self.assert_(isinstance(server, manager.Server)) + self.assertTrue(isinstance(server, manager.Server)) self.assertEquals(server.server, 'test-server') # test multi-server and simple 
dedupe servers = ['object-replicator', 'object-auditor', 'object-replicator'] m = manager.Manager(servers) self.assertEquals(len(m.servers), 2) for server in m.servers: - self.assert_(server.server in servers) + self.assertTrue(server.server in servers) # test all m = manager.Manager(['all']) self.assertEquals(len(m.servers), len(manager.ALL_SERVERS)) for server in m.servers: - self.assert_(server.server in manager.ALL_SERVERS) + self.assertTrue(server.server in manager.ALL_SERVERS) # test main m = manager.Manager(['main']) self.assertEquals(len(m.servers), len(manager.MAIN_SERVERS)) for server in m.servers: - self.assert_(server.server in manager.MAIN_SERVERS) + self.assertTrue(server.server in manager.MAIN_SERVERS) # test rest m = manager.Manager(['rest']) self.assertEquals(len(m.servers), len(manager.REST_SERVERS)) for server in m.servers: - self.assert_(server.server in manager.REST_SERVERS) + self.assertTrue(server.server in manager.REST_SERVERS) # test main + rest == all m = manager.Manager(['main', 'rest']) self.assertEquals(len(m.servers), len(manager.ALL_SERVERS)) for server in m.servers: - self.assert_(server.server in manager.ALL_SERVERS) + self.assertTrue(server.server in manager.ALL_SERVERS) # test dedupe m = manager.Manager(['main', 'rest', 'proxy', 'object', 'container', 'account']) self.assertEquals(len(m.servers), len(manager.ALL_SERVERS)) for server in m.servers: - self.assert_(server.server in manager.ALL_SERVERS) + self.assertTrue(server.server in manager.ALL_SERVERS) # test glob m = manager.Manager(['object-*']) object_servers = [s for s in manager.ALL_SERVERS if s.startswith('object')] self.assertEquals(len(m.servers), len(object_servers)) for s in m.servers: - self.assert_(str(s) in object_servers) + self.assertTrue(str(s) in object_servers) m = manager.Manager(['*-replicator']) replicators = [s for s in manager.ALL_SERVERS if s.endswith('replicator')] for s in m.servers: - self.assert_(str(s) in replicators) + self.assertTrue(str(s) in 
replicators) def test_iter(self): m = manager.Manager(['all']) self.assertEquals(len(list(m)), len(manager.ALL_SERVERS)) for server in m: - self.assert_(server.server in manager.ALL_SERVERS) + self.assertTrue(server.server in manager.ALL_SERVERS) def test_status(self): class MockServer(object): @@ -1682,7 +1683,7 @@ class TestManager(unittest.TestCase): for server in init.servers: self.assertEquals(len(server.called['launch']), 1) called_kwargs = server.called['launch'][0] - self.assert_('wait' in called_kwargs) + self.assertTrue('wait' in called_kwargs) self.assertFalse(called_kwargs['wait']) self.assertFalse(server.called['wait']) # test wait with once option @@ -1692,10 +1693,10 @@ class TestManager(unittest.TestCase): for server in init.servers: self.assertEquals(len(server.called['launch']), 1) called_kwargs = server.called['launch'][0] - self.assert_('wait' in called_kwargs) + self.assertTrue('wait' in called_kwargs) self.assertFalse(called_kwargs['wait']) - self.assert_('once' in called_kwargs) - self.assert_(called_kwargs['once']) + self.assertTrue('once' in called_kwargs) + self.assertTrue(called_kwargs['once']) self.assertFalse(server.called['wait']) finally: manager.Server = orig_swift_server @@ -1897,8 +1898,8 @@ class TestManager(unittest.TestCase): try: m = _orig_manager(['auth']) for server in m.servers: - self.assert_(server.server in - manager.GRACEFUL_SHUTDOWN_SERVERS) + self.assertTrue(server.server in + manager.GRACEFUL_SHUTDOWN_SERVERS) manager.Manager = MockManager status = m.reload() self.assertEquals(status, 0) @@ -1912,8 +1913,8 @@ class TestManager(unittest.TestCase): m = _orig_manager(['*-server']) self.assertEquals(len(m.servers), 4) for server in m.servers: - self.assert_(server.server in - manager.GRACEFUL_SHUTDOWN_SERVERS) + self.assertTrue(server.server in + manager.GRACEFUL_SHUTDOWN_SERVERS) manager.Manager = MockManager status = m.reload(graceful=False) self.assertEquals(status, 0) @@ -1952,8 +1953,8 @@ class 
TestManager(unittest.TestCase): def test_list_commands(self): for cmd, help in manager.Manager.list_commands(): method = getattr(manager.Manager, cmd.replace('-', '_'), None) - self.assert_(method, '%s is not a command' % cmd) - self.assert_(getattr(method, 'publicly_accessible', False)) + self.assertTrue(method, '%s is not a command' % cmd) + self.assertTrue(getattr(method, 'publicly_accessible', False)) self.assertEquals(method.__doc__.strip(), help) def test_run_command(self): diff --git a/test/unit/common/test_memcached.py b/test/unit/common/test_memcached.py index cd251f15d0..02144fdcec 100644 --- a/test/unit/common/test_memcached.py +++ b/test/unit/common/test_memcached.py @@ -177,7 +177,7 @@ class TestMemcached(unittest.TestCase): key = uuid4().hex for conn in memcache_client._get_conns(key): peeripport = '%s:%s' % conn[2].getpeername() - self.assert_(peeripport in (sock1ipport, sock2ipport)) + self.assertTrue(peeripport in (sock1ipport, sock2ipport)) if peeripport == sock1ipport: one = False if peeripport == sock2ipport: @@ -200,7 +200,7 @@ class TestMemcached(unittest.TestCase): # we should expect to have unicode self.assertEquals( memcache_client.get('some_key'), ['simple str', u'utf8 str éà']) - self.assert_(float(mock.cache.values()[0][1]) == 0) + self.assertTrue(float(mock.cache.values()[0][1]) == 0) memcache_client.set('some_key', [1, 2, 3], timeout=10) self.assertEquals(mock.cache.values()[0][1], '10') memcache_client.set('some_key', [1, 2, 3], time=20) @@ -209,9 +209,11 @@ class TestMemcached(unittest.TestCase): sixtydays = 60 * 24 * 60 * 60 esttimeout = time.time() + sixtydays memcache_client.set('some_key', [1, 2, 3], timeout=sixtydays) - self.assert_(-1 <= float(mock.cache.values()[0][1]) - esttimeout <= 1) + self.assertTrue( + -1 <= float(mock.cache.values()[0][1]) - esttimeout <= 1) memcache_client.set('some_key', [1, 2, 3], time=sixtydays) - self.assert_(-1 <= float(mock.cache.values()[0][1]) - esttimeout <= 1) + self.assertTrue( + -1 <= 
float(mock.cache.values()[0][1]) - esttimeout <= 1) def test_incr(self): memcache_client = memcached.MemcacheRing(['1.2.3.4:11211']) @@ -247,7 +249,8 @@ class TestMemcached(unittest.TestCase): esttimeout = time.time() + fiftydays memcache_client.incr('some_key', delta=5, time=fiftydays) self.assertEquals(memcache_client.get('some_key'), '5') - self.assert_(-1 <= float(mock.cache.values()[0][1]) - esttimeout <= 1) + self.assertTrue( + -1 <= float(mock.cache.values()[0][1]) - esttimeout <= 1) memcache_client.delete('some_key') self.assertEquals(memcache_client.get('some_key'), None) memcache_client.incr('some_key', delta=5) @@ -326,8 +329,10 @@ class TestMemcached(unittest.TestCase): memcache_client.set_multi( {'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key', timeout=fortydays) - self.assert_(-1 <= float(mock.cache.values()[0][1]) - esttimeout <= 1) - self.assert_(-1 <= float(mock.cache.values()[1][1]) - esttimeout <= 1) + self.assertTrue( + -1 <= float(mock.cache.values()[0][1]) - esttimeout <= 1) + self.assertTrue( + -1 <= float(mock.cache.values()[1][1]) - esttimeout <= 1) self.assertEquals(memcache_client.get_multi( ('some_key2', 'some_key1', 'not_exists'), 'multi_key'), [[4, 5, 6], [1, 2, 3], None]) diff --git a/test/unit/common/test_splice.py b/test/unit/common/test_splice.py index 51a2e95f67..745e7ea2a8 100644 --- a/test/unit/common/test_splice.py +++ b/test/unit/common/test_splice.py @@ -64,10 +64,10 @@ class TestSplice(unittest.TestCase): def test_flags(self): '''Test flag attribute availability''' - self.assert_(hasattr(splice, 'SPLICE_F_MOVE')) - self.assert_(hasattr(splice, 'SPLICE_F_NONBLOCK')) - self.assert_(hasattr(splice, 'SPLICE_F_MORE')) - self.assert_(hasattr(splice, 'SPLICE_F_GIFT')) + self.assertTrue(hasattr(splice, 'SPLICE_F_MOVE')) + self.assertTrue(hasattr(splice, 'SPLICE_F_NONBLOCK')) + self.assertTrue(hasattr(splice, 'SPLICE_F_MORE')) + self.assertTrue(hasattr(splice, 'SPLICE_F_GIFT')) 
@mock.patch('swift.common.splice.splice._c_splice', None) def test_available(self): diff --git a/test/unit/common/test_storage_policy.py b/test/unit/common/test_storage_policy.py index 6e3f217db0..c7c0453c17 100644 --- a/test/unit/common/test_storage_policy.py +++ b/test/unit/common/test_storage_policy.py @@ -12,12 +12,13 @@ # limitations under the License. """ Tests for swift.common.storage_policies """ +import six import unittest -import StringIO -from ConfigParser import ConfigParser import os import mock from functools import partial + +from six.moves.configparser import ConfigParser from tempfile import NamedTemporaryFile from test.unit import patch_policies, FakeRing, temptree from swift.common.storage_policy import ( @@ -46,7 +47,7 @@ class TestStoragePolicies(unittest.TestCase): def _conf(self, conf_str): conf_str = "\n".join(line.strip() for line in conf_str.split("\n")) conf = ConfigParser() - conf.readfp(StringIO.StringIO(conf_str)) + conf.readfp(six.StringIO(conf_str)) return conf def assertRaisesWithMessage(self, exc_class, message, f, *args, **kwargs): @@ -54,8 +55,8 @@ class TestStoragePolicies(unittest.TestCase): f(*args, **kwargs) except exc_class as err: err_msg = str(err) - self.assert_(message in err_msg, 'Error message %r did not ' - 'have expected substring %r' % (err_msg, message)) + self.assertTrue(message in err_msg, 'Error message %r did not ' + 'have expected substring %r' % (err_msg, message)) else: self.fail('%r did not raise %s' % (message, exc_class.__name__)) @@ -133,11 +134,11 @@ class TestStoragePolicies(unittest.TestCase): # test class functions default_policy = POLICIES.default - self.assert_(default_policy.is_default) + self.assertTrue(default_policy.is_default) zero_policy = POLICIES.get_by_index(0) - self.assert_(zero_policy.idx == 0) + self.assertTrue(zero_policy.idx == 0) zero_policy_by_name = POLICIES.get_by_name(zero_policy.name) - self.assert_(zero_policy_by_name.idx == 0) + self.assertTrue(zero_policy_by_name.idx == 0) 
def test_storage_policy_repr(self): test_policies = [StoragePolicy(0, 'aay', True), @@ -148,24 +149,25 @@ class TestStoragePolicies(unittest.TestCase): policies = StoragePolicyCollection(test_policies) for policy in policies: policy_repr = repr(policy) - self.assert_(policy.__class__.__name__ in policy_repr) - self.assert_('is_default=%s' % policy.is_default in policy_repr) - self.assert_('is_deprecated=%s' % policy.is_deprecated in - policy_repr) - self.assert_(policy.name in policy_repr) + self.assertTrue(policy.__class__.__name__ in policy_repr) + self.assertTrue('is_default=%s' % policy.is_default in policy_repr) + self.assertTrue('is_deprecated=%s' % policy.is_deprecated in + policy_repr) + self.assertTrue(policy.name in policy_repr) if policy.policy_type == EC_POLICY: - self.assert_('ec_type=%s' % policy.ec_type in policy_repr) - self.assert_('ec_ndata=%s' % policy.ec_ndata in policy_repr) - self.assert_('ec_nparity=%s' % - policy.ec_nparity in policy_repr) - self.assert_('ec_segment_size=%s' % - policy.ec_segment_size in policy_repr) + self.assertTrue('ec_type=%s' % policy.ec_type in policy_repr) + self.assertTrue('ec_ndata=%s' % policy.ec_ndata in policy_repr) + self.assertTrue('ec_nparity=%s' % + policy.ec_nparity in policy_repr) + self.assertTrue('ec_segment_size=%s' % + policy.ec_segment_size in policy_repr) collection_repr = repr(policies) collection_repr_lines = collection_repr.splitlines() - self.assert_(policies.__class__.__name__ in collection_repr_lines[0]) + self.assertTrue( + policies.__class__.__name__ in collection_repr_lines[0]) self.assertEqual(len(policies), len(collection_repr_lines[1:-1])) for policy, line in zip(policies, collection_repr_lines[1:-1]): - self.assert_(repr(policy) in line) + self.assertTrue(repr(policy) in line) with patch_policies(policies): self.assertEqual(repr(POLICIES), collection_repr) @@ -362,7 +364,7 @@ class TestStoragePolicies(unittest.TestCase): policies = parse_storage_policies(orig_conf) 
self.assertEqual(policies.default, policies[1]) - self.assert_(policies[0].name, 'Policy-0') + self.assertTrue(policies[0].name, 'Policy-0') bad_conf = self._conf(""" [storage-policy:0] @@ -388,7 +390,7 @@ class TestStoragePolicies(unittest.TestCase): policies = parse_storage_policies(good_conf) self.assertEqual(policies.default, policies[0]) - self.assert_(policies[1].is_deprecated, True) + self.assertTrue(policies[1].is_deprecated, True) def test_parse_storage_policies(self): # ValueError when deprecating policy 0 @@ -692,8 +694,9 @@ class TestStoragePolicies(unittest.TestCase): 'Duplicate index', ] for expected in parts: - self.assert_(expected in err_msg, '%s was not in %s' % (expected, - err_msg)) + self.assertTrue( + expected in err_msg, '%s was not in %s' % (expected, + err_msg)) def test_storage_policy_ordering(self): test_policies = StoragePolicyCollection([ @@ -727,7 +730,7 @@ class TestStoragePolicies(unittest.TestCase): ring = policies.get_object_ring(int(policy), '/path/not/used') self.assertEqual(ring.ring_name, policy.ring_name) self.assertTrue(policy.object_ring) - self.assert_(isinstance(policy.object_ring, NamedFakeRing)) + self.assertTrue(isinstance(policy.object_ring, NamedFakeRing)) def blow_up(*args, **kwargs): raise Exception('kaboom!') diff --git a/test/unit/common/test_swob.py b/test/unit/common/test_swob.py index 65764e38d9..96fa2ef974 100644 --- a/test/unit/common/test_swob.py +++ b/test/unit/common/test_swob.py @@ -19,9 +19,10 @@ import datetime import unittest import re import time -from StringIO import StringIO from urllib import quote +from six import BytesIO + import swift.common.swob from swift.common import utils, exceptions @@ -58,9 +59,9 @@ class TestHeaderEnvironProxy(unittest.TestCase): proxy['Content-Length'] = 20 proxy['Content-Type'] = 'text/plain' proxy['Something-Else'] = 'somevalue' - self.assert_('content-length' in proxy) - self.assert_('content-type' in proxy) - self.assert_('something-else' in proxy) + 
self.assertTrue('content-length' in proxy) + self.assertTrue('content-type' in proxy) + self.assertTrue('something-else' in proxy) def test_keys(self): environ = {} @@ -103,9 +104,9 @@ class TestHeaderKeyDict(unittest.TestCase): def test_del_contains(self): headers = swift.common.swob.HeaderKeyDict() headers['Content-Length'] = 0 - self.assert_('Content-Length' in headers) + self.assertTrue('Content-Length' in headers) del headers['Content-Length'] - self.assert_('Content-Length' not in headers) + self.assertTrue('Content-Length' not in headers) def test_update(self): headers = swift.common.swob.HeaderKeyDict() @@ -279,37 +280,37 @@ class TestRange(unittest.TestCase): 6. any combination of the above """ - self.assert_(_check_invalid_range('nonbytes=foobar,10-2')) - self.assert_(_check_invalid_range('bytes=5-3')) - self.assert_(_check_invalid_range('bytes=-')) - self.assert_(_check_invalid_range('bytes=45')) - self.assert_(_check_invalid_range('bytes=foo-bar,3-5')) - self.assert_(_check_invalid_range('bytes=4-10,45')) - self.assert_(_check_invalid_range('bytes=foobar,3-5')) - self.assert_(_check_invalid_range('bytes=nonumber-5')) - self.assert_(_check_invalid_range('bytes=nonumber')) + self.assertTrue(_check_invalid_range('nonbytes=foobar,10-2')) + self.assertTrue(_check_invalid_range('bytes=5-3')) + self.assertTrue(_check_invalid_range('bytes=-')) + self.assertTrue(_check_invalid_range('bytes=45')) + self.assertTrue(_check_invalid_range('bytes=foo-bar,3-5')) + self.assertTrue(_check_invalid_range('bytes=4-10,45')) + self.assertTrue(_check_invalid_range('bytes=foobar,3-5')) + self.assertTrue(_check_invalid_range('bytes=nonumber-5')) + self.assertTrue(_check_invalid_range('bytes=nonumber')) class TestMatch(unittest.TestCase): def test_match(self): match = swift.common.swob.Match('"a", "b"') self.assertEquals(match.tags, set(('a', 'b'))) - self.assert_('a' in match) - self.assert_('b' in match) - self.assert_('c' not in match) + self.assertTrue('a' in match) + 
self.assertTrue('b' in match) + self.assertTrue('c' not in match) def test_match_star(self): match = swift.common.swob.Match('"a", "*"') - self.assert_('a' in match) - self.assert_('b' in match) - self.assert_('c' in match) + self.assertTrue('a' in match) + self.assertTrue('b' in match) + self.assertTrue('c' in match) def test_match_noquote(self): match = swift.common.swob.Match('a, b') self.assertEquals(match.tags, set(('a', 'b'))) - self.assert_('a' in match) - self.assert_('b' in match) - self.assert_('c' not in match) + self.assertTrue('a' in match) + self.assertTrue('b' in match) + self.assertTrue('c' not in match) class TestAccept(unittest.TestCase): @@ -430,8 +431,8 @@ class TestRequest(unittest.TestCase): self.assertEquals("got unexpected keyword argument 'params'", str(e)) else: - self.assert_(False, "invalid req_environ_property " - "didn't raise error!") + self.assertTrue(False, "invalid req_environ_property " + "didn't raise error!") # regular attribute try: swift.common.swob.Request.blank('/', _params_cache={'a': 'b'}) @@ -439,8 +440,8 @@ class TestRequest(unittest.TestCase): self.assertEquals("got unexpected keyword " "argument '_params_cache'", str(e)) else: - self.assert_(False, "invalid req_environ_property " - "didn't raise error!") + self.assertTrue(False, "invalid req_environ_property " + "didn't raise error!") # non-existent attribute try: swift.common.swob.Request.blank('/', params_cache={'a': 'b'}) @@ -448,8 +449,8 @@ class TestRequest(unittest.TestCase): self.assertEquals("got unexpected keyword " "argument 'params_cache'", str(e)) else: - self.assert_(False, "invalid req_environ_property " - "didn't raise error!") + self.assertTrue(False, "invalid req_environ_property " + "didn't raise error!") # method try: swift.common.swob.Request.blank( @@ -458,8 +459,8 @@ class TestRequest(unittest.TestCase): self.assertEquals("got unexpected keyword " "argument 'as_referer'", str(e)) else: - self.assert_(False, "invalid req_environ_property " - 
"didn't raise error!") + self.assertTrue(False, "invalid req_environ_property " + "didn't raise error!") def test_blank_path_info_precedence(self): blank = swift.common.swob.Request.blank @@ -475,22 +476,22 @@ class TestRequest(unittest.TestCase): def test_blank_body_precedence(self): req = swift.common.swob.Request.blank( '/', environ={'REQUEST_METHOD': 'POST', - 'wsgi.input': StringIO('')}, + 'wsgi.input': BytesIO(b'')}, headers={'Content-Type': 'text/plain'}, body='hi') self.assertEquals(req.path_info, '/') self.assertEquals(req.body, 'hi') self.assertEquals(req.headers['Content-Type'], 'text/plain') self.assertEquals(req.method, 'POST') - body_file = StringIO('asdf') + body_file = BytesIO(b'asdf') req = swift.common.swob.Request.blank( '/', environ={'REQUEST_METHOD': 'POST', - 'wsgi.input': StringIO('')}, + 'wsgi.input': BytesIO(b'')}, headers={'Content-Type': 'text/plain'}, body='hi', body_file=body_file) - self.assert_(req.body_file is body_file) + self.assertTrue(req.body_file is body_file) req = swift.common.swob.Request.blank( '/', environ={'REQUEST_METHOD': 'POST', - 'wsgi.input': StringIO('')}, + 'wsgi.input': BytesIO(b'')}, headers={'Content-Type': 'text/plain'}, body='hi', content_length=3) self.assertEquals(req.content_length, 3) @@ -595,12 +596,12 @@ class TestRequest(unittest.TestCase): # No request environment resp = swift.common.swob.HTTPUnauthorized() self.assertEquals(resp.status_int, 401) - self.assert_('Www-Authenticate' in resp.headers) + self.assertTrue('Www-Authenticate' in resp.headers) # Request environment req = swift.common.swob.Request.blank('/') resp = swift.common.swob.HTTPUnauthorized(request=req) self.assertEquals(resp.status_int, 401) - self.assert_('Www-Authenticate' in resp.headers) + self.assertTrue('Www-Authenticate' in resp.headers) def test_401_valid_account_path(self): @@ -612,7 +613,7 @@ class TestRequest(unittest.TestCase): req = swift.common.swob.Request.blank('/v1/account-name') resp = req.get_response(test_app) 
self.assertEquals(resp.status_int, 401) - self.assert_('Www-Authenticate' in resp.headers) + self.assertTrue('Www-Authenticate' in resp.headers) self.assertEquals('Swift realm="account-name"', resp.headers['Www-Authenticate']) @@ -620,7 +621,7 @@ class TestRequest(unittest.TestCase): req = swift.common.swob.Request.blank('/v1/account-name/c') resp = req.get_response(test_app) self.assertEquals(resp.status_int, 401) - self.assert_('Www-Authenticate' in resp.headers) + self.assertTrue('Www-Authenticate' in resp.headers) self.assertEquals('Swift realm="account-name"', resp.headers['Www-Authenticate']) @@ -634,7 +635,7 @@ class TestRequest(unittest.TestCase): req = swift.common.swob.Request.blank('/random') resp = req.get_response(test_app) self.assertEquals(resp.status_int, 401) - self.assert_('Www-Authenticate' in resp.headers) + self.assertTrue('Www-Authenticate' in resp.headers) self.assertEquals('Swift realm="unknown"', resp.headers['Www-Authenticate']) @@ -648,7 +649,7 @@ class TestRequest(unittest.TestCase): req = swift.common.swob.Request.blank('/v1.0/auth') resp = req.get_response(test_app) self.assertEquals(resp.status_int, 401) - self.assert_('Www-Authenticate' in resp.headers) + self.assertTrue('Www-Authenticate' in resp.headers) self.assertEquals('Swift realm="unknown"', resp.headers['Www-Authenticate']) @@ -656,7 +657,7 @@ class TestRequest(unittest.TestCase): req = swift.common.swob.Request.blank('/auth/v1.0') resp = req.get_response(test_app) self.assertEquals(resp.status_int, 401) - self.assert_('Www-Authenticate' in resp.headers) + self.assertTrue('Www-Authenticate' in resp.headers) self.assertEquals('Swift realm="unknown"', resp.headers['Www-Authenticate']) @@ -671,7 +672,7 @@ class TestRequest(unittest.TestCase): req = swift.common.swob.Request.blank('/auth/v1.0') resp = req.get_response(test_app) self.assertEquals(resp.status_int, 401) - self.assert_('Www-Authenticate' in resp.headers) + self.assertTrue('Www-Authenticate' in resp.headers) 
self.assertEquals('Me realm="whatever"', resp.headers['Www-Authenticate']) @@ -686,14 +687,14 @@ class TestRequest(unittest.TestCase): req = swift.common.swob.Request.blank('/v1/' + hacker) resp = req.get_response(test_app) self.assertEquals(resp.status_int, 401) - self.assert_('Www-Authenticate' in resp.headers) + self.assertTrue('Www-Authenticate' in resp.headers) self.assertEquals('Swift realm="%s"' % quoted_hacker, resp.headers['Www-Authenticate']) req = swift.common.swob.Request.blank('/v1/' + quoted_hacker) resp = req.get_response(test_app) self.assertEquals(resp.status_int, 401) - self.assert_('Www-Authenticate' in resp.headers) + self.assertTrue('Www-Authenticate' in resp.headers) self.assertEquals('Swift realm="%s"' % quoted_hacker, resp.headers['Www-Authenticate']) @@ -706,7 +707,7 @@ class TestRequest(unittest.TestCase): req = swift.common.swob.Request.blank('/') resp = req.get_response(test_app) - self.assert_('Www-Authenticate' not in resp.headers) + self.assertTrue('Www-Authenticate' not in resp.headers) def test_properties(self): req = swift.common.swob.Request.blank('/hi/there', body='hi') @@ -724,15 +725,15 @@ class TestRequest(unittest.TestCase): req.range = 'bytes=1-7' self.assertEquals(req.range.ranges[0], (1, 7)) - self.assert_('Range' in req.headers) + self.assertTrue('Range' in req.headers) req.range = None - self.assert_('Range' not in req.headers) + self.assertTrue('Range' not in req.headers) def test_datetime_properties(self): req = swift.common.swob.Request.blank('/hi/there', body='hi') req.if_unmodified_since = 0 - self.assert_(isinstance(req.if_unmodified_since, datetime.datetime)) + self.assertTrue(isinstance(req.if_unmodified_since, datetime.datetime)) if_unmodified_since = req.if_unmodified_since req.if_unmodified_since = if_unmodified_since self.assertEquals(if_unmodified_since, req.if_unmodified_since) @@ -741,9 +742,9 @@ class TestRequest(unittest.TestCase): self.assertEquals(req.headers['If-Unmodified-Since'], 'something') 
self.assertEquals(req.if_unmodified_since, None) - self.assert_('If-Unmodified-Since' in req.headers) + self.assertTrue('If-Unmodified-Since' in req.headers) req.if_unmodified_since = None - self.assert_('If-Unmodified-Since' not in req.headers) + self.assertTrue('If-Unmodified-Since' not in req.headers) too_big_date_list = list(datetime.datetime.max.timetuple()) too_big_date_list[0] += 1 # bump up the year @@ -991,11 +992,11 @@ class TestStatusMap(unittest.TestCase): self.assertEquals(resp.status_int, 404) self.assertEquals(resp.title, 'Not Found') body = ''.join(resp({}, start_response)) - self.assert_('The resource could not be found.' in body) + self.assertTrue('The resource could not be found.' in body) self.assertEquals(response_args[0], '404 Not Found') headers = dict(response_args[1]) self.assertEquals(headers['Content-Type'], 'text/html; charset=UTF-8') - self.assert_(int(headers['Content-Length']) > 0) + self.assertTrue(int(headers['Content-Length']) > 0) class TestResponse(unittest.TestCase): @@ -1012,14 +1013,14 @@ class TestResponse(unittest.TestCase): resp.location = 'something' self.assertEquals(resp.location, 'something') - self.assert_('Location' in resp.headers) + self.assertTrue('Location' in resp.headers) resp.location = None - self.assert_('Location' not in resp.headers) + self.assertTrue('Location' not in resp.headers) resp.content_type = 'text/plain' - self.assert_('Content-Type' in resp.headers) + self.assertTrue('Content-Type' in resp.headers) resp.content_type = None - self.assert_('Content-Type' not in resp.headers) + self.assertTrue('Content-Type' not in resp.headers) def test_empty_body(self): resp = self._get_response() @@ -1208,19 +1209,19 @@ class TestResponse(unittest.TestCase): ('0123456789112345678' '92123456789'))) - self.assert_(re.match(('--[a-f0-9]{32}\r\n' - 'Content-Type: text/plain\r\n' - 'Content-Range: bytes ' - '0-9/100\r\n\r\n0123456789\r\n' - '--[a-f0-9]{32}\r\n' - 'Content-Type: text/plain\r\n' - 'Content-Range: bytes 
' - '10-19/100\r\n\r\n1123456789\r\n' - '--[a-f0-9]{32}\r\n' - 'Content-Type: text/plain\r\n' - 'Content-Range: bytes ' - '20-29/100\r\n\r\n2123456789\r\n' - '--[a-f0-9]{32}--'), content)) + self.assertTrue(re.match(('--[a-f0-9]{32}\r\n' + 'Content-Type: text/plain\r\n' + 'Content-Range: bytes ' + '0-9/100\r\n\r\n0123456789\r\n' + '--[a-f0-9]{32}\r\n' + 'Content-Type: text/plain\r\n' + 'Content-Range: bytes ' + '10-19/100\r\n\r\n1123456789\r\n' + '--[a-f0-9]{32}\r\n' + 'Content-Type: text/plain\r\n' + 'Content-Range: bytes ' + '20-29/100\r\n\r\n2123456789\r\n' + '--[a-f0-9]{32}--'), content)) def test_multi_response_iter(self): def test_app(environ, start_response): @@ -1337,9 +1338,9 @@ class TestResponse(unittest.TestCase): self.assertEquals(resp.headers['Etag'], '"hi"') self.assertEquals(resp.etag, 'hi') - self.assert_('etag' in resp.headers) + self.assertTrue('etag' in resp.headers) resp.etag = None - self.assert_('etag' not in resp.headers) + self.assertTrue('etag' not in resp.headers) def test_host_url_default(self): resp = self._get_response() diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index be98ed8cfb..653d939cce 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -14,7 +14,7 @@ # limitations under the License. 
"""Tests for swift.common.utils""" - +from __future__ import print_function from test.unit import temptree import ctypes @@ -28,13 +28,15 @@ import os import mock import random import re -from six.moves import range import socket import stat import sys import json import math +from six import StringIO +from six.moves.queue import Queue, Empty +from six.moves import range from textwrap import dedent import tempfile @@ -46,10 +48,8 @@ import fcntl import shutil from contextlib import nested -from Queue import Queue, Empty from getpass import getuser from shutil import rmtree -from StringIO import StringIO from functools import partial from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp from netifaces import AF_INET6 @@ -944,23 +944,23 @@ class TestUtils(unittest.TestCase): testdir_base = mkdtemp() testroot = os.path.join(testdir_base, 'mkdirs') try: - self.assert_(not os.path.exists(testroot)) + self.assertTrue(not os.path.exists(testroot)) utils.mkdirs(testroot) - self.assert_(os.path.exists(testroot)) + self.assertTrue(os.path.exists(testroot)) utils.mkdirs(testroot) - self.assert_(os.path.exists(testroot)) + self.assertTrue(os.path.exists(testroot)) rmtree(testroot, ignore_errors=1) testdir = os.path.join(testroot, 'one/two/three') - self.assert_(not os.path.exists(testdir)) + self.assertTrue(not os.path.exists(testdir)) utils.mkdirs(testdir) - self.assert_(os.path.exists(testdir)) + self.assertTrue(os.path.exists(testdir)) utils.mkdirs(testdir) - self.assert_(os.path.exists(testdir)) + self.assertTrue(os.path.exists(testdir)) rmtree(testroot, ignore_errors=1) open(testroot, 'wb').close() - self.assert_(not os.path.exists(testdir)) + self.assertTrue(not os.path.exists(testdir)) self.assertRaises(OSError, utils.mkdirs, testdir) os.unlink(testroot) finally: @@ -1047,22 +1047,22 @@ class TestUtils(unittest.TestCase): lfo_stdout = utils.LoggerFileObject(logger) lfo_stderr = utils.LoggerFileObject(logger) lfo_stderr = utils.LoggerFileObject(logger, 
'STDERR') - print 'test1' + print('test1') self.assertEquals(sio.getvalue(), '') sys.stdout = lfo_stdout - print 'test2' + print('test2') self.assertEquals(sio.getvalue(), 'STDOUT: test2\n') sys.stderr = lfo_stderr - print >> sys.stderr, 'test4' + print('test4', file=sys.stderr) self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n') sys.stdout = orig_stdout - print 'test5' + print('test5') self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n') - print >> sys.stderr, 'test6' + print('test6', file=sys.stderr) self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n' 'STDERR: test6\n') sys.stderr = orig_stderr - print 'test8' + print('test8') self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n' 'STDERR: test6\n') lfo_stdout.writelines(['a', 'b', 'c']) @@ -1083,14 +1083,14 @@ class TestUtils(unittest.TestCase): pass except Exception: got_exc = True - self.assert_(got_exc) + self.assertTrue(got_exc) got_exc = False try: - for line in lfo.xreadlines(): + for line in lfo: pass except Exception: got_exc = True - self.assert_(got_exc) + self.assertTrue(got_exc) self.assertRaises(IOError, lfo.read) self.assertRaises(IOError, lfo.read, 1024) self.assertRaises(IOError, lfo.readline) @@ -1105,7 +1105,7 @@ class TestUtils(unittest.TestCase): self.assertEquals(conf, conf_file) # assert defaults self.assertEquals(options['verbose'], False) - self.assert_('once' not in options) + self.assertTrue('once' not in options) # assert verbose as option conf, options = utils.parse_options(test_args=[conf_file, '-v']) self.assertEquals(options['verbose'], True) @@ -1132,14 +1132,14 @@ class TestUtils(unittest.TestCase): utils.sys.stderr = stde self.assertRaises(SystemExit, utils.parse_options, once=True, test_args=[]) - self.assert_('missing config' in stdo.getvalue()) + self.assertTrue('missing config' in stdo.getvalue()) # verify conf file must exist, context manager will delete temp file with NamedTemporaryFile() as f: conf_file = f.name 
self.assertRaises(SystemExit, utils.parse_options, once=True, test_args=[conf_file]) - self.assert_('unable to locate' in stdo.getvalue()) + self.assertTrue('unable to locate' in stdo.getvalue()) # reset stdio utils.sys.stdout = orig_stdout @@ -1294,60 +1294,60 @@ class TestUtils(unittest.TestCase): for en in (errno.EIO, errno.ENOSPC): log_exception(OSError(en, 'my %s error message' % en)) log_msg = strip_value(sio) - self.assert_('Traceback' not in log_msg) - self.assert_('my %s error message' % en in log_msg) + self.assertTrue('Traceback' not in log_msg) + self.assertTrue('my %s error message' % en in log_msg) # unfiltered log_exception(OSError()) - self.assert_('Traceback' in strip_value(sio)) + self.assertTrue('Traceback' in strip_value(sio)) # test socket.error log_exception(socket.error(errno.ECONNREFUSED, 'my error message')) log_msg = strip_value(sio) - self.assert_('Traceback' not in log_msg) - self.assert_('errno.ECONNREFUSED message test' not in log_msg) - self.assert_('Connection refused' in log_msg) + self.assertTrue('Traceback' not in log_msg) + self.assertTrue('errno.ECONNREFUSED message test' not in log_msg) + self.assertTrue('Connection refused' in log_msg) log_exception(socket.error(errno.EHOSTUNREACH, 'my error message')) log_msg = strip_value(sio) - self.assert_('Traceback' not in log_msg) - self.assert_('my error message' not in log_msg) - self.assert_('Host unreachable' in log_msg) + self.assertTrue('Traceback' not in log_msg) + self.assertTrue('my error message' not in log_msg) + self.assertTrue('Host unreachable' in log_msg) log_exception(socket.error(errno.ETIMEDOUT, 'my error message')) log_msg = strip_value(sio) - self.assert_('Traceback' not in log_msg) - self.assert_('my error message' not in log_msg) - self.assert_('Connection timeout' in log_msg) + self.assertTrue('Traceback' not in log_msg) + self.assertTrue('my error message' not in log_msg) + self.assertTrue('Connection timeout' in log_msg) # unfiltered 
log_exception(socket.error(0, 'my error message')) log_msg = strip_value(sio) - self.assert_('Traceback' in log_msg) - self.assert_('my error message' in log_msg) + self.assertTrue('Traceback' in log_msg) + self.assertTrue('my error message' in log_msg) # test eventlet.Timeout connection_timeout = ConnectionTimeout(42, 'my error message') log_exception(connection_timeout) log_msg = strip_value(sio) - self.assert_('Traceback' not in log_msg) - self.assert_('ConnectionTimeout' in log_msg) - self.assert_('(42s)' in log_msg) - self.assert_('my error message' not in log_msg) + self.assertTrue('Traceback' not in log_msg) + self.assertTrue('ConnectionTimeout' in log_msg) + self.assertTrue('(42s)' in log_msg) + self.assertTrue('my error message' not in log_msg) connection_timeout.cancel() message_timeout = MessageTimeout(42, 'my error message') log_exception(message_timeout) log_msg = strip_value(sio) - self.assert_('Traceback' not in log_msg) - self.assert_('MessageTimeout' in log_msg) - self.assert_('(42s)' in log_msg) - self.assert_('my error message' in log_msg) + self.assertTrue('Traceback' not in log_msg) + self.assertTrue('MessageTimeout' in log_msg) + self.assertTrue('(42s)' in log_msg) + self.assertTrue('my error message' in log_msg) message_timeout.cancel() # test unhandled log_exception(Exception('my error message')) log_msg = strip_value(sio) - self.assert_('Traceback' in log_msg) - self.assert_('my error message' in log_msg) + self.assertTrue('Traceback' in log_msg) + self.assertTrue('my error message' in log_msg) finally: logger.logger.removeHandler(handler) @@ -1425,19 +1425,19 @@ class TestUtils(unittest.TestCase): self.assertFalse(logger.txn_id) logger.error('my error message') log_msg = strip_value(sio) - self.assert_('my error message' in log_msg) - self.assert_('txn' not in log_msg) + self.assertTrue('my error message' in log_msg) + self.assertTrue('txn' not in log_msg) logger.txn_id = '12345' logger.error('test') log_msg = strip_value(sio) - 
self.assert_('txn' in log_msg) - self.assert_('12345' in log_msg) + self.assertTrue('txn' in log_msg) + self.assertTrue('12345' in log_msg) # test no txn on info message self.assertEquals(logger.txn_id, '12345') logger.info('test') log_msg = strip_value(sio) - self.assert_('txn' not in log_msg) - self.assert_('12345' not in log_msg) + self.assertTrue('txn' not in log_msg) + self.assertTrue('12345' not in log_msg) # test txn already in message self.assertEquals(logger.txn_id, '12345') logger.warn('test 12345 test') @@ -1445,25 +1445,25 @@ class TestUtils(unittest.TestCase): # Test multi line collapsing logger.error('my\nerror\nmessage') log_msg = strip_value(sio) - self.assert_('my#012error#012message' in log_msg) + self.assertTrue('my#012error#012message' in log_msg) # test client_ip self.assertFalse(logger.client_ip) logger.error('my error message') log_msg = strip_value(sio) - self.assert_('my error message' in log_msg) - self.assert_('client_ip' not in log_msg) + self.assertTrue('my error message' in log_msg) + self.assertTrue('client_ip' not in log_msg) logger.client_ip = '1.2.3.4' logger.error('test') log_msg = strip_value(sio) - self.assert_('client_ip' in log_msg) - self.assert_('1.2.3.4' in log_msg) + self.assertTrue('client_ip' in log_msg) + self.assertTrue('1.2.3.4' in log_msg) # test no client_ip on info message self.assertEquals(logger.client_ip, '1.2.3.4') logger.info('test') log_msg = strip_value(sio) - self.assert_('client_ip' not in log_msg) - self.assert_('1.2.3.4' not in log_msg) + self.assertTrue('client_ip' not in log_msg) + self.assertTrue('1.2.3.4' not in log_msg) # test client_ip (and txn) already in message self.assertEquals(logger.client_ip, '1.2.3.4') logger.warn('test 1.2.3.4 test 12345') @@ -1486,8 +1486,8 @@ class TestUtils(unittest.TestCase): def test_whataremyips(self): myips = utils.whataremyips() - self.assert_(len(myips) > 1) - self.assert_('127.0.0.1' in myips) + self.assertTrue(len(myips) > 1) + self.assertTrue('127.0.0.1' in 
myips) def test_whataremyips_bind_to_all(self): for any_addr in ('0.0.0.0', '0000:0000:0000:0000:0000:0000:0000:0000', @@ -1495,8 +1495,8 @@ class TestUtils(unittest.TestCase): # Wacky parse-error input produces all IPs 'I am a bear'): myips = utils.whataremyips(any_addr) - self.assert_(len(myips) > 1) - self.assert_('127.0.0.1' in myips) + self.assertTrue(len(myips) > 1) + self.assertTrue('127.0.0.1' in myips) def test_whataremyips_bind_ip_specific(self): self.assertEqual(['1.2.3.4'], utils.whataremyips('1.2.3.4')) @@ -1556,9 +1556,9 @@ class TestUtils(unittest.TestCase): utils.HASH_PATH_PREFIX = _prefix def test_load_libc_function(self): - self.assert_(callable( + self.assertTrue(callable( utils.load_libc_function('printf'))) - self.assert_(callable( + self.assertTrue(callable( utils.load_libc_function('some_not_real_function'))) self.assertRaises(AttributeError, utils.load_libc_function, 'some_not_real_function', @@ -1721,7 +1721,7 @@ log_name = %(yarr)s''' # exercise the code utils.drop_privileges(user) for func in required_func_calls: - self.assert_(utils.os.called_funcs[func]) + self.assertTrue(utils.os.called_funcs[func]) import pwd self.assertEquals(pwd.getpwnam(user)[5], utils.os.environ['HOME']) @@ -1736,7 +1736,7 @@ log_name = %(yarr)s''' self.assertFalse(utils.os.called_funcs.get(func, False)) utils.drop_privileges(user) for func in required_func_calls: - self.assert_(utils.os.called_funcs[func]) + self.assertTrue(utils.os.called_funcs[func]) def test_drop_privileges_no_call_setsid(self): user = getuser() @@ -1749,9 +1749,9 @@ log_name = %(yarr)s''' # exercise the code utils.drop_privileges(user, call_setsid=False) for func in required_func_calls: - self.assert_(utils.os.called_funcs[func]) + self.assertTrue(utils.os.called_funcs[func]) for func in bad_func_calls: - self.assert_(func not in utils.os.called_funcs) + self.assertTrue(func not in utils.os.called_funcs) @reset_logger_state def test_capture_stdio(self): @@ -1767,10 +1767,12 @@ log_name = 
%(yarr)s''' # basic test utils.capture_stdio(logger) - self.assert_(utils.sys.excepthook is not None) + self.assertTrue(utils.sys.excepthook is not None) self.assertEquals(utils.os.closed_fds, utils.sys.stdio_fds) - self.assert_(isinstance(utils.sys.stdout, utils.LoggerFileObject)) - self.assert_(isinstance(utils.sys.stderr, utils.LoggerFileObject)) + self.assertTrue( + isinstance(utils.sys.stdout, utils.LoggerFileObject)) + self.assertTrue( + isinstance(utils.sys.stderr, utils.LoggerFileObject)) # reset; test same args, but exc when trying to close stdio utils.os = MockOs(raise_funcs=('dup2',)) @@ -1778,10 +1780,12 @@ log_name = %(yarr)s''' # test unable to close stdio utils.capture_stdio(logger) - self.assert_(utils.sys.excepthook is not None) + self.assertTrue(utils.sys.excepthook is not None) self.assertEquals(utils.os.closed_fds, []) - self.assert_(isinstance(utils.sys.stdout, utils.LoggerFileObject)) - self.assert_(isinstance(utils.sys.stderr, utils.LoggerFileObject)) + self.assertTrue( + isinstance(utils.sys.stdout, utils.LoggerFileObject)) + self.assertTrue( + isinstance(utils.sys.stderr, utils.LoggerFileObject)) # reset; test some other args utils.os = MockOs() @@ -1791,7 +1795,7 @@ log_name = %(yarr)s''' # test console log utils.capture_stdio(logger, capture_stdout=False, capture_stderr=False) - self.assert_(utils.sys.excepthook is not None) + self.assertTrue(utils.sys.excepthook is not None) # when logging to console, stderr remains open self.assertEquals(utils.os.closed_fds, utils.sys.stdio_fds[:2]) reset_loggers() @@ -1814,7 +1818,7 @@ log_name = %(yarr)s''' logger = utils.get_logger(None, log_to_console=True) console_handlers = [h for h in logger.logger.handlers if isinstance(h, logging.StreamHandler)] - self.assert_(console_handlers) + self.assertTrue(console_handlers) # make sure you can't have two console handlers self.assertEquals(len(console_handlers), 1) old_handler = console_handlers[0] @@ -1970,7 +1974,7 @@ log_name = %(yarr)s''' f3 = 
os.path.join(t, 'folder/sub/2.txt') f4 = os.path.join(t, 'folder2/3.txt') for f in [f1, f2, f3, f4]: - self.assert_(f in folder_texts) + self.assertTrue(f in folder_texts) def test_search_tree_with_directory_ext_match(self): files = ( @@ -1990,7 +1994,7 @@ log_name = %(yarr)s''' self.assertEquals(len(conf_dirs), 4) for i in range(4): conf_dir = os.path.join(t, 'object-server/%d.conf.d' % (i + 1)) - self.assert_(conf_dir in conf_dirs) + self.assertTrue(conf_dir in conf_dirs) def test_search_tree_conf_dir_with_named_conf_match(self): files = ( @@ -2046,7 +2050,7 @@ log_name = %(yarr)s''' self.assertEquals(utils.remove_file(file_name), None) with open(file_name, 'w') as f: f.write('1') - self.assert_(os.path.exists(file_name)) + self.assertTrue(os.path.exists(file_name)) self.assertEquals(utils.remove_file(file_name), None) self.assertFalse(os.path.exists(file_name)) @@ -2541,8 +2545,8 @@ cluster_dfw1 = http://dfw1.host/v1/ pass except LockTimeout: timedout = True - self.assert_(timedout) - self.assert_(os.path.exists(nt.name)) + self.assertTrue(timedout) + self.assertTrue(os.path.exists(nt.name)) def test_ismount_path_does_not_exist(self): tmpdir = mkdtemp() @@ -3438,8 +3442,8 @@ class TestStatsdLogging(unittest.TestCase): logger = utils.get_logger({'log_statsd_host': 'some.host.com'}, 'some-name', log_route='some-route') # white-box construction validation - self.assert_(isinstance(logger.logger.statsd_client, - utils.StatsdClient)) + self.assertTrue(isinstance(logger.logger.statsd_client, + utils.StatsdClient)) self.assertEqual(logger.logger.statsd_client._host, 'some.host.com') self.assertEqual(logger.logger.statsd_client._port, 8125) self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.') @@ -3563,35 +3567,35 @@ class TestStatsdLogging(unittest.TestCase): self.assertEquals(mock_controller.called, 'timing') self.assertEquals(len(mock_controller.args), 2) self.assertEquals(mock_controller.args[0], 'METHOD.timing') - self.assert_(mock_controller.args[1] 
> 0) + self.assertTrue(mock_controller.args[1] > 0) mock_controller = MockController(404) METHOD(mock_controller) self.assertEquals(len(mock_controller.args), 2) self.assertEquals(mock_controller.called, 'timing') self.assertEquals(mock_controller.args[0], 'METHOD.timing') - self.assert_(mock_controller.args[1] > 0) + self.assertTrue(mock_controller.args[1] > 0) mock_controller = MockController(412) METHOD(mock_controller) self.assertEquals(len(mock_controller.args), 2) self.assertEquals(mock_controller.called, 'timing') self.assertEquals(mock_controller.args[0], 'METHOD.timing') - self.assert_(mock_controller.args[1] > 0) + self.assertTrue(mock_controller.args[1] > 0) mock_controller = MockController(416) METHOD(mock_controller) self.assertEquals(len(mock_controller.args), 2) self.assertEquals(mock_controller.called, 'timing') self.assertEquals(mock_controller.args[0], 'METHOD.timing') - self.assert_(mock_controller.args[1] > 0) + self.assertTrue(mock_controller.args[1] > 0) mock_controller = MockController(401) METHOD(mock_controller) self.assertEquals(len(mock_controller.args), 2) self.assertEquals(mock_controller.called, 'timing') self.assertEquals(mock_controller.args[0], 'METHOD.errors.timing') - self.assert_(mock_controller.args[1] > 0) + self.assertTrue(mock_controller.args[1] > 0) class UnsafeXrange(object): @@ -3649,14 +3653,14 @@ class TestAffinityKeyFunction(unittest.TestCase): def test_empty_value(self): # Empty's okay, it just means no preference keyfn = utils.affinity_key_function("") - self.assert_(callable(keyfn)) + self.assertTrue(callable(keyfn)) ids = [n['id'] for n in sorted(self.nodes, key=keyfn)] self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids) def test_all_whitespace_value(self): # Empty's okay, it just means no preference keyfn = utils.affinity_key_function(" \n") - self.assert_(callable(keyfn)) + self.assertTrue(callable(keyfn)) ids = [n['id'] for n in sorted(self.nodes, key=keyfn)] self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids) @@ 
-3689,23 +3693,23 @@ class TestAffinityLocalityPredicate(unittest.TestCase): def test_empty(self): pred = utils.affinity_locality_predicate('') - self.assert_(pred is None) + self.assertTrue(pred is None) def test_region(self): pred = utils.affinity_locality_predicate('r1') - self.assert_(callable(pred)) + self.assertTrue(callable(pred)) ids = [n['id'] for n in self.nodes if pred(n)] self.assertEqual([0, 1], ids) def test_zone(self): pred = utils.affinity_locality_predicate('r1z1') - self.assert_(callable(pred)) + self.assertTrue(callable(pred)) ids = [n['id'] for n in self.nodes if pred(n)] self.assertEqual([0], ids) def test_multiple(self): pred = utils.affinity_locality_predicate('r1, r3, r4z0') - self.assert_(callable(pred)) + self.assertTrue(callable(pred)) ids = [n['id'] for n in self.nodes if pred(n)] self.assertEqual([0, 1, 4, 5, 6], ids) @@ -3865,8 +3869,8 @@ class TestStatsdLoggingDelegation(unittest.TestCase): def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs): got = self._send_and_get(sender_fn, *args, **kwargs) - return self.assert_(re.search(expected_regexp, got), - [got, expected_regexp]) + return self.assertTrue(re.search(expected_regexp, got), + [got, expected_regexp]) def test_methods_are_no_ops_when_not_enabled(self): logger = utils.get_logger({ @@ -4228,7 +4232,7 @@ class TestThreadPool(unittest.TestCase): except ZeroDivisionError: # NB: format is (filename, line number, function name, text) tb_func = [elem[2] for elem - in traceback.extract_tb(sys.exc_traceback)] + in traceback.extract_tb(sys.exc_info()[2])] else: self.fail("Expected ZeroDivisionError") @@ -4294,7 +4298,7 @@ class TestAuditLocationGenerator(unittest.TestCase): else: return orig_listdir(path) - #Check Raise on Bad partition + # Check Raise on Bad partition tmpdir = mkdtemp() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) @@ -4311,7 +4315,7 @@ class TestAuditLocationGenerator(unittest.TestCase): self.assertRaises(OSError, audit) 
rmtree(tmpdir) - #Check Raise on Bad Suffix + # Check Raise on Bad Suffix tmpdir = mkdtemp() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) @@ -4330,7 +4334,7 @@ class TestAuditLocationGenerator(unittest.TestCase): self.assertRaises(OSError, audit) rmtree(tmpdir) - #Check Raise on Bad Hash + # Check Raise on Bad Hash tmpdir = mkdtemp() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) @@ -4354,14 +4358,14 @@ class TestAuditLocationGenerator(unittest.TestCase): logger = FakeLogger() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) - #Create a file, that represents a non-dir drive + # Create a file, that represents a non-dir drive open(os.path.join(tmpdir, 'asdf'), 'w') locations = utils.audit_location_generator( tmpdir, "data", mount_check=False, logger=logger ) self.assertEqual(list(locations), []) self.assertEqual(1, len(logger.get_lines_for_level('warning'))) - #Test without the logger + # Test without the logger locations = utils.audit_location_generator( tmpdir, "data", mount_check=False ) @@ -4372,7 +4376,7 @@ class TestAuditLocationGenerator(unittest.TestCase): logger = FakeLogger() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) - #Create a file, that represents a non-dir drive + # Create a file, that represents a non-dir drive open(os.path.join(tmpdir, 'asdf'), 'w') locations = utils.audit_location_generator( tmpdir, "data", mount_check=True, logger=logger @@ -4380,7 +4384,7 @@ class TestAuditLocationGenerator(unittest.TestCase): self.assertEqual(list(locations), []) self.assertEqual(2, len(logger.get_lines_for_level('warning'))) - #Test without the logger + # Test without the logger locations = utils.audit_location_generator( tmpdir, "data", mount_check=True ) @@ -4412,7 +4416,7 @@ class TestAuditLocationGenerator(unittest.TestCase): logger = FakeLogger() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) - #Create a file, that represents a non-dir drive + # Create a file, that 
represents a non-dir drive open(os.path.join(tmpdir, 'asdf'), 'w') partition = os.path.join(data, "partition1") os.makedirs(partition) @@ -4526,6 +4530,22 @@ class TestGreenAsyncPile(unittest.TestCase): self.assertEqual(pile.waitall(0.5), [0.1, 0.1]) self.assertEqual(completed[0], 2) + def test_pending(self): + pile = utils.GreenAsyncPile(3) + self.assertEqual(0, pile._pending) + for repeats in range(2): + # repeat to verify that pending will go again up after going down + for i in range(4): + pile.spawn(lambda: i) + self.assertEqual(4, pile._pending) + for i in range(3, -1, -1): + pile.next() + self.assertEqual(i, pile._pending) + # sanity check - the pile is empty + self.assertRaises(StopIteration, pile.next) + # pending remains 0 + self.assertEqual(0, pile._pending) + class TestLRUCache(unittest.TestCase): diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py index 27e39206c2..1e786e273c 100644 --- a/test/unit/common/test_wsgi.py +++ b/test/unit/common/test_wsgi.py @@ -23,11 +23,12 @@ import unittest import os from textwrap import dedent from contextlib import nested -from StringIO import StringIO from collections import defaultdict from urllib import quote from eventlet import listen +from six import BytesIO +from six import StringIO import mock @@ -130,19 +131,24 @@ class TestWSGI(unittest.TestCase): conf_file, 'proxy-server') # verify pipeline is catch_errors -> dlo -> proxy-server expected = swift.common.middleware.catch_errors.CatchErrorMiddleware - self.assert_(isinstance(app, expected)) + self.assertTrue(isinstance(app, expected)) app = app.app expected = swift.common.middleware.gatekeeper.GatekeeperMiddleware - self.assert_(isinstance(app, expected)) + self.assertTrue(isinstance(app, expected)) app = app.app expected = swift.common.middleware.dlo.DynamicLargeObject - self.assert_(isinstance(app, expected)) + self.assertTrue(isinstance(app, expected)) + + app = app.app + expected = \ + 
swift.common.middleware.versioned_writes.VersionedWritesMiddleware + self.assertIsInstance(app, expected) app = app.app expected = swift.proxy.server.Application - self.assert_(isinstance(app, expected)) + self.assertTrue(isinstance(app, expected)) # config settings applied to app instance self.assertEquals(0.2, app.conn_timeout) # appconfig returns values from 'proxy-server' section @@ -206,8 +212,8 @@ class TestWSGI(unittest.TestCase): conf_dir, 'proxy-server') # verify pipeline is catch_errors -> proxy-server expected = swift.common.middleware.catch_errors.CatchErrorMiddleware - self.assert_(isinstance(app, expected)) - self.assert_(isinstance(app.app, swift.proxy.server.Application)) + self.assertTrue(isinstance(app, expected)) + self.assertTrue(isinstance(app.app, swift.proxy.server.Application)) # config settings applied to app instance self.assertEquals(0.2, app.app.conn_timeout) # appconfig returns values from 'proxy-server' section @@ -268,7 +274,7 @@ class TestWSGI(unittest.TestCase): # test sock = wsgi.get_socket(conf) # assert - self.assert_(isinstance(sock, MockSocket)) + self.assertTrue(isinstance(sock, MockSocket)) expected_socket_opts = { socket.SOL_SOCKET: { socket.SO_REUSEADDR: 1, @@ -380,10 +386,10 @@ class TestWSGI(unittest.TestCase): args, kwargs = _wsgi.server.call_args server_sock, server_app, server_logger = args self.assertEquals(sock, server_sock) - self.assert_(isinstance(server_app, swift.proxy.server.Application)) + self.assertTrue(isinstance(server_app, swift.proxy.server.Application)) self.assertEquals(20, server_app.client_timeout) - self.assert_(isinstance(server_logger, wsgi.NullLogger)) - self.assert_('custom_pool' in kwargs) + self.assertTrue(isinstance(server_logger, wsgi.NullLogger)) + self.assertTrue('custom_pool' in kwargs) self.assertEquals(1000, kwargs['custom_pool'].size) def test_run_server_with_latest_eventlet(self): @@ -455,7 +461,7 @@ class TestWSGI(unittest.TestCase): logger = logging.getLogger('test') sock = 
listen(('localhost', 0)) wsgi.run_server(conf, logger, sock) - self.assert_(os.environ['TZ'] is not '') + self.assertTrue(os.environ['TZ'] is not '') self.assertEquals('HTTP/1.0', _wsgi.HttpProtocol.default_request_version) @@ -468,9 +474,9 @@ class TestWSGI(unittest.TestCase): args, kwargs = _wsgi.server.call_args server_sock, server_app, server_logger = args self.assertEquals(sock, server_sock) - self.assert_(isinstance(server_app, swift.proxy.server.Application)) - self.assert_(isinstance(server_logger, wsgi.NullLogger)) - self.assert_('custom_pool' in kwargs) + self.assertTrue(isinstance(server_app, swift.proxy.server.Application)) + self.assertTrue(isinstance(server_logger, wsgi.NullLogger)) + self.assertTrue('custom_pool' in kwargs) def test_run_server_debug(self): config = """ @@ -519,10 +525,10 @@ class TestWSGI(unittest.TestCase): args, kwargs = mock_server.call_args server_sock, server_app, server_logger = args self.assertEquals(sock, server_sock) - self.assert_(isinstance(server_app, swift.proxy.server.Application)) + self.assertTrue(isinstance(server_app, swift.proxy.server.Application)) self.assertEquals(20, server_app.client_timeout) self.assertEqual(server_logger, None) - self.assert_('custom_pool' in kwargs) + self.assertTrue('custom_pool' in kwargs) self.assertEquals(1000, kwargs['custom_pool'].size) def test_appconfig_dir_ignores_hidden_files(self): @@ -556,7 +562,7 @@ class TestWSGI(unittest.TestCase): self.assertTrue('wsgi.input' in newenv) self.assertEquals(newenv['wsgi.input'].read(), '') - oldenv = {'wsgi.input': StringIO('original wsgi.input')} + oldenv = {'wsgi.input': BytesIO(b'original wsgi.input')} newenv = wsgi.make_pre_authed_env(oldenv) self.assertTrue('wsgi.input' in newenv) self.assertEquals(newenv['wsgi.input'].read(), '') @@ -810,6 +816,16 @@ class TestWSGI(unittest.TestCase): self.assertEquals(r.environ['SCRIPT_NAME'], '') self.assertEquals(r.environ['PATH_INFO'], '/override') + def test_make_env_keep_user_project_id(self): + 
oldenv = {'HTTP_X_USER_ID': '1234', 'HTTP_X_PROJECT_ID': '5678'} + newenv = wsgi.make_env(oldenv) + + self.assertTrue('HTTP_X_USER_ID' in newenv) + self.assertEquals(newenv['HTTP_X_USER_ID'], '1234') + + self.assertTrue('HTTP_X_PROJECT_ID' in newenv) + self.assertEquals(newenv['HTTP_X_PROJECT_ID'], '5678') + class TestServersPerPortStrategy(unittest.TestCase): def setUp(self): @@ -1089,9 +1105,10 @@ class TestWorkersStrategy(unittest.TestCase): self.addCleanup(patcher.stop) def test_loop_timeout(self): - # This strategy should block in the green.os.wait() until a worker - # process exits. - self.assertEqual(None, self.strategy.loop_timeout()) + # This strategy should sit in the green.os.wait() for a bit (to avoid + # busy-waiting) but not forever (so the keep-running flag actually + # gets checked). + self.assertEqual(0.5, self.strategy.loop_timeout()) def test_binding(self): self.assertEqual(None, self.strategy.bind_ports()) @@ -1412,6 +1429,7 @@ class TestPipelineModification(unittest.TestCase): ['swift.common.middleware.catch_errors', 'swift.common.middleware.gatekeeper', 'swift.common.middleware.dlo', + 'swift.common.middleware.versioned_writes', 'swift.proxy.server']) def test_proxy_modify_wsgi_pipeline(self): @@ -1442,6 +1460,7 @@ class TestPipelineModification(unittest.TestCase): ['swift.common.middleware.catch_errors', 'swift.common.middleware.gatekeeper', 'swift.common.middleware.dlo', + 'swift.common.middleware.versioned_writes', 'swift.common.middleware.healthcheck', 'swift.proxy.server']) @@ -1539,6 +1558,7 @@ class TestPipelineModification(unittest.TestCase): 'swift.common.middleware.catch_errors', 'swift.common.middleware.gatekeeper', 'swift.common.middleware.dlo', + 'swift.common.middleware.versioned_writes', 'swift.common.middleware.healthcheck', 'swift.proxy.server']) @@ -1552,6 +1572,7 @@ class TestPipelineModification(unittest.TestCase): 'swift.common.middleware.healthcheck', 'swift.common.middleware.catch_errors', 'swift.common.middleware.dlo', 
+ 'swift.common.middleware.versioned_writes', 'swift.proxy.server']) def test_catch_errors_gatekeeper_configured_not_at_start(self): @@ -1564,6 +1585,7 @@ class TestPipelineModification(unittest.TestCase): 'swift.common.middleware.catch_errors', 'swift.common.middleware.gatekeeper', 'swift.common.middleware.dlo', + 'swift.common.middleware.versioned_writes', 'swift.proxy.server']) @with_tempdir @@ -1596,7 +1618,7 @@ class TestPipelineModification(unittest.TestCase): tempdir, policy.ring_name + '.ring.gz') app = wsgi.loadapp(conf_path) - proxy_app = app.app.app.app.app + proxy_app = app.app.app.app.app.app self.assertEqual(proxy_app.account_ring.serialized_path, account_ring_path) self.assertEqual(proxy_app.container_ring.serialized_path, @@ -1650,7 +1672,7 @@ class TestPipelineModification(unittest.TestCase): self.assertRaises(AttributeError, getattr, filtered_app, 'foo') # set the attribute - self.assert_(isinstance(app, FakeApp)) + self.assertTrue(isinstance(app, FakeApp)) app.foo = 'bar' self.assertEqual(filtered_app.foo, 'bar') diff --git a/test/unit/container/test_auditor.py b/test/unit/container/test_auditor.py index d1df39d2cd..6d9b28d640 100644 --- a/test/unit/container/test_auditor.py +++ b/test/unit/container/test_auditor.py @@ -90,8 +90,8 @@ class TestAuditor(unittest.TestCase): with mock.patch('swift.container.auditor.audit_location_generator', fake_audit_location_generator): self.assertRaises(ValueError, test_auditor.run_forever) - self.assertEquals(test_auditor.container_failures, 2 * call_times) - self.assertEquals(test_auditor.container_passes, 3 * call_times) + self.assertEqual(test_auditor.container_failures, 2 * call_times) + self.assertEqual(test_auditor.container_passes, 3 * call_times) @mock.patch('swift.container.auditor.ContainerBroker', FakeContainerBroker) def test_run_once(self): @@ -105,8 +105,8 @@ class TestAuditor(unittest.TestCase): with mock.patch('swift.container.auditor.audit_location_generator', fake_audit_location_generator): 
test_auditor.run_once() - self.assertEquals(test_auditor.container_failures, 2) - self.assertEquals(test_auditor.container_passes, 3) + self.assertEqual(test_auditor.container_failures, 2) + self.assertEqual(test_auditor.container_passes, 3) @mock.patch('swift.container.auditor.ContainerBroker', FakeContainerBroker) def test_container_auditor(self): @@ -116,8 +116,8 @@ class TestAuditor(unittest.TestCase): for f in files: path = os.path.join(self.testdir, f) test_auditor.container_audit(path) - self.assertEquals(test_auditor.container_failures, 2) - self.assertEquals(test_auditor.container_passes, 3) + self.assertEqual(test_auditor.container_failures, 2) + self.assertEqual(test_auditor.container_passes, 3) class TestAuditorMigrations(unittest.TestCase): @@ -136,8 +136,8 @@ class TestAuditorMigrations(unittest.TestCase): conn.execute('SELECT storage_policy_index ' 'FROM container_stat') except Exception as err: - self.assert_('no such column: storage_policy_index' in - str(err)) + self.assertTrue('no such column: storage_policy_index' in + str(err)) else: self.fail('TestContainerBrokerBeforeSPI broker class ' 'was already migrated') diff --git a/test/unit/container/test_backend.py b/test/unit/container/test_backend.py index 7955b2ac7a..2a7aafc10e 100644 --- a/test/unit/container/test_backend.py +++ b/test/unit/container/test_backend.py @@ -84,23 +84,23 @@ class TestContainerBroker(unittest.TestCase): first_conn = conn try: with broker.get() as conn: - self.assertEquals(first_conn, conn) + self.assertEqual(first_conn, conn) raise Exception('OMG') except Exception: pass - self.assert_(broker.conn is None) + self.assertTrue(broker.conn is None) def test_empty(self): # Test ContainerBroker.empty broker = ContainerBroker(':memory:', account='a', container='c') broker.initialize(Timestamp('1').internal, 0) - self.assert_(broker.empty()) + self.assertTrue(broker.empty()) broker.put_object('o', Timestamp(time()).internal, 0, 'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') 
- self.assert_(not broker.empty()) + self.assertTrue(not broker.empty()) sleep(.00001) broker.delete_object('o', Timestamp(time()).internal) - self.assert_(broker.empty()) + self.assertTrue(broker.empty()) def test_reclaim(self): broker = ContainerBroker(':memory:', account='test_account', @@ -109,44 +109,44 @@ class TestContainerBroker(unittest.TestCase): broker.put_object('o', Timestamp(time()).internal, 0, 'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') with broker.get() as conn: - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT count(*) FROM object " "WHERE deleted = 0").fetchone()[0], 1) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT count(*) FROM object " "WHERE deleted = 1").fetchone()[0], 0) broker.reclaim(Timestamp(time() - 999).internal, time()) with broker.get() as conn: - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT count(*) FROM object " "WHERE deleted = 0").fetchone()[0], 1) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT count(*) FROM object " "WHERE deleted = 1").fetchone()[0], 0) sleep(.00001) broker.delete_object('o', Timestamp(time()).internal) with broker.get() as conn: - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT count(*) FROM object " "WHERE deleted = 0").fetchone()[0], 0) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT count(*) FROM object " "WHERE deleted = 1").fetchone()[0], 1) broker.reclaim(Timestamp(time() - 999).internal, time()) with broker.get() as conn: - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT count(*) FROM object " "WHERE deleted = 0").fetchone()[0], 0) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT count(*) FROM object " "WHERE deleted = 1").fetchone()[0], 1) sleep(.00001) broker.reclaim(Timestamp(time()).internal, time()) with broker.get() as conn: - self.assertEquals(conn.execute( + 
self.assertEqual(conn.execute( "SELECT count(*) FROM object " "WHERE deleted = 0").fetchone()[0], 0) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT count(*) FROM object " "WHERE deleted = 1").fetchone()[0], 0) # Test the return values of reclaim() @@ -174,7 +174,7 @@ class TestContainerBroker(unittest.TestCase): self.assertEqual(is_deleted, False) # sanity self.assertEqual(info, broker.get_info()) self.assertEqual(info['put_timestamp'], Timestamp(start).internal) - self.assert_(Timestamp(info['created_at']) >= start) + self.assertTrue(Timestamp(info['created_at']) >= start) self.assertEqual(info['delete_timestamp'], '0') if self.__class__ in (TestContainerBrokerBeforeMetadata, TestContainerBrokerBeforeXSync, @@ -192,7 +192,7 @@ class TestContainerBroker(unittest.TestCase): self.assertEqual(is_deleted, broker.is_deleted()) self.assertEqual(info, broker.get_info()) self.assertEqual(info['put_timestamp'], Timestamp(start).internal) - self.assert_(Timestamp(info['created_at']) >= start) + self.assertTrue(Timestamp(info['created_at']) >= start) self.assertEqual(info['delete_timestamp'], delete_timestamp) self.assertEqual(info['status_changed_at'], delete_timestamp) @@ -204,7 +204,7 @@ class TestContainerBroker(unittest.TestCase): self.assertEqual(is_deleted, broker.is_deleted()) self.assertEqual(info, broker.get_info()) self.assertEqual(info['put_timestamp'], Timestamp(start).internal) - self.assert_(Timestamp(info['created_at']) >= start) + self.assertTrue(Timestamp(info['created_at']) >= start) self.assertEqual(info['delete_timestamp'], delete_timestamp) self.assertEqual(info['status_changed_at'], delete_timestamp) @@ -215,19 +215,19 @@ class TestContainerBroker(unittest.TestCase): broker.put_object('o', Timestamp(time()).internal, 0, 'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') with broker.get() as conn: - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT count(*) FROM object " "WHERE deleted = 
0").fetchone()[0], 1) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT count(*) FROM object " "WHERE deleted = 1").fetchone()[0], 0) sleep(.00001) broker.delete_object('o', Timestamp(time()).internal) with broker.get() as conn: - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT count(*) FROM object " "WHERE deleted = 0").fetchone()[0], 0) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT count(*) FROM object " "WHERE deleted = 1").fetchone()[0], 1) @@ -242,20 +242,20 @@ class TestContainerBroker(unittest.TestCase): 'application/x-test', '5af83e3196bf99f440f31f2e1a6c9afe') with broker.get() as conn: - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT name FROM object").fetchone()[0], '"{}"') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT created_at FROM object").fetchone()[0], timestamp) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT size FROM object").fetchone()[0], 123) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT content_type FROM object").fetchone()[0], 'application/x-test') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT etag FROM object").fetchone()[0], '5af83e3196bf99f440f31f2e1a6c9afe') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT deleted FROM object").fetchone()[0], 0) # Reput same event @@ -263,20 +263,20 @@ class TestContainerBroker(unittest.TestCase): 'application/x-test', '5af83e3196bf99f440f31f2e1a6c9afe') with broker.get() as conn: - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT name FROM object").fetchone()[0], '"{}"') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT created_at FROM object").fetchone()[0], timestamp) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT size FROM object").fetchone()[0], 123) - self.assertEquals(conn.execute( 
+ self.assertEqual(conn.execute( "SELECT content_type FROM object").fetchone()[0], 'application/x-test') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT etag FROM object").fetchone()[0], '5af83e3196bf99f440f31f2e1a6c9afe') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT deleted FROM object").fetchone()[0], 0) # Put new event @@ -286,20 +286,20 @@ class TestContainerBroker(unittest.TestCase): 'application/x-test', 'aa0749bacbc79ec65fe206943d8fe449') with broker.get() as conn: - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT name FROM object").fetchone()[0], '"{}"') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT created_at FROM object").fetchone()[0], timestamp) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT size FROM object").fetchone()[0], 124) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT content_type FROM object").fetchone()[0], 'application/x-test') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT etag FROM object").fetchone()[0], 'aa0749bacbc79ec65fe206943d8fe449') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT deleted FROM object").fetchone()[0], 0) # Put old event @@ -308,20 +308,20 @@ class TestContainerBroker(unittest.TestCase): 'application/x-test', 'aa0749bacbc79ec65fe206943d8fe449') with broker.get() as conn: - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT name FROM object").fetchone()[0], '"{}"') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT created_at FROM object").fetchone()[0], timestamp) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT size FROM object").fetchone()[0], 124) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT content_type FROM object").fetchone()[0], 'application/x-test') - self.assertEquals(conn.execute( + 
self.assertEqual(conn.execute( "SELECT etag FROM object").fetchone()[0], 'aa0749bacbc79ec65fe206943d8fe449') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT deleted FROM object").fetchone()[0], 0) # Put old delete event @@ -329,20 +329,20 @@ class TestContainerBroker(unittest.TestCase): broker.put_object('"{}"', dtimestamp, 0, '', '', deleted=1) with broker.get() as conn: - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT name FROM object").fetchone()[0], '"{}"') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT created_at FROM object").fetchone()[0], timestamp) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT size FROM object").fetchone()[0], 124) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT content_type FROM object").fetchone()[0], 'application/x-test') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT etag FROM object").fetchone()[0], 'aa0749bacbc79ec65fe206943d8fe449') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT deleted FROM object").fetchone()[0], 0) # Put new delete event @@ -351,12 +351,12 @@ class TestContainerBroker(unittest.TestCase): broker.put_object('"{}"', timestamp, 0, '', '', deleted=1) with broker.get() as conn: - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT name FROM object").fetchone()[0], '"{}"') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT created_at FROM object").fetchone()[0], timestamp) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT deleted FROM object").fetchone()[0], 1) # Put new event @@ -366,20 +366,20 @@ class TestContainerBroker(unittest.TestCase): 'application/x-test', '5af83e3196bf99f440f31f2e1a6c9afe') with broker.get() as conn: - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT name FROM object").fetchone()[0], '"{}"') - 
self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT created_at FROM object").fetchone()[0], timestamp) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT size FROM object").fetchone()[0], 123) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT content_type FROM object").fetchone()[0], 'application/x-test') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT etag FROM object").fetchone()[0], '5af83e3196bf99f440f31f2e1a6c9afe') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT deleted FROM object").fetchone()[0], 0) # We'll use this later @@ -391,21 +391,21 @@ class TestContainerBroker(unittest.TestCase): previous_timestamp = timestamp timestamp = Timestamp(time()).internal with broker.get() as conn: - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT name FROM object").fetchone()[0], '"{}"') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT created_at FROM object").fetchone()[0], previous_timestamp) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT size FROM object").fetchone()[0], 123) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT content_type FROM object").fetchone()[0], 'application/x-test') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT etag FROM object").fetchone()[0], '5af83e3196bf99f440f31f2e1a6c9afe') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT deleted FROM object").fetchone()[0], 0) # Put event from after last put but before last post @@ -414,20 +414,20 @@ class TestContainerBroker(unittest.TestCase): 'application/x-test3', '6af83e3196bf99f440f31f2e1a6c9afe') with broker.get() as conn: - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT name FROM object").fetchone()[0], '"{}"') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT created_at 
FROM object").fetchone()[0], timestamp) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT size FROM object").fetchone()[0], 456) - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT content_type FROM object").fetchone()[0], 'application/x-test3') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT etag FROM object").fetchone()[0], '6af83e3196bf99f440f31f2e1a6c9afe') - self.assertEquals(conn.execute( + self.assertEqual(conn.execute( "SELECT deleted FROM object").fetchone()[0], 0) @patch_policies @@ -480,7 +480,7 @@ class TestContainerBroker(unittest.TestCase): broker.put_object('wrong_o', next(ts), 123, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe', storage_policy_index=other_policy.idx) - self.assert_(broker.has_multiple_policies()) + self.assertTrue(broker.has_multiple_policies()) @patch_policies def test_get_policy_info(self): @@ -563,35 +563,35 @@ class TestContainerBroker(unittest.TestCase): broker.initialize(Timestamp(1).internal) info = broker.get_info() - self.assertEquals(info['account'], 'test1') - self.assertEquals(info['container'], 'test2') - self.assertEquals(info['hash'], '00000000000000000000000000000000') + self.assertEqual(info['account'], 'test1') + self.assertEqual(info['container'], 'test2') + self.assertEqual(info['hash'], '00000000000000000000000000000000') self.assertEqual(info['put_timestamp'], Timestamp(1).internal) self.assertEqual(info['delete_timestamp'], '0') info = broker.get_info() - self.assertEquals(info['object_count'], 0) - self.assertEquals(info['bytes_used'], 0) + self.assertEqual(info['object_count'], 0) + self.assertEqual(info['bytes_used'], 0) policy_stats = broker.get_policy_stats() # Act as policy-0 self.assertTrue(0 in policy_stats) - self.assertEquals(policy_stats[0]['bytes_used'], 0) - self.assertEquals(policy_stats[0]['object_count'], 0) + self.assertEqual(policy_stats[0]['bytes_used'], 0) + self.assertEqual(policy_stats[0]['object_count'], 0) 
broker.put_object('o1', Timestamp(time()).internal, 123, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe') info = broker.get_info() - self.assertEquals(info['object_count'], 1) - self.assertEquals(info['bytes_used'], 123) + self.assertEqual(info['object_count'], 1) + self.assertEqual(info['bytes_used'], 123) policy_stats = broker.get_policy_stats() self.assertTrue(0 in policy_stats) - self.assertEquals(policy_stats[0]['object_count'], 1) - self.assertEquals(policy_stats[0]['bytes_used'], 123) + self.assertEqual(policy_stats[0]['object_count'], 1) + self.assertEqual(policy_stats[0]['bytes_used'], 123) def test_get_info(self): # Test ContainerBroker.get_info @@ -600,9 +600,9 @@ class TestContainerBroker(unittest.TestCase): broker.initialize(Timestamp('1').internal, 0) info = broker.get_info() - self.assertEquals(info['account'], 'test1') - self.assertEquals(info['container'], 'test2') - self.assertEquals(info['hash'], '00000000000000000000000000000000') + self.assertEqual(info['account'], 'test1') + self.assertEqual(info['container'], 'test2') + self.assertEqual(info['hash'], '00000000000000000000000000000000') self.assertEqual(info['put_timestamp'], Timestamp(1).internal) self.assertEqual(info['delete_timestamp'], '0') if self.__class__ in (TestContainerBrokerBeforeMetadata, @@ -614,44 +614,44 @@ class TestContainerBroker(unittest.TestCase): Timestamp(1).internal) info = broker.get_info() - self.assertEquals(info['object_count'], 0) - self.assertEquals(info['bytes_used'], 0) + self.assertEqual(info['object_count'], 0) + self.assertEqual(info['bytes_used'], 0) broker.put_object('o1', Timestamp(time()).internal, 123, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe') info = broker.get_info() - self.assertEquals(info['object_count'], 1) - self.assertEquals(info['bytes_used'], 123) + self.assertEqual(info['object_count'], 1) + self.assertEqual(info['bytes_used'], 123) sleep(.00001) broker.put_object('o2', Timestamp(time()).internal, 123, 'text/plain', 
'5af83e3196bf99f440f31f2e1a6c9afe') info = broker.get_info() - self.assertEquals(info['object_count'], 2) - self.assertEquals(info['bytes_used'], 246) + self.assertEqual(info['object_count'], 2) + self.assertEqual(info['bytes_used'], 246) sleep(.00001) broker.put_object('o2', Timestamp(time()).internal, 1000, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe') info = broker.get_info() - self.assertEquals(info['object_count'], 2) - self.assertEquals(info['bytes_used'], 1123) + self.assertEqual(info['object_count'], 2) + self.assertEqual(info['bytes_used'], 1123) sleep(.00001) broker.delete_object('o1', Timestamp(time()).internal) info = broker.get_info() - self.assertEquals(info['object_count'], 1) - self.assertEquals(info['bytes_used'], 1000) + self.assertEqual(info['object_count'], 1) + self.assertEqual(info['bytes_used'], 1000) sleep(.00001) broker.delete_object('o2', Timestamp(time()).internal) info = broker.get_info() - self.assertEquals(info['object_count'], 0) - self.assertEquals(info['bytes_used'], 0) + self.assertEqual(info['object_count'], 0) + self.assertEqual(info['bytes_used'], 0) info = broker.get_info() - self.assertEquals(info['x_container_sync_point1'], -1) - self.assertEquals(info['x_container_sync_point2'], -1) + self.assertEqual(info['x_container_sync_point1'], -1) + self.assertEqual(info['x_container_sync_point2'], -1) def test_set_x_syncs(self): broker = ContainerBroker(':memory:', account='test1', @@ -659,13 +659,13 @@ class TestContainerBroker(unittest.TestCase): broker.initialize(Timestamp('1').internal, 0) info = broker.get_info() - self.assertEquals(info['x_container_sync_point1'], -1) - self.assertEquals(info['x_container_sync_point2'], -1) + self.assertEqual(info['x_container_sync_point1'], -1) + self.assertEqual(info['x_container_sync_point2'], -1) broker.set_x_container_sync_points(1, 2) info = broker.get_info() - self.assertEquals(info['x_container_sync_point1'], 1) - self.assertEquals(info['x_container_sync_point2'], 2) + 
self.assertEqual(info['x_container_sync_point1'], 1) + self.assertEqual(info['x_container_sync_point2'], 2) def test_get_report_info(self): broker = ContainerBroker(':memory:', account='test1', @@ -673,66 +673,66 @@ class TestContainerBroker(unittest.TestCase): broker.initialize(Timestamp('1').internal, 0) info = broker.get_info() - self.assertEquals(info['account'], 'test1') - self.assertEquals(info['container'], 'test2') - self.assertEquals(info['object_count'], 0) - self.assertEquals(info['bytes_used'], 0) - self.assertEquals(info['reported_object_count'], 0) - self.assertEquals(info['reported_bytes_used'], 0) + self.assertEqual(info['account'], 'test1') + self.assertEqual(info['container'], 'test2') + self.assertEqual(info['object_count'], 0) + self.assertEqual(info['bytes_used'], 0) + self.assertEqual(info['reported_object_count'], 0) + self.assertEqual(info['reported_bytes_used'], 0) broker.put_object('o1', Timestamp(time()).internal, 123, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe') info = broker.get_info() - self.assertEquals(info['object_count'], 1) - self.assertEquals(info['bytes_used'], 123) - self.assertEquals(info['reported_object_count'], 0) - self.assertEquals(info['reported_bytes_used'], 0) + self.assertEqual(info['object_count'], 1) + self.assertEqual(info['bytes_used'], 123) + self.assertEqual(info['reported_object_count'], 0) + self.assertEqual(info['reported_bytes_used'], 0) sleep(.00001) broker.put_object('o2', Timestamp(time()).internal, 123, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe') info = broker.get_info() - self.assertEquals(info['object_count'], 2) - self.assertEquals(info['bytes_used'], 246) - self.assertEquals(info['reported_object_count'], 0) - self.assertEquals(info['reported_bytes_used'], 0) + self.assertEqual(info['object_count'], 2) + self.assertEqual(info['bytes_used'], 246) + self.assertEqual(info['reported_object_count'], 0) + self.assertEqual(info['reported_bytes_used'], 0) sleep(.00001) broker.put_object('o2', 
Timestamp(time()).internal, 1000, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe') info = broker.get_info() - self.assertEquals(info['object_count'], 2) - self.assertEquals(info['bytes_used'], 1123) - self.assertEquals(info['reported_object_count'], 0) - self.assertEquals(info['reported_bytes_used'], 0) + self.assertEqual(info['object_count'], 2) + self.assertEqual(info['bytes_used'], 1123) + self.assertEqual(info['reported_object_count'], 0) + self.assertEqual(info['reported_bytes_used'], 0) put_timestamp = Timestamp(time()).internal sleep(.001) delete_timestamp = Timestamp(time()).internal broker.reported(put_timestamp, delete_timestamp, 2, 1123) info = broker.get_info() - self.assertEquals(info['object_count'], 2) - self.assertEquals(info['bytes_used'], 1123) - self.assertEquals(info['reported_put_timestamp'], put_timestamp) - self.assertEquals(info['reported_delete_timestamp'], delete_timestamp) - self.assertEquals(info['reported_object_count'], 2) - self.assertEquals(info['reported_bytes_used'], 1123) + self.assertEqual(info['object_count'], 2) + self.assertEqual(info['bytes_used'], 1123) + self.assertEqual(info['reported_put_timestamp'], put_timestamp) + self.assertEqual(info['reported_delete_timestamp'], delete_timestamp) + self.assertEqual(info['reported_object_count'], 2) + self.assertEqual(info['reported_bytes_used'], 1123) sleep(.00001) broker.delete_object('o1', Timestamp(time()).internal) info = broker.get_info() - self.assertEquals(info['object_count'], 1) - self.assertEquals(info['bytes_used'], 1000) - self.assertEquals(info['reported_object_count'], 2) - self.assertEquals(info['reported_bytes_used'], 1123) + self.assertEqual(info['object_count'], 1) + self.assertEqual(info['bytes_used'], 1000) + self.assertEqual(info['reported_object_count'], 2) + self.assertEqual(info['reported_bytes_used'], 1123) sleep(.00001) broker.delete_object('o2', Timestamp(time()).internal) info = broker.get_info() - self.assertEquals(info['object_count'], 0) - 
self.assertEquals(info['bytes_used'], 0) - self.assertEquals(info['reported_object_count'], 2) - self.assertEquals(info['reported_bytes_used'], 1123) + self.assertEqual(info['object_count'], 0) + self.assertEqual(info['bytes_used'], 0) + self.assertEqual(info['reported_object_count'], 2) + self.assertEqual(info['reported_bytes_used'], 1123) def test_list_objects_iter(self): # Test ContainerBroker.list_objects_iter @@ -754,103 +754,103 @@ class TestContainerBroker(unittest.TestCase): 'd41d8cd98f00b204e9800998ecf8427e') listing = broker.list_objects_iter(100, '', None, None, '') - self.assertEquals(len(listing), 100) - self.assertEquals(listing[0][0], '0/0000') - self.assertEquals(listing[-1][0], '0/0099') + self.assertEqual(len(listing), 100) + self.assertEqual(listing[0][0], '0/0000') + self.assertEqual(listing[-1][0], '0/0099') listing = broker.list_objects_iter(100, '', '0/0050', None, '') - self.assertEquals(len(listing), 50) - self.assertEquals(listing[0][0], '0/0000') - self.assertEquals(listing[-1][0], '0/0049') + self.assertEqual(len(listing), 50) + self.assertEqual(listing[0][0], '0/0000') + self.assertEqual(listing[-1][0], '0/0049') listing = broker.list_objects_iter(100, '0/0099', None, None, '') - self.assertEquals(len(listing), 100) - self.assertEquals(listing[0][0], '0/0100') - self.assertEquals(listing[-1][0], '1/0074') + self.assertEqual(len(listing), 100) + self.assertEqual(listing[0][0], '0/0100') + self.assertEqual(listing[-1][0], '1/0074') listing = broker.list_objects_iter(55, '1/0074', None, None, '') - self.assertEquals(len(listing), 55) - self.assertEquals(listing[0][0], '1/0075') - self.assertEquals(listing[-1][0], '2/0004') + self.assertEqual(len(listing), 55) + self.assertEqual(listing[0][0], '1/0075') + self.assertEqual(listing[-1][0], '2/0004') listing = broker.list_objects_iter(10, '', None, '0/01', '') - self.assertEquals(len(listing), 10) - self.assertEquals(listing[0][0], '0/0100') - self.assertEquals(listing[-1][0], '0/0109') + 
self.assertEqual(len(listing), 10) + self.assertEqual(listing[0][0], '0/0100') + self.assertEqual(listing[-1][0], '0/0109') listing = broker.list_objects_iter(10, '', None, '0/', '/') - self.assertEquals(len(listing), 10) - self.assertEquals(listing[0][0], '0/0000') - self.assertEquals(listing[-1][0], '0/0009') + self.assertEqual(len(listing), 10) + self.assertEqual(listing[0][0], '0/0000') + self.assertEqual(listing[-1][0], '0/0009') # Same as above, but using the path argument. listing = broker.list_objects_iter(10, '', None, None, '', '0') - self.assertEquals(len(listing), 10) - self.assertEquals(listing[0][0], '0/0000') - self.assertEquals(listing[-1][0], '0/0009') + self.assertEqual(len(listing), 10) + self.assertEqual(listing[0][0], '0/0000') + self.assertEqual(listing[-1][0], '0/0009') listing = broker.list_objects_iter(10, '', None, '', '/') - self.assertEquals(len(listing), 4) - self.assertEquals([row[0] for row in listing], - ['0/', '1/', '2/', '3/']) + self.assertEqual(len(listing), 4) + self.assertEqual([row[0] for row in listing], + ['0/', '1/', '2/', '3/']) listing = broker.list_objects_iter(10, '2', None, None, '/') - self.assertEquals(len(listing), 2) - self.assertEquals([row[0] for row in listing], ['2/', '3/']) + self.assertEqual(len(listing), 2) + self.assertEqual([row[0] for row in listing], ['2/', '3/']) listing = broker.list_objects_iter(10, '2/', None, None, '/') - self.assertEquals(len(listing), 1) - self.assertEquals([row[0] for row in listing], ['3/']) + self.assertEqual(len(listing), 1) + self.assertEqual([row[0] for row in listing], ['3/']) listing = broker.list_objects_iter(10, '2/0050', None, '2/', '/') - self.assertEquals(len(listing), 10) - self.assertEquals(listing[0][0], '2/0051') - self.assertEquals(listing[1][0], '2/0051/') - self.assertEquals(listing[2][0], '2/0052') - self.assertEquals(listing[-1][0], '2/0059') + self.assertEqual(len(listing), 10) + self.assertEqual(listing[0][0], '2/0051') + self.assertEqual(listing[1][0], 
'2/0051/') + self.assertEqual(listing[2][0], '2/0052') + self.assertEqual(listing[-1][0], '2/0059') listing = broker.list_objects_iter(10, '3/0045', None, '3/', '/') - self.assertEquals(len(listing), 10) - self.assertEquals([row[0] for row in listing], - ['3/0045/', '3/0046', '3/0046/', '3/0047', - '3/0047/', '3/0048', '3/0048/', '3/0049', - '3/0049/', '3/0050']) + self.assertEqual(len(listing), 10) + self.assertEqual([row[0] for row in listing], + ['3/0045/', '3/0046', '3/0046/', '3/0047', + '3/0047/', '3/0048', '3/0048/', '3/0049', + '3/0049/', '3/0050']) broker.put_object('3/0049/', Timestamp(time()).internal, 0, 'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') listing = broker.list_objects_iter(10, '3/0048', None, None, None) - self.assertEquals(len(listing), 10) - self.assertEquals( + self.assertEqual(len(listing), 10) + self.assertEqual( [row[0] for row in listing], ['3/0048/0049', '3/0049', '3/0049/', '3/0049/0049', '3/0050', '3/0050/0049', '3/0051', '3/0051/0049', '3/0052', '3/0052/0049']) listing = broker.list_objects_iter(10, '3/0048', None, '3/', '/') - self.assertEquals(len(listing), 10) - self.assertEquals( + self.assertEqual(len(listing), 10) + self.assertEqual( [row[0] for row in listing], ['3/0048/', '3/0049', '3/0049/', '3/0050', '3/0050/', '3/0051', '3/0051/', '3/0052', '3/0052/', '3/0053']) listing = broker.list_objects_iter(10, None, None, '3/0049/', '/') - self.assertEquals(len(listing), 2) - self.assertEquals( + self.assertEqual(len(listing), 2) + self.assertEqual( [row[0] for row in listing], ['3/0049/', '3/0049/0049']) listing = broker.list_objects_iter(10, None, None, None, None, '3/0049') - self.assertEquals(len(listing), 1) - self.assertEquals([row[0] for row in listing], ['3/0049/0049']) + self.assertEqual(len(listing), 1) + self.assertEqual([row[0] for row in listing], ['3/0049/0049']) listing = broker.list_objects_iter(2, None, None, '3/', '/') - self.assertEquals(len(listing), 2) - self.assertEquals([row[0] for row in listing], 
['3/0000', '3/0000/']) + self.assertEqual(len(listing), 2) + self.assertEqual([row[0] for row in listing], ['3/0000', '3/0000/']) listing = broker.list_objects_iter(2, None, None, None, None, '3') - self.assertEquals(len(listing), 2) - self.assertEquals([row[0] for row in listing], ['3/0000', '3/0001']) + self.assertEqual(len(listing), 2) + self.assertEqual([row[0] for row in listing], ['3/0000', '3/0001']) def test_list_objects_iter_non_slash(self): # Test ContainerBroker.list_objects_iter using a @@ -873,87 +873,87 @@ class TestContainerBroker(unittest.TestCase): 'd41d8cd98f00b204e9800998ecf8427e') listing = broker.list_objects_iter(100, '', None, None, '') - self.assertEquals(len(listing), 100) - self.assertEquals(listing[0][0], '0:0000') - self.assertEquals(listing[-1][0], '0:0099') + self.assertEqual(len(listing), 100) + self.assertEqual(listing[0][0], '0:0000') + self.assertEqual(listing[-1][0], '0:0099') listing = broker.list_objects_iter(100, '', '0:0050', None, '') - self.assertEquals(len(listing), 50) - self.assertEquals(listing[0][0], '0:0000') - self.assertEquals(listing[-1][0], '0:0049') + self.assertEqual(len(listing), 50) + self.assertEqual(listing[0][0], '0:0000') + self.assertEqual(listing[-1][0], '0:0049') listing = broker.list_objects_iter(100, '0:0099', None, None, '') - self.assertEquals(len(listing), 100) - self.assertEquals(listing[0][0], '0:0100') - self.assertEquals(listing[-1][0], '1:0074') + self.assertEqual(len(listing), 100) + self.assertEqual(listing[0][0], '0:0100') + self.assertEqual(listing[-1][0], '1:0074') listing = broker.list_objects_iter(55, '1:0074', None, None, '') - self.assertEquals(len(listing), 55) - self.assertEquals(listing[0][0], '1:0075') - self.assertEquals(listing[-1][0], '2:0004') + self.assertEqual(len(listing), 55) + self.assertEqual(listing[0][0], '1:0075') + self.assertEqual(listing[-1][0], '2:0004') listing = broker.list_objects_iter(10, '', None, '0:01', '') - self.assertEquals(len(listing), 10) - 
self.assertEquals(listing[0][0], '0:0100') - self.assertEquals(listing[-1][0], '0:0109') + self.assertEqual(len(listing), 10) + self.assertEqual(listing[0][0], '0:0100') + self.assertEqual(listing[-1][0], '0:0109') listing = broker.list_objects_iter(10, '', None, '0:', ':') - self.assertEquals(len(listing), 10) - self.assertEquals(listing[0][0], '0:0000') - self.assertEquals(listing[-1][0], '0:0009') + self.assertEqual(len(listing), 10) + self.assertEqual(listing[0][0], '0:0000') + self.assertEqual(listing[-1][0], '0:0009') # Same as above, but using the path argument, so nothing should be # returned since path uses a '/' as a delimiter. listing = broker.list_objects_iter(10, '', None, None, '', '0') - self.assertEquals(len(listing), 0) + self.assertEqual(len(listing), 0) listing = broker.list_objects_iter(10, '', None, '', ':') - self.assertEquals(len(listing), 4) - self.assertEquals([row[0] for row in listing], - ['0:', '1:', '2:', '3:']) + self.assertEqual(len(listing), 4) + self.assertEqual([row[0] for row in listing], + ['0:', '1:', '2:', '3:']) listing = broker.list_objects_iter(10, '2', None, None, ':') - self.assertEquals(len(listing), 2) - self.assertEquals([row[0] for row in listing], ['2:', '3:']) + self.assertEqual(len(listing), 2) + self.assertEqual([row[0] for row in listing], ['2:', '3:']) listing = broker.list_objects_iter(10, '2:', None, None, ':') - self.assertEquals(len(listing), 1) - self.assertEquals([row[0] for row in listing], ['3:']) + self.assertEqual(len(listing), 1) + self.assertEqual([row[0] for row in listing], ['3:']) listing = broker.list_objects_iter(10, '2:0050', None, '2:', ':') - self.assertEquals(len(listing), 10) - self.assertEquals(listing[0][0], '2:0051') - self.assertEquals(listing[1][0], '2:0051:') - self.assertEquals(listing[2][0], '2:0052') - self.assertEquals(listing[-1][0], '2:0059') + self.assertEqual(len(listing), 10) + self.assertEqual(listing[0][0], '2:0051') + self.assertEqual(listing[1][0], '2:0051:') + 
self.assertEqual(listing[2][0], '2:0052') + self.assertEqual(listing[-1][0], '2:0059') listing = broker.list_objects_iter(10, '3:0045', None, '3:', ':') - self.assertEquals(len(listing), 10) - self.assertEquals([row[0] for row in listing], - ['3:0045:', '3:0046', '3:0046:', '3:0047', - '3:0047:', '3:0048', '3:0048:', '3:0049', - '3:0049:', '3:0050']) + self.assertEqual(len(listing), 10) + self.assertEqual([row[0] for row in listing], + ['3:0045:', '3:0046', '3:0046:', '3:0047', + '3:0047:', '3:0048', '3:0048:', '3:0049', + '3:0049:', '3:0050']) broker.put_object('3:0049:', Timestamp(time()).internal, 0, 'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') listing = broker.list_objects_iter(10, '3:0048', None, None, None) - self.assertEquals(len(listing), 10) - self.assertEquals( + self.assertEqual(len(listing), 10) + self.assertEqual( [row[0] for row in listing], ['3:0048:0049', '3:0049', '3:0049:', '3:0049:0049', '3:0050', '3:0050:0049', '3:0051', '3:0051:0049', '3:0052', '3:0052:0049']) listing = broker.list_objects_iter(10, '3:0048', None, '3:', ':') - self.assertEquals(len(listing), 10) - self.assertEquals( + self.assertEqual(len(listing), 10) + self.assertEqual( [row[0] for row in listing], ['3:0048:', '3:0049', '3:0049:', '3:0050', '3:0050:', '3:0051', '3:0051:', '3:0052', '3:0052:', '3:0053']) listing = broker.list_objects_iter(10, None, None, '3:0049:', ':') - self.assertEquals(len(listing), 2) - self.assertEquals( + self.assertEqual(len(listing), 2) + self.assertEqual( [row[0] for row in listing], ['3:0049:', '3:0049:0049']) @@ -961,14 +961,14 @@ class TestContainerBroker(unittest.TestCase): # returned since path uses a '/' as a delimiter. 
listing = broker.list_objects_iter(10, None, None, None, None, '3:0049') - self.assertEquals(len(listing), 0) + self.assertEqual(len(listing), 0) listing = broker.list_objects_iter(2, None, None, '3:', ':') - self.assertEquals(len(listing), 2) - self.assertEquals([row[0] for row in listing], ['3:0000', '3:0000:']) + self.assertEqual(len(listing), 2) + self.assertEqual([row[0] for row in listing], ['3:0000', '3:0000:']) listing = broker.list_objects_iter(2, None, None, None, None, '3') - self.assertEquals(len(listing), 0) + self.assertEqual(len(listing), 0) def test_list_objects_iter_prefix_delim(self): # Test ContainerBroker.list_objects_iter @@ -994,17 +994,17 @@ class TestContainerBroker(unittest.TestCase): '/snakes', Timestamp(0).internal, 0, 'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') - #def list_objects_iter(self, limit, marker, prefix, delimiter, - # path=None, format=None): + # def list_objects_iter(self, limit, marker, prefix, delimiter, + # path=None, format=None): listing = broker.list_objects_iter(100, None, None, '/pets/f', '/') - self.assertEquals([row[0] for row in listing], - ['/pets/fish/', '/pets/fish_info.txt']) + self.assertEqual([row[0] for row in listing], + ['/pets/fish/', '/pets/fish_info.txt']) listing = broker.list_objects_iter(100, None, None, '/pets/fish', '/') - self.assertEquals([row[0] for row in listing], - ['/pets/fish/', '/pets/fish_info.txt']) + self.assertEqual([row[0] for row in listing], + ['/pets/fish/', '/pets/fish_info.txt']) listing = broker.list_objects_iter(100, None, None, '/pets/fish/', '/') - self.assertEquals([row[0] for row in listing], - ['/pets/fish/a', '/pets/fish/b']) + self.assertEqual([row[0] for row in listing], + ['/pets/fish/a', '/pets/fish/b']) def test_double_check_trailing_delimiter(self): # Test ContainerBroker.list_objects_iter for a @@ -1056,35 +1056,35 @@ class TestContainerBroker(unittest.TestCase): broker.put_object('1/0', Timestamp(time()).internal, 0, 'text/plain', 
'd41d8cd98f00b204e9800998ecf8427e') listing = broker.list_objects_iter(25, None, None, None, None) - self.assertEquals(len(listing), 22) - self.assertEquals( + self.assertEqual(len(listing), 22) + self.assertEqual( [row[0] for row in listing], ['0', '0/', '0/0', '0/00', '0/1', '0/1/', '0/1/0', '00', '1', '1/', '1/0', 'a', 'a/', 'a/0', 'a/a', 'a/a/a', 'a/a/b', 'a/b', 'b', 'b/a', 'b/b', 'c']) listing = broker.list_objects_iter(25, None, None, '', '/') - self.assertEquals(len(listing), 10) - self.assertEquals( + self.assertEqual(len(listing), 10) + self.assertEqual( [row[0] for row in listing], ['0', '0/', '00', '1', '1/', 'a', 'a/', 'b', 'b/', 'c']) listing = broker.list_objects_iter(25, None, None, 'a/', '/') - self.assertEquals(len(listing), 5) - self.assertEquals( + self.assertEqual(len(listing), 5) + self.assertEqual( [row[0] for row in listing], ['a/', 'a/0', 'a/a', 'a/a/', 'a/b']) listing = broker.list_objects_iter(25, None, None, '0/', '/') - self.assertEquals(len(listing), 5) - self.assertEquals( + self.assertEqual(len(listing), 5) + self.assertEqual( [row[0] for row in listing], ['0/', '0/0', '0/00', '0/1', '0/1/']) listing = broker.list_objects_iter(25, None, None, '0/1/', '/') - self.assertEquals(len(listing), 2) - self.assertEquals( + self.assertEqual(len(listing), 2) + self.assertEqual( [row[0] for row in listing], ['0/1/', '0/1/0']) listing = broker.list_objects_iter(25, None, None, 'b/', '/') - self.assertEquals(len(listing), 2) - self.assertEquals([row[0] for row in listing], ['b/a', 'b/b']) + self.assertEqual(len(listing), 2) + self.assertEqual([row[0] for row in listing], ['b/a', 'b/b']) def test_double_check_trailing_delimiter_non_slash(self): # Test ContainerBroker.list_objects_iter for a @@ -1136,35 +1136,35 @@ class TestContainerBroker(unittest.TestCase): broker.put_object('1:0', Timestamp(time()).internal, 0, 'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') listing = broker.list_objects_iter(25, None, None, None, None) - 
self.assertEquals(len(listing), 22) - self.assertEquals( + self.assertEqual(len(listing), 22) + self.assertEqual( [row[0] for row in listing], ['0', '00', '0:', '0:0', '0:00', '0:1', '0:1:', '0:1:0', '1', '1:', '1:0', 'a', 'a:', 'a:0', 'a:a', 'a:a:a', 'a:a:b', 'a:b', 'b', 'b:a', 'b:b', 'c']) listing = broker.list_objects_iter(25, None, None, '', ':') - self.assertEquals(len(listing), 10) - self.assertEquals( + self.assertEqual(len(listing), 10) + self.assertEqual( [row[0] for row in listing], ['0', '00', '0:', '1', '1:', 'a', 'a:', 'b', 'b:', 'c']) listing = broker.list_objects_iter(25, None, None, 'a:', ':') - self.assertEquals(len(listing), 5) - self.assertEquals( + self.assertEqual(len(listing), 5) + self.assertEqual( [row[0] for row in listing], ['a:', 'a:0', 'a:a', 'a:a:', 'a:b']) listing = broker.list_objects_iter(25, None, None, '0:', ':') - self.assertEquals(len(listing), 5) - self.assertEquals( + self.assertEqual(len(listing), 5) + self.assertEqual( [row[0] for row in listing], ['0:', '0:0', '0:00', '0:1', '0:1:']) listing = broker.list_objects_iter(25, None, None, '0:1:', ':') - self.assertEquals(len(listing), 2) - self.assertEquals( + self.assertEqual(len(listing), 2) + self.assertEqual( [row[0] for row in listing], ['0:1:', '0:1:0']) listing = broker.list_objects_iter(25, None, None, 'b:', ':') - self.assertEquals(len(listing), 2) - self.assertEquals([row[0] for row in listing], ['b:a', 'b:b']) + self.assertEqual(len(listing), 2) + self.assertEqual([row[0] for row in listing], ['b:a', 'b:b']) def test_chexor(self): broker = ContainerBroker(':memory:', account='a', container='c') @@ -1177,13 +1177,13 @@ class TestContainerBroker(unittest.TestCase): hashb = hashlib.md5('%s-%s' % ('b', Timestamp(2).internal)).digest() hashc = ''.join( ('%02x' % (ord(a) ^ ord(b)) for a, b in zip(hasha, hashb))) - self.assertEquals(broker.get_info()['hash'], hashc) + self.assertEqual(broker.get_info()['hash'], hashc) broker.put_object('b', Timestamp(3).internal, 0, 
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') hashb = hashlib.md5('%s-%s' % ('b', Timestamp(3).internal)).digest() hashc = ''.join( ('%02x' % (ord(a) ^ ord(b)) for a, b in zip(hasha, hashb))) - self.assertEquals(broker.get_info()['hash'], hashc) + self.assertEqual(broker.get_info()['hash'], hashc) def test_newid(self): # test DatabaseBroker.newid @@ -1203,8 +1203,8 @@ class TestContainerBroker(unittest.TestCase): broker.put_object('b', Timestamp(2).internal, 0, 'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') items = broker.get_items_since(max_row, 1000) - self.assertEquals(len(items), 1) - self.assertEquals(items[0]['name'], 'b') + self.assertEqual(len(items), 1) + self.assertEqual(items[0]['name'], 'b') def test_sync_merging(self): # exercise the DatabaseBroker sync functions a bit @@ -1212,10 +1212,10 @@ class TestContainerBroker(unittest.TestCase): broker1.initialize(Timestamp('1').internal, 0) broker2 = ContainerBroker(':memory:', account='a', container='c') broker2.initialize(Timestamp('1').internal, 0) - self.assertEquals(broker2.get_sync('12345'), -1) + self.assertEqual(broker2.get_sync('12345'), -1) broker1.merge_syncs([{'sync_point': 3, 'remote_id': '12345'}]) broker2.merge_syncs(broker1.get_syncs()) - self.assertEquals(broker2.get_sync('12345'), 3) + self.assertEqual(broker2.get_sync('12345'), 3) def test_merge_items(self): broker1 = ContainerBroker(':memory:', account='a', container='c') @@ -1230,16 +1230,16 @@ class TestContainerBroker(unittest.TestCase): broker2.merge_items(broker1.get_items_since( broker2.get_sync(id), 1000), id) items = broker2.get_items_since(-1, 1000) - self.assertEquals(len(items), 2) - self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items])) + self.assertEqual(len(items), 2) + self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items])) broker1.put_object('c', Timestamp(3).internal, 0, 'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') broker2.merge_items(broker1.get_items_since( broker2.get_sync(id), 
1000), id) items = broker2.get_items_since(-1, 1000) - self.assertEquals(len(items), 3) - self.assertEquals(['a', 'b', 'c'], - sorted([rec['name'] for rec in items])) + self.assertEqual(len(items), 3) + self.assertEqual(['a', 'b', 'c'], + sorted([rec['name'] for rec in items])) def test_merge_items_overwrite_unicode(self): # test DatabaseBroker.merge_items @@ -1260,13 +1260,13 @@ class TestContainerBroker(unittest.TestCase): broker2.merge_items(json.loads(json.dumps(broker1.get_items_since( broker2.get_sync(id), 1000))), id) items = broker2.get_items_since(-1, 1000) - self.assertEquals(['b', snowman], - sorted([rec['name'] for rec in items])) + self.assertEqual(['b', snowman], + sorted([rec['name'] for rec in items])) for rec in items: if rec['name'] == snowman: - self.assertEquals(rec['created_at'], Timestamp(4).internal) + self.assertEqual(rec['created_at'], Timestamp(4).internal) if rec['name'] == 'b': - self.assertEquals(rec['created_at'], Timestamp(3).internal) + self.assertEqual(rec['created_at'], Timestamp(3).internal) def test_merge_items_overwrite(self): # test DatabaseBroker.merge_items @@ -1286,12 +1286,12 @@ class TestContainerBroker(unittest.TestCase): broker2.merge_items(broker1.get_items_since( broker2.get_sync(id), 1000), id) items = broker2.get_items_since(-1, 1000) - self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items])) + self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items])) for rec in items: if rec['name'] == 'a': - self.assertEquals(rec['created_at'], Timestamp(4).internal) + self.assertEqual(rec['created_at'], Timestamp(4).internal) if rec['name'] == 'b': - self.assertEquals(rec['created_at'], Timestamp(3).internal) + self.assertEqual(rec['created_at'], Timestamp(3).internal) def test_merge_items_post_overwrite_out_of_order(self): # test DatabaseBroker.merge_items @@ -1311,32 +1311,32 @@ class TestContainerBroker(unittest.TestCase): broker2.merge_items(broker1.get_items_since( broker2.get_sync(id), 1000), id) 
items = broker2.get_items_since(-1, 1000) - self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items])) + self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items])) for rec in items: if rec['name'] == 'a': - self.assertEquals(rec['created_at'], Timestamp(4).internal) + self.assertEqual(rec['created_at'], Timestamp(4).internal) if rec['name'] == 'b': - self.assertEquals(rec['created_at'], Timestamp(3).internal) - self.assertEquals(rec['content_type'], 'text/plain') + self.assertEqual(rec['created_at'], Timestamp(3).internal) + self.assertEqual(rec['content_type'], 'text/plain') items = broker2.get_items_since(-1, 1000) - self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items])) + self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items])) for rec in items: if rec['name'] == 'a': - self.assertEquals(rec['created_at'], Timestamp(4).internal) + self.assertEqual(rec['created_at'], Timestamp(4).internal) if rec['name'] == 'b': - self.assertEquals(rec['created_at'], Timestamp(3).internal) + self.assertEqual(rec['created_at'], Timestamp(3).internal) broker1.put_object('b', Timestamp(5).internal, 0, 'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') broker2.merge_items(broker1.get_items_since( broker2.get_sync(id), 1000), id) items = broker2.get_items_since(-1, 1000) - self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items])) + self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items])) for rec in items: if rec['name'] == 'a': - self.assertEquals(rec['created_at'], Timestamp(4).internal) + self.assertEqual(rec['created_at'], Timestamp(4).internal) if rec['name'] == 'b': - self.assertEquals(rec['created_at'], Timestamp(5).internal) - self.assertEquals(rec['content_type'], 'text/plain') + self.assertEqual(rec['created_at'], Timestamp(5).internal) + self.assertEqual(rec['content_type'], 'text/plain') def test_set_storage_policy_index(self): ts = (Timestamp(t).internal for t in @@ -1407,9 +1407,9 @@ class 
TestContainerBroker(unittest.TestCase): broker = ContainerBroker(':memory:', account='test_account', container='test_container') broker.initialize(Timestamp('1').internal, 0) - self.assertEquals(-1, broker.get_reconciler_sync()) + self.assertEqual(-1, broker.get_reconciler_sync()) broker.update_reconciler_sync(10) - self.assertEquals(10, broker.get_reconciler_sync()) + self.assertEqual(10, broker.get_reconciler_sync()) @with_tempdir def test_legacy_pending_files(self, tempdir): @@ -1572,7 +1572,7 @@ class TestContainerBrokerBeforeMetadata(ContainerBrokerMigrationMixin, conn.execute('SELECT metadata FROM container_stat') except BaseException as err: exc = err - self.assert_('no such column: metadata' in str(exc)) + self.assertTrue('no such column: metadata' in str(exc)) def tearDown(self): super(TestContainerBrokerBeforeMetadata, self).tearDown() @@ -1647,7 +1647,7 @@ class TestContainerBrokerBeforeXSync(ContainerBrokerMigrationMixin, FROM container_stat''') except BaseException as err: exc = err - self.assert_('no such column: x_container_sync_point1' in str(exc)) + self.assertTrue('no such column: x_container_sync_point1' in str(exc)) def tearDown(self): super(TestContainerBrokerBeforeXSync, self).tearDown() @@ -1762,7 +1762,7 @@ class TestContainerBrokerBeforeSPI(ContainerBrokerMigrationMixin, FROM container_stat''') except BaseException as err: exc = err - self.assert_('no such column: storage_policy_index' in str(exc)) + self.assertTrue('no such column: storage_policy_index' in str(exc)) def tearDown(self): super(TestContainerBrokerBeforeSPI, self).tearDown() @@ -1787,8 +1787,8 @@ class TestContainerBrokerBeforeSPI(ContainerBrokerMigrationMixin, ''').fetchone()[0] except sqlite3.OperationalError as err: # confirm that the table doesn't have this column - self.assert_('no such column: storage_policy_index' in - str(err)) + self.assertTrue('no such column: storage_policy_index' in + str(err)) else: self.fail('broker did not raise sqlite3.OperationalError ' 
'trying to select from storage_policy_index ' @@ -1833,7 +1833,8 @@ class TestContainerBrokerBeforeSPI(ContainerBrokerMigrationMixin, self.assertEqual(info[k], v, 'The value for %s was %r not %r' % ( k, info[k], v)) - self.assert_(Timestamp(info['created_at']) > Timestamp(put_timestamp)) + self.assertTrue( + Timestamp(info['created_at']) > Timestamp(put_timestamp)) self.assertNotEqual(int(info['hash'], 16), 0) orig_hash = info['hash'] # get_replication_info @@ -1842,7 +1843,8 @@ class TestContainerBrokerBeforeSPI(ContainerBrokerMigrationMixin, expected['count'] = expected.pop('object_count') for k, v in expected.items(): self.assertEqual(info[k], v) - self.assert_(Timestamp(info['created_at']) > Timestamp(put_timestamp)) + self.assertTrue( + Timestamp(info['created_at']) > Timestamp(put_timestamp)) self.assertEqual(info['hash'], orig_hash) self.assertEqual(info['max_row'], 1) self.assertEqual(info['metadata'], '') @@ -1866,8 +1868,8 @@ class TestContainerBrokerBeforeSPI(ContainerBrokerMigrationMixin, ''').fetchone()[0] except sqlite3.OperationalError as err: # confirm that the table doesn't have this column - self.assert_('no such column: storage_policy_index' in - str(err)) + self.assertTrue('no such column: storage_policy_index' in + str(err)) else: self.fail('broker did not raise sqlite3.OperationalError ' 'trying to select from storage_policy_index ' @@ -1881,8 +1883,8 @@ class TestContainerBrokerBeforeSPI(ContainerBrokerMigrationMixin, ''').fetchone()[0] except sqlite3.OperationalError as err: # confirm that the table doesn't have this column - self.assert_('no such column: storage_policy_index' in - str(err)) + self.assertTrue('no such column: storage_policy_index' in + str(err)) else: self.fail('broker did not raise sqlite3.OperationalError ' 'trying to select from storage_policy_index ' @@ -1896,7 +1898,7 @@ class TestContainerBrokerBeforeSPI(ContainerBrokerMigrationMixin, ''').fetchone()[0] except sqlite3.OperationalError as err: # confirm that the table 
does not exist yet - self.assert_('no such table: policy_stat' in str(err)) + self.assertTrue('no such table: policy_stat' in str(err)) else: self.fail('broker did not raise sqlite3.OperationalError ' 'trying to select from storage_policy_index ' diff --git a/test/unit/container/test_reconciler.py b/test/unit/container/test_reconciler.py index 9466fbcb94..1b41227608 100644 --- a/test/unit/container/test_reconciler.py +++ b/test/unit/container/test_reconciler.py @@ -207,8 +207,8 @@ class TestReconcilerUtils(unittest.TestCase): self.assertEqual(got['account'], 'AUTH_bob') self.assertEqual(got['container'], 'con') self.assertEqual(got['obj'], 'obj') - self.assertEqual(got['q_ts'], 0000001234.20190) - self.assertEqual(got['q_record'], 0000001234.20192) + self.assertEqual(got['q_ts'], 1234.20190) + self.assertEqual(got['q_record'], 1234.20192) self.assertEqual(got['q_op'], 'PUT') # negative test @@ -606,9 +606,9 @@ class TestReconcilerUtils(unittest.TestCase): self.assertEqual(args['headers']['X-Content-Type'], 'application/x-delete') for header in required_headers: - self.assert_(header in args['headers'], - '%r was missing request headers %r' % ( - header, args['headers'])) + self.assertTrue(header in args['headers'], + '%r was missing request headers %r' % ( + header, args['headers'])) def test_add_to_reconciler_queue_force(self): mock_path = 'swift.common.direct_client.http_connect' @@ -646,9 +646,9 @@ class TestReconcilerUtils(unittest.TestCase): self.assertEqual(args['path'], '/.misplaced_objects/5947200/17:/a/c/o') for header in required_headers: - self.assert_(header in args['headers'], - '%r was missing request headers %r' % ( - header, args['headers'])) + self.assertTrue(header in args['headers'], + '%r was missing request headers %r' % ( + header, args['headers'])) def test_add_to_reconciler_queue_fails(self): mock_path = 'swift.common.direct_client.http_connect' diff --git a/test/unit/container/test_replicator.py b/test/unit/container/test_replicator.py 
index 86a8880737..4980bef2ed 100644 --- a/test/unit/container/test_replicator.py +++ b/test/unit/container/test_replicator.py @@ -132,9 +132,9 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): debug_lines)) for metric in expected_timings: expected = 'replicator-rpc-sync time for %s:' % metric - self.assert_(any(expected in line for line in debug_lines), - 'debug timing %r was not in %r' % ( - expected, debug_lines)) + self.assertTrue(any(expected in line for line in debug_lines), + 'debug timing %r was not in %r' % ( + expected, debug_lines)) def test_sync_remote_missing(self): broker = self._get_broker('a', 'c', node_index=0) @@ -358,7 +358,7 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): name, ts, size, content_type, etag = item remote_names.add(name) self.assertEqual(content_type, 'content-type-new') - self.assert_('o101' in remote_names) + self.assertTrue('o101' in remote_names) self.assertEqual(len(remote_names), 101) self.assertEqual(remote_broker.get_info()['object_count'], 101) @@ -384,18 +384,18 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): self.assertTrue(remote_broker.is_deleted()) info = broker.get_info() remote_info = remote_broker.get_info() - self.assert_(Timestamp(remote_info['status_changed_at']) > - Timestamp(remote_info['put_timestamp']), - 'remote status_changed_at (%s) is not ' - 'greater than put_timestamp (%s)' % ( - remote_info['status_changed_at'], - remote_info['put_timestamp'])) - self.assert_(Timestamp(remote_info['status_changed_at']) > - Timestamp(info['status_changed_at']), - 'remote status_changed_at (%s) is not ' - 'greater than local status_changed_at (%s)' % ( - remote_info['status_changed_at'], - info['status_changed_at'])) + self.assertTrue(Timestamp(remote_info['status_changed_at']) > + Timestamp(remote_info['put_timestamp']), + 'remote status_changed_at (%s) is not ' + 'greater than put_timestamp (%s)' % ( + remote_info['status_changed_at'], + 
remote_info['put_timestamp'])) + self.assertTrue(Timestamp(remote_info['status_changed_at']) > + Timestamp(info['status_changed_at']), + 'remote status_changed_at (%s) is not ' + 'greater than local status_changed_at (%s)' % ( + remote_info['status_changed_at'], + info['status_changed_at'])) @contextmanager def _wrap_merge_timestamps(self, broker, calls): @@ -851,7 +851,7 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): part, node = self._get_broker_part_node(broker) daemon = self._run_once(node) # push to remote, and third node was missing (also maybe reconciler) - self.assert_(2 < daemon.stats['rsync'] <= 3) + self.assertTrue(2 < daemon.stats['rsync'] <= 3) # grab the rsynced instance of remote_broker remote_broker = self._get_broker('a', 'c', node_index=1) diff --git a/test/unit/container/test_server.py b/test/unit/container/test_server.py index c62ccc6464..820bdebcc0 100644 --- a/test/unit/container/test_server.py +++ b/test/unit/container/test_server.py @@ -20,7 +20,6 @@ import unittest import itertools from contextlib import contextmanager from shutil import rmtree -from StringIO import StringIO from tempfile import mkdtemp from test.unit import FakeLogger from time import gmtime @@ -30,6 +29,8 @@ import random from eventlet import spawn, Timeout, listen import simplejson +from six import BytesIO +from six import StringIO from swift import __version__ as swift_version from swift.common.swob import Request, HeaderKeyDict @@ -69,7 +70,7 @@ class TestContainerController(unittest.TestCase): self.controller = container_server.ContainerController( {'devices': self.testdir, 'mount_check': 'false'}) # some of the policy tests want at least two policies - self.assert_(len(POLICIES) > 1) + self.assertTrue(len(POLICIES) > 1) def tearDown(self): rmtree(os.path.dirname(self.testdir), ignore_errors=1) @@ -105,7 +106,7 @@ class TestContainerController(unittest.TestCase): }) resp = req.get_response(self.controller) self.assertEqual(400, resp.status_int) - 
self.assert_('invalid' in resp.body.lower()) + self.assertTrue('invalid' in resp.body.lower()) # good policies for policy in POLICIES: @@ -123,53 +124,53 @@ class TestContainerController(unittest.TestCase): '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '0'}) resp = req.get_response(self.controller) - self.assert_(resp.status.startswith('201')) + self.assertTrue(resp.status.startswith('201')) req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'}) response = req.get_response(self.controller) - self.assert_(response.status.startswith('204')) - self.assert_('x-container-read' not in response.headers) - self.assert_('x-container-write' not in response.headers) + self.assertTrue(response.status.startswith('204')) + self.assertTrue('x-container-read' not in response.headers) + self.assertTrue('x-container-write' not in response.headers) # Ensure POSTing acls works req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': '1', 'X-Container-Read': '.r:*', 'X-Container-Write': 'account:user'}) resp = req.get_response(self.controller) - self.assert_(resp.status.startswith('204')) + self.assertTrue(resp.status.startswith('204')) req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'}) response = req.get_response(self.controller) - self.assert_(response.status.startswith('204')) - self.assertEquals(response.headers.get('x-container-read'), '.r:*') - self.assertEquals(response.headers.get('x-container-write'), - 'account:user') + self.assertTrue(response.status.startswith('204')) + self.assertEqual(response.headers.get('x-container-read'), '.r:*') + self.assertEqual(response.headers.get('x-container-write'), + 'account:user') # Ensure we can clear acls on POST req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': '3', 'X-Container-Read': '', 'X-Container-Write': ''}) resp = req.get_response(self.controller) - 
self.assert_(resp.status.startswith('204')) + self.assertTrue(resp.status.startswith('204')) req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'}) response = req.get_response(self.controller) - self.assert_(response.status.startswith('204')) - self.assert_('x-container-read' not in response.headers) - self.assert_('x-container-write' not in response.headers) + self.assertTrue(response.status.startswith('204')) + self.assertTrue('x-container-read' not in response.headers) + self.assertTrue('x-container-write' not in response.headers) # Ensure PUTing acls works req = Request.blank( '/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '4', 'X-Container-Read': '.r:*', 'X-Container-Write': 'account:user'}) resp = req.get_response(self.controller) - self.assert_(resp.status.startswith('201')) + self.assertTrue(resp.status.startswith('201')) req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'HEAD'}) response = req.get_response(self.controller) - self.assert_(response.status.startswith('204')) - self.assertEquals(response.headers.get('x-container-read'), '.r:*') - self.assertEquals(response.headers.get('x-container-write'), - 'account:user') + self.assertTrue(response.status.startswith('204')) + self.assertEqual(response.headers.get('x-container-read'), '.r:*') + self.assertEqual(response.headers.get('x-container-write'), + 'account:user') def test_HEAD(self): start = int(time.time()) @@ -201,7 +202,7 @@ class TestContainerController(unittest.TestCase): created_at_header = Timestamp(response.headers['x-timestamp']) self.assertEqual(response.headers['x-timestamp'], created_at_header.normal) - self.assert_(created_at_header >= start) + self.assertTrue(created_at_header >= start) self.assertEqual(response.headers['x-put-timestamp'], Timestamp(start).normal) @@ -209,7 +210,7 @@ class TestContainerController(unittest.TestCase): self.assertEqual(int(response.headers ['X-Backend-Storage-Policy-Index']), int(POLICIES.default)) - 
self.assert_( + self.assertTrue( Timestamp(response.headers['x-backend-timestamp']) >= start) self.assertEqual(response.headers['x-backend-put-timestamp'], Timestamp(start).internal) @@ -259,8 +260,8 @@ class TestContainerController(unittest.TestCase): self.assertEqual(int(resp.headers[ 'X-Backend-Storage-Policy-Index']), int(POLICIES.default)) - self.assert_(Timestamp(resp.headers['x-backend-timestamp']) >= - Timestamp(request_method_times['PUT'])) + self.assertTrue(Timestamp(resp.headers['x-backend-timestamp']) >= + Timestamp(request_method_times['PUT'])) self.assertEqual(resp.headers['x-backend-put-timestamp'], request_method_times['PUT']) self.assertEqual(resp.headers['x-backend-delete-timestamp'], @@ -276,7 +277,7 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'HEAD', 'HTTP_X_TIMESTAMP': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_HEAD_insufficient_storage(self): self.controller = container_server.ContainerController( @@ -285,14 +286,14 @@ class TestContainerController(unittest.TestCase): '/sda-null/p/a/c', environ={'REQUEST_METHOD': 'HEAD', 'HTTP_X_TIMESTAMP': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 507) + self.assertEqual(resp.status_int, 507) def test_HEAD_invalid_content_type(self): req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'}, headers={'Accept': 'application/plain'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 406) + self.assertEqual(resp.status_int, 406) def test_HEAD_invalid_format(self): format = '%D1%BD%8A9' # invalid UTF-8; should be %E1%BD%8A9 (E -> D) @@ -300,7 +301,7 @@ class TestContainerController(unittest.TestCase): '/sda1/p/a/c?format=' + format, environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 400) + 
self.assertEqual(resp.status_int, 400) def test_OPTIONS(self): server_handler = container_server.ContainerController( @@ -308,25 +309,25 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/sda1/p/a/c/o', {'REQUEST_METHOD': 'OPTIONS'}) req.content_length = 0 resp = server_handler.OPTIONS(req) - self.assertEquals(200, resp.status_int) + self.assertEqual(200, resp.status_int) for verb in 'OPTIONS GET POST PUT DELETE HEAD REPLICATE'.split(): self.assertTrue( verb in resp.headers['Allow'].split(', ')) - self.assertEquals(len(resp.headers['Allow'].split(', ')), 7) - self.assertEquals(resp.headers['Server'], - (self.controller.server_type + '/' + swift_version)) + self.assertEqual(len(resp.headers['Allow'].split(', ')), 7) + self.assertEqual(resp.headers['Server'], + (self.controller.server_type + '/' + swift_version)) def test_PUT(self): req = Request.blank( - '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', - 'HTTP_X_TIMESTAMP': '1'}) + '/sda1/p/a/c', + environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( - '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', - 'HTTP_X_TIMESTAMP': '2'}) + '/sda1/p/a/c', + environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '2'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 202) + self.assertEqual(resp.status_int, 202) def test_PUT_simulated_create_race(self): state = ['initial'] @@ -358,14 +359,14 @@ class TestContainerController(unittest.TestCase): with mock.patch("swift.container.server.ContainerBroker", InterceptedCoBr): req = Request.blank( - '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', - 'HTTP_X_TIMESTAMP': '1'}) + '/sda1/p/a/c', + environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1'}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 201) state[0] = "race" req = Request.blank( - '/sda1/p/a/c', 
environ={'REQUEST_METHOD': 'PUT', - 'HTTP_X_TIMESTAMP': '1'}) + '/sda1/p/a/c', + environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1'}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 202) @@ -375,7 +376,7 @@ class TestContainerController(unittest.TestCase): headers={'X-Timestamp': '1', 'X-Size': '0', 'X-Content-Type': 'text/plain', 'X-ETag': 'e'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_PUT_good_policy_specified(self): policy = random.choice(list(POLICIES)) @@ -385,30 +386,30 @@ class TestContainerController(unittest.TestCase): 'X-Backend-Storage-Policy-Index': policy.idx}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'), - str(policy.idx)) + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'), + str(policy.idx)) # now make sure we read it back req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'), - str(policy.idx)) + self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'), + str(policy.idx)) def test_PUT_no_policy_specified(self): # Set metadata header req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': Timestamp(1).internal}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'), - str(POLICIES.default.idx)) + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'), + str(POLICIES.default.idx)) # now make sure the default was used (pol 1) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - 
self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'), - str(POLICIES.default.idx)) + self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'), + str(POLICIES.default.idx)) def test_PUT_bad_policy_specified(self): # Set metadata header @@ -417,7 +418,7 @@ class TestContainerController(unittest.TestCase): 'X-Backend-Storage-Policy-Index': 'nada'}) resp = req.get_response(self.controller) # make sure we get bad response - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) self.assertFalse('X-Backend-Storage-Policy-Index' in resp.headers) def test_PUT_no_policy_change(self): @@ -428,13 +429,13 @@ class TestContainerController(unittest.TestCase): 'X-Timestamp': next(ts), 'X-Backend-Storage-Policy-Index': policy.idx}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c') resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) # make sure we get the right index back - self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'), - str(policy.idx)) + self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'), + str(policy.idx)) # now try to update w/o changing the policy for method in ('POST', 'PUT'): @@ -443,13 +444,13 @@ class TestContainerController(unittest.TestCase): 'X-Backend-Storage-Policy-Index': policy.idx }) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int // 100, 2) + self.assertEqual(resp.status_int // 100, 2) # make sure we get the right index back req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'), - str(policy.idx)) + self.assertEqual(resp.status_int, 204) + 
self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'), + str(policy.idx)) def test_PUT_bad_policy_change(self): ts = (Timestamp(t).internal for t in itertools.count(time.time())) @@ -459,13 +460,13 @@ class TestContainerController(unittest.TestCase): 'X-Timestamp': next(ts), 'X-Backend-Storage-Policy-Index': policy.idx}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c') resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) # make sure we get the right index back - self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'), - str(policy.idx)) + self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'), + str(policy.idx)) other_policies = [p for p in POLICIES if p != policy] for other_policy in other_policies: @@ -475,18 +476,18 @@ class TestContainerController(unittest.TestCase): 'X-Backend-Storage-Policy-Index': other_policy.idx }) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 409) - self.assertEquals( + self.assertEqual(resp.status_int, 409) + self.assertEqual( resp.headers.get('X-Backend-Storage-Policy-Index'), str(policy.idx)) # and make sure there is no change! 
req = Request.blank('/sda1/p/a/c') resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) # make sure we get the right index back - self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'), - str(policy.idx)) + self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'), + str(policy.idx)) def test_POST_ignores_policy_change(self): ts = (Timestamp(t).internal for t in itertools.count(time.time())) @@ -495,13 +496,13 @@ class TestContainerController(unittest.TestCase): 'X-Timestamp': next(ts), 'X-Backend-Storage-Policy-Index': policy.idx}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c') resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) # make sure we get the right index back - self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'), - str(policy.idx)) + self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'), + str(policy.idx)) other_policies = [p for p in POLICIES if p != policy] for other_policy in other_policies: @@ -512,16 +513,16 @@ class TestContainerController(unittest.TestCase): }) resp = req.get_response(self.controller) # valid request - self.assertEquals(resp.status_int // 100, 2) + self.assertEqual(resp.status_int // 100, 2) # but it does nothing req = Request.blank('/sda1/p/a/c') resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) # make sure we get the right index back - self.assertEquals(resp.headers.get - ('X-Backend-Storage-Policy-Index'), - str(policy.idx)) + self.assertEqual(resp.headers.get + ('X-Backend-Storage-Policy-Index'), + str(policy.idx)) def test_PUT_no_policy_for_existing_default(self): ts = (Timestamp(t).internal for t in @@ -630,58 +631,58 @@ class 
TestContainerController(unittest.TestCase): headers={'X-Timestamp': Timestamp(1).internal, 'X-Container-Meta-Test': 'Value'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value') + self.assertEqual(resp.status_int, 204) + self.assertEqual(resp.headers.get('x-container-meta-test'), 'Value') # Set another metadata header, ensuring old one doesn't disappear req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': Timestamp(1).internal, 'X-Container-Meta-Test2': 'Value2'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value') - self.assertEquals(resp.headers.get('x-container-meta-test2'), 'Value2') + self.assertEqual(resp.status_int, 204) + self.assertEqual(resp.headers.get('x-container-meta-test'), 'Value') + self.assertEqual(resp.headers.get('x-container-meta-test2'), 'Value2') # Update metadata header req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': Timestamp(3).internal, 'X-Container-Meta-Test': 'New Value'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 202) + self.assertEqual(resp.status_int, 202) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assertEquals(resp.headers.get('x-container-meta-test'), - 'New Value') + self.assertEqual(resp.status_int, 204) + 
self.assertEqual(resp.headers.get('x-container-meta-test'), + 'New Value') # Send old update to metadata header req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': Timestamp(2).internal, 'X-Container-Meta-Test': 'Old Value'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 202) + self.assertEqual(resp.status_int, 202) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assertEquals(resp.headers.get('x-container-meta-test'), - 'New Value') + self.assertEqual(resp.status_int, 204) + self.assertEqual(resp.headers.get('x-container-meta-test'), + 'New Value') # Remove metadata header (by setting it to empty) req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': Timestamp(4).internal, 'X-Container-Meta-Test': ''}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 202) + self.assertEqual(resp.status_int, 202) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assert_('x-container-meta-test' not in resp.headers) + self.assertEqual(resp.status_int, 204) + self.assertTrue('x-container-meta-test' not in resp.headers) def test_PUT_GET_sys_metadata(self): prefix = get_sys_meta_prefix('container') @@ -692,60 +693,60 @@ class TestContainerController(unittest.TestCase): headers={'X-Timestamp': Timestamp(1).internal, key: 'Value'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assertEquals(resp.headers.get(key.lower()), 'Value') + self.assertEqual(resp.status_int, 204) + 
self.assertEqual(resp.headers.get(key.lower()), 'Value') # Set another metadata header, ensuring old one doesn't disappear req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': Timestamp(1).internal, key2: 'Value2'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assertEquals(resp.headers.get(key.lower()), 'Value') - self.assertEquals(resp.headers.get(key2.lower()), 'Value2') + self.assertEqual(resp.status_int, 204) + self.assertEqual(resp.headers.get(key.lower()), 'Value') + self.assertEqual(resp.headers.get(key2.lower()), 'Value2') # Update metadata header req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': Timestamp(3).internal, key: 'New Value'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 202) + self.assertEqual(resp.status_int, 202) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assertEquals(resp.headers.get(key.lower()), - 'New Value') + self.assertEqual(resp.status_int, 204) + self.assertEqual(resp.headers.get(key.lower()), + 'New Value') # Send old update to metadata header req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': Timestamp(2).internal, key: 'Old Value'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 202) + self.assertEqual(resp.status_int, 202) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assertEquals(resp.headers.get(key.lower()), - 'New Value') + self.assertEqual(resp.status_int, 204) + 
self.assertEqual(resp.headers.get(key.lower()), + 'New Value') # Remove metadata header (by setting it to empty) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': Timestamp(4).internal, key: ''}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 202) + self.assertEqual(resp.status_int, 202) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assert_(key.lower() not in resp.headers) + self.assertEqual(resp.status_int, 204) + self.assertTrue(key.lower() not in resp.headers) def test_PUT_invalid_partition(self): req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_PUT_timestamp_not_float(self): req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', @@ -754,7 +755,7 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': 'not-float'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_PUT_insufficient_storage(self): self.controller = container_server.ContainerController( @@ -763,60 +764,60 @@ class TestContainerController(unittest.TestCase): '/sda-null/p/a/c', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 507) + self.assertEqual(resp.status_int, 507) def test_POST_HEAD_metadata(self): req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': Timestamp(1).internal}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) # Set metadata header 
req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': Timestamp(1).internal, 'X-Container-Meta-Test': 'Value'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value') + self.assertEqual(resp.status_int, 204) + self.assertEqual(resp.headers.get('x-container-meta-test'), 'Value') # Update metadata header req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': Timestamp(3).internal, 'X-Container-Meta-Test': 'New Value'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assertEquals(resp.headers.get('x-container-meta-test'), - 'New Value') + self.assertEqual(resp.status_int, 204) + self.assertEqual(resp.headers.get('x-container-meta-test'), + 'New Value') # Send old update to metadata header req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': Timestamp(2).internal, 'X-Container-Meta-Test': 'Old Value'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assertEquals(resp.headers.get('x-container-meta-test'), - 'New Value') + self.assertEqual(resp.status_int, 204) + self.assertEqual(resp.headers.get('x-container-meta-test'), + 'New Value') # Remove metadata header (by setting it to empty) req = 
Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': Timestamp(4).internal, 'X-Container-Meta-Test': ''}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assert_('x-container-meta-test' not in resp.headers) + self.assertEqual(resp.status_int, 204) + self.assertTrue('x-container-meta-test' not in resp.headers) def test_POST_HEAD_sys_metadata(self): prefix = get_sys_meta_prefix('container') @@ -824,55 +825,55 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': Timestamp(1).internal}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) # Set metadata header req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': Timestamp(1).internal, key: 'Value'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assertEquals(resp.headers.get(key.lower()), 'Value') + self.assertEqual(resp.status_int, 204) + self.assertEqual(resp.headers.get(key.lower()), 'Value') # Update metadata header req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': Timestamp(3).internal, key: 'New Value'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.controller) - 
self.assertEquals(resp.status_int, 204) - self.assertEquals(resp.headers.get(key.lower()), - 'New Value') + self.assertEqual(resp.status_int, 204) + self.assertEqual(resp.headers.get(key.lower()), + 'New Value') # Send old update to metadata header req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': Timestamp(2).internal, key: 'Old Value'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assertEquals(resp.headers.get(key.lower()), - 'New Value') + self.assertEqual(resp.status_int, 204) + self.assertEqual(resp.headers.get(key.lower()), + 'New Value') # Remove metadata header (by setting it to empty) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': Timestamp(4).internal, key: ''}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) - self.assert_(key.lower() not in resp.headers) + self.assertEqual(resp.status_int, 204) + self.assertTrue(key.lower() not in resp.headers) def test_POST_invalid_partition(self): req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'POST', 'HTTP_X_TIMESTAMP': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_POST_timestamp_not_float(self): req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', @@ -881,7 +882,7 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': 'not-float'}) resp = 
req.get_response(self.controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_POST_insufficient_storage(self): self.controller = container_server.ContainerController( @@ -890,7 +891,7 @@ class TestContainerController(unittest.TestCase): '/sda-null/p/a/c', environ={'REQUEST_METHOD': 'POST', 'HTTP_X_TIMESTAMP': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 507) + self.assertEqual(resp.status_int, 507) def test_POST_invalid_container_sync_to(self): self.controller = container_server.ContainerController( @@ -900,7 +901,7 @@ class TestContainerController(unittest.TestCase): 'HTTP_X_TIMESTAMP': '1'}, headers={'x-container-sync-to': '192.168.0.1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_POST_after_DELETE_not_found(self): req = Request.blank('/sda1/p/a/c', @@ -915,7 +916,7 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': '3'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_DELETE_obj_not_found(self): req = Request.blank( @@ -923,27 +924,27 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_DELETE_container_not_found(self): req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE', 'HTTP_X_TIMESTAMP': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 
404) def test_PUT_utf8(self): snowman = u'\u2603' container_name = snowman.encode('utf-8') req = Request.blank( - '/sda1/p/a/%s' % container_name, environ={ - 'REQUEST_METHOD': 'PUT', - 'HTTP_X_TIMESTAMP': '1'}) + '/sda1/p/a/%s' % container_name, + environ={'REQUEST_METHOD': 'PUT', + 'HTTP_X_TIMESTAMP': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_account_update_mismatched_host_device(self): req = Request.blank( @@ -956,7 +957,7 @@ class TestContainerController(unittest.TestCase): 'X-Account-Device': 'sda1,sda2'}) broker = self.controller._get_container_broker('sda1', 'p', 'a', 'c') resp = self.controller.account_update(req, 'a', 'c', broker) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_account_update_account_override_deleted(self): bindsock = listen(('127.0.0.1', 0)) @@ -974,7 +975,7 @@ class TestContainerController(unittest.TestCase): new_connect = fake_http_connect(200, count=123) swift.container.server.http_connect = new_connect resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_PUT_account_update(self): bindsock = listen(('127.0.0.1', 0)) @@ -988,16 +989,16 @@ class TestContainerController(unittest.TestCase): out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' % return_code) out.flush() - self.assertEquals(inc.readline(), - 'PUT /sda1/123/a/c HTTP/1.1\r\n') + self.assertEqual(inc.readline(), + 'PUT /sda1/123/a/c HTTP/1.1\r\n') headers = {} line = inc.readline() while line and line != '\r\n': headers[line.split(':')[0].lower()] = \ line.split(':')[1].strip() line = inc.readline() - self.assertEquals(headers['x-put-timestamp'], - expected_timestamp) + self.assertEqual(headers['x-put-timestamp'], + expected_timestamp) except BaseException as err: return err return None @@ -1013,7 +1014,7 @@ class TestContainerController(unittest.TestCase): 
try: with Timeout(3): resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) finally: err = event.wait() if err: @@ -1023,7 +1024,7 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': '2'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, @@ -1035,7 +1036,7 @@ class TestContainerController(unittest.TestCase): try: with Timeout(3): resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) finally: err = event.wait() if err: @@ -1058,7 +1059,7 @@ class TestContainerController(unittest.TestCase): err = event.wait() if err: raise Exception(err) - self.assert_(not got_exc) + self.assertTrue(not got_exc) def test_PUT_reset_container_sync(self): req = Request.blank( @@ -1066,37 +1067,37 @@ class TestContainerController(unittest.TestCase): headers={'x-timestamp': '1', 'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) db = self.controller._get_container_broker('sda1', 'p', 'a', 'c') info = db.get_info() - self.assertEquals(info['x_container_sync_point1'], -1) - self.assertEquals(info['x_container_sync_point2'], -1) + self.assertEqual(info['x_container_sync_point1'], -1) + self.assertEqual(info['x_container_sync_point2'], -1) db.set_x_container_sync_points(123, 456) info = db.get_info() - self.assertEquals(info['x_container_sync_point1'], 123) - self.assertEquals(info['x_container_sync_point2'], 456) + self.assertEqual(info['x_container_sync_point1'], 123) + self.assertEqual(info['x_container_sync_point2'], 456) # Set to same value req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, 
headers={'x-timestamp': '1', 'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 202) + self.assertEqual(resp.status_int, 202) db = self.controller._get_container_broker('sda1', 'p', 'a', 'c') info = db.get_info() - self.assertEquals(info['x_container_sync_point1'], 123) - self.assertEquals(info['x_container_sync_point2'], 456) + self.assertEqual(info['x_container_sync_point1'], 123) + self.assertEqual(info['x_container_sync_point2'], 456) # Set to new value req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, headers={'x-timestamp': '1', 'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c2'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 202) + self.assertEqual(resp.status_int, 202) db = self.controller._get_container_broker('sda1', 'p', 'a', 'c') info = db.get_info() - self.assertEquals(info['x_container_sync_point1'], -1) - self.assertEquals(info['x_container_sync_point2'], -1) + self.assertEqual(info['x_container_sync_point1'], -1) + self.assertEqual(info['x_container_sync_point2'], -1) def test_POST_reset_container_sync(self): req = Request.blank( @@ -1104,68 +1105,68 @@ class TestContainerController(unittest.TestCase): headers={'x-timestamp': '1', 'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) db = self.controller._get_container_broker('sda1', 'p', 'a', 'c') info = db.get_info() - self.assertEquals(info['x_container_sync_point1'], -1) - self.assertEquals(info['x_container_sync_point2'], -1) + self.assertEqual(info['x_container_sync_point1'], -1) + self.assertEqual(info['x_container_sync_point2'], -1) db.set_x_container_sync_points(123, 456) info = db.get_info() - self.assertEquals(info['x_container_sync_point1'], 123) - self.assertEquals(info['x_container_sync_point2'], 456) + 
self.assertEqual(info['x_container_sync_point1'], 123) + self.assertEqual(info['x_container_sync_point2'], 456) # Set to same value req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'}, headers={'x-timestamp': '1', 'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) db = self.controller._get_container_broker('sda1', 'p', 'a', 'c') info = db.get_info() - self.assertEquals(info['x_container_sync_point1'], 123) - self.assertEquals(info['x_container_sync_point2'], 456) + self.assertEqual(info['x_container_sync_point1'], 123) + self.assertEqual(info['x_container_sync_point2'], 456) # Set to new value req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'}, headers={'x-timestamp': '1', 'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c2'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) db = self.controller._get_container_broker('sda1', 'p', 'a', 'c') info = db.get_info() - self.assertEquals(info['x_container_sync_point1'], -1) - self.assertEquals(info['x_container_sync_point2'], -1) + self.assertEqual(info['x_container_sync_point1'], -1) + self.assertEqual(info['x_container_sync_point2'], -1) def test_DELETE(self): req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': '2'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}, headers={'X-Timestamp': '3'}) resp = req.get_response(self.controller) - 
self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_DELETE_PUT_recreate(self): path = '/sda1/p/a/c' req = Request.blank(path, method='PUT', headers={'X-Timestamp': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank(path, method='DELETE', headers={'X-Timestamp': '2'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank(path, method='GET') resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) # sanity + self.assertEqual(resp.status_int, 404) # sanity # backend headers expectations = { 'x-backend-put-timestamp': Timestamp(1).internal, @@ -1179,20 +1180,20 @@ class TestContainerController(unittest.TestCase): db = self.controller._get_container_broker('sda1', 'p', 'a', 'c') self.assertEqual(True, db.is_deleted()) info = db.get_info() - self.assertEquals(info['put_timestamp'], Timestamp('1').internal) - self.assertEquals(info['delete_timestamp'], Timestamp('2').internal) - self.assertEquals(info['status_changed_at'], Timestamp('2').internal) + self.assertEqual(info['put_timestamp'], Timestamp('1').internal) + self.assertEqual(info['delete_timestamp'], Timestamp('2').internal) + self.assertEqual(info['status_changed_at'], Timestamp('2').internal) # recreate req = Request.blank(path, method='PUT', headers={'X-Timestamp': '4'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) db = self.controller._get_container_broker('sda1', 'p', 'a', 'c') self.assertEqual(False, db.is_deleted()) info = db.get_info() - self.assertEquals(info['put_timestamp'], Timestamp('4').internal) - self.assertEquals(info['delete_timestamp'], Timestamp('2').internal) - self.assertEquals(info['status_changed_at'], Timestamp('4').internal) + 
self.assertEqual(info['put_timestamp'], Timestamp('4').internal) + self.assertEqual(info['delete_timestamp'], Timestamp('2').internal) + self.assertEqual(info['status_changed_at'], Timestamp('4').internal) for method in ('GET', 'HEAD'): req = Request.blank(path) resp = req.get_response(self.controller) @@ -1213,15 +1214,15 @@ class TestContainerController(unittest.TestCase): req = Request.blank(path, method='PUT', headers={'X-Timestamp': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) db = self.controller._get_container_broker('sda1', 'p', 'a', 'c') req = Request.blank(path, method='DELETE', headers={'X-Timestamp': '2'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank(path, method='GET') resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) # sanity + self.assertEqual(resp.status_int, 404) # sanity self.assertEqual(True, db.is_deleted()) # now save a copy of this db (and remove it from the "current node") db = self.controller._get_container_broker('sda1', 'p', 'a', 'c') @@ -1231,7 +1232,7 @@ class TestContainerController(unittest.TestCase): # that should make it missing on this node req = Request.blank(path, method='GET') resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) # sanity + self.assertEqual(resp.status_int, 404) # sanity # setup the race in os.path.exists (first time no, then yes) mock_called = [] @@ -1260,8 +1261,8 @@ class TestContainerController(unittest.TestCase): [(exists, db.db_file) for exists in (False, True)]) # info was updated info = db.get_info() - self.assertEquals(info['put_timestamp'], Timestamp('4').internal) - self.assertEquals(info['delete_timestamp'], Timestamp('2').internal) + self.assertEqual(info['put_timestamp'], Timestamp('4').internal) + self.assertEqual(info['delete_timestamp'], 
Timestamp('2').internal) def test_DELETE_not_found(self): # Even if the container wasn't previously heard of, the container @@ -1271,7 +1272,7 @@ class TestContainerController(unittest.TestCase): '/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE', 'HTTP_X_TIMESTAMP': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_change_storage_policy_via_DELETE_then_PUT(self): ts = (Timestamp(t).internal for t in @@ -1343,33 +1344,33 @@ class TestContainerController(unittest.TestCase): '/sda1/p/a/c', method='PUT', headers={ 'X-Timestamp': Timestamp(2).internal}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c/o', method='PUT', headers={ 'X-Timestamp': Timestamp(0).internal, 'X-Size': 1, 'X-Content-Type': 'text/plain', 'X-Etag': 'x'}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) ts = (Timestamp(t).internal for t in itertools.count(3)) req = Request.blank('/sda1/p/a/c', method='DELETE', headers={ 'X-Timestamp': next(ts)}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 409) + self.assertEqual(resp.status_int, 409) req = Request.blank('/sda1/p/a/c/o', method='DELETE', headers={ 'X-Timestamp': next(ts)}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank('/sda1/p/a/c', method='DELETE', headers={ 'X-Timestamp': next(ts)}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank('/sda1/p/a/c', method='GET', headers={ 'X-Timestamp': next(ts)}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) + 
self.assertEqual(resp.status_int, 404) def test_object_update_with_offset(self): ts = (Timestamp(t).internal for t in @@ -1393,7 +1394,7 @@ class TestContainerController(unittest.TestCase): 'X-Content-Type': 'text/plain', 'X-Etag': 'x'}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) # check listing req = Request.blank('/sda1/p/a/c', method='GET', query_string='format=json') @@ -1416,7 +1417,7 @@ class TestContainerController(unittest.TestCase): 'X-Content-Type': 'text/html', 'X-Etag': 'y'}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) # check updated listing req = Request.blank('/sda1/p/a/c', method='GET', query_string='format=json') @@ -1438,7 +1439,7 @@ class TestContainerController(unittest.TestCase): 'X-Timestamp': delete_timestamp}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) # check empty listing req = Request.blank('/sda1/p/a/c', method='GET', query_string='format=json') @@ -1456,7 +1457,7 @@ class TestContainerController(unittest.TestCase): 'X-Content-Type': 'text/enriched', 'X-Etag': 'z'}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) # check un-deleted listing req = Request.blank('/sda1/p/a/c', method='GET', query_string='format=json') @@ -1478,7 +1479,7 @@ class TestContainerController(unittest.TestCase): 'X-Timestamp': delete_timestamp}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) # check empty listing req = Request.blank('/sda1/p/a/c', method='GET', query_string='format=json') @@ 
-1501,16 +1502,16 @@ class TestContainerController(unittest.TestCase): out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' % return_code) out.flush() - self.assertEquals(inc.readline(), - 'PUT /sda1/123/a/c HTTP/1.1\r\n') + self.assertEqual(inc.readline(), + 'PUT /sda1/123/a/c HTTP/1.1\r\n') headers = {} line = inc.readline() while line and line != '\r\n': headers[line.split(':')[0].lower()] = \ line.split(':')[1].strip() line = inc.readline() - self.assertEquals(headers['x-delete-timestamp'], - expected_timestamp) + self.assertEqual(headers['x-delete-timestamp'], + expected_timestamp) except BaseException as err: return err return None @@ -1519,7 +1520,7 @@ class TestContainerController(unittest.TestCase): '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE'}, @@ -1531,7 +1532,7 @@ class TestContainerController(unittest.TestCase): try: with Timeout(3): resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) finally: err = event.wait() if err: @@ -1540,7 +1541,7 @@ class TestContainerController(unittest.TestCase): '/sda1/p/a/c', method='PUT', headers={ 'X-Timestamp': Timestamp(2).internal}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE'}, @@ -1552,7 +1553,7 @@ class TestContainerController(unittest.TestCase): try: with Timeout(3): resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) finally: err = event.wait() if err: @@ -1561,7 +1562,7 @@ class TestContainerController(unittest.TestCase): '/sda1/p/a/c', method='PUT', headers={ 'X-Timestamp': 
Timestamp(4).internal}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE'}, @@ -1580,14 +1581,14 @@ class TestContainerController(unittest.TestCase): err = event.wait() if err: raise Exception(err) - self.assert_(not got_exc) + self.assertTrue(not got_exc) def test_DELETE_invalid_partition(self): req = Request.blank( '/sda1/./a/c', environ={'REQUEST_METHOD': 'DELETE', 'HTTP_X_TIMESTAMP': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_DELETE_timestamp_not_float(self): req = Request.blank( @@ -1598,7 +1599,7 @@ class TestContainerController(unittest.TestCase): '/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': 'not-float'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_DELETE_insufficient_storage(self): self.controller = container_server.ContainerController( @@ -1607,7 +1608,7 @@ class TestContainerController(unittest.TestCase): '/sda-null/p/a/c', environ={'REQUEST_METHOD': 'DELETE', 'HTTP_X_TIMESTAMP': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 507) + self.assertEqual(resp.status_int, 507) def test_GET_over_limit(self): req = Request.blank( @@ -1615,7 +1616,7 @@ class TestContainerController(unittest.TestCase): (constraints.CONTAINER_LISTING_LIMIT + 1), environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 412) + self.assertEqual(resp.status_int, 412) def test_GET_json(self): # make a container @@ -1628,8 +1629,8 @@ class TestContainerController(unittest.TestCase): '/sda1/p/a/jsonc?format=json', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 200) - 
self.assertEquals(simplejson.loads(resp.body), []) + self.assertEqual(resp.status_int, 200) + self.assertEqual(simplejson.loads(resp.body), []) # fill the container for i in range(3): req = Request.blank( @@ -1641,7 +1642,7 @@ class TestContainerController(unittest.TestCase): 'HTTP_X_SIZE': 0}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) # test format json_body = [{"name": "0", "hash": "x", @@ -1663,15 +1664,15 @@ class TestContainerController(unittest.TestCase): '/sda1/p/a/jsonc?format=json', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.content_type, 'application/json') - self.assertEquals(simplejson.loads(resp.body), json_body) - self.assertEquals(resp.charset, 'utf-8') + self.assertEqual(resp.content_type, 'application/json') + self.assertEqual(simplejson.loads(resp.body), json_body) + self.assertEqual(resp.charset, 'utf-8') req = Request.blank( '/sda1/p/a/jsonc?format=json', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.controller) - self.assertEquals(resp.content_type, 'application/json') + self.assertEqual(resp.content_type, 'application/json') for accept in ('application/json', 'application/json;q=1.0,*/*;q=0.9', '*/*;q=0.9,application/json;q=1.0', 'application/*'): @@ -1680,10 +1681,10 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': 'GET'}) req.accept = accept resp = req.get_response(self.controller) - self.assertEquals( + self.assertEqual( simplejson.loads(resp.body), json_body, 'Invalid body for Accept: %s' % accept) - self.assertEquals( + self.assertEqual( resp.content_type, 'application/json', 'Invalid content_type for Accept: %s' % accept) @@ -1692,7 +1693,7 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': 'HEAD'}) req.accept = accept resp = req.get_response(self.controller) - self.assertEquals( + 
self.assertEqual( resp.content_type, 'application/json', 'Invalid content_type for Accept: %s' % accept) @@ -1706,7 +1707,7 @@ class TestContainerController(unittest.TestCase): req = Request.blank( '/sda1/p/a/plainc', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) # fill the container for i in range(3): req = Request.blank( @@ -1718,20 +1719,20 @@ class TestContainerController(unittest.TestCase): 'HTTP_X_SIZE': 0}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) plain_body = '0\n1\n2\n' req = Request.blank('/sda1/p/a/plainc', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.content_type, 'text/plain') - self.assertEquals(resp.body, plain_body) - self.assertEquals(resp.charset, 'utf-8') + self.assertEqual(resp.content_type, 'text/plain') + self.assertEqual(resp.body, plain_body) + self.assertEqual(resp.charset, 'utf-8') req = Request.blank('/sda1/p/a/plainc', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.controller) - self.assertEquals(resp.content_type, 'text/plain') + self.assertEqual(resp.content_type, 'text/plain') for accept in ('', 'text/plain', 'application/xml;q=0.8,*/*;q=0.9', '*/*;q=0.9,application/xml;q=0.8', '*/*', @@ -1741,10 +1742,10 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': 'GET'}) req.accept = accept resp = req.get_response(self.controller) - self.assertEquals( + self.assertEqual( resp.body, plain_body, 'Invalid body for Accept: %s' % accept) - self.assertEquals( + self.assertEqual( resp.content_type, 'text/plain', 'Invalid content_type for Accept: %s' % accept) @@ -1753,7 +1754,7 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': 'GET'}) req.accept = accept resp = 
req.get_response(self.controller) - self.assertEquals( + self.assertEqual( resp.content_type, 'text/plain', 'Invalid content_type for Accept: %s' % accept) @@ -1763,17 +1764,17 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': 'GET'}) req.accept = 'application/json' resp = req.get_response(self.controller) - self.assertEquals(resp.content_type, 'text/plain') - self.assertEquals(resp.body, plain_body) + self.assertEqual(resp.content_type, 'text/plain') + self.assertEqual(resp.body, plain_body) # test unknown format uses default plain req = Request.blank( '/sda1/p/a/plainc?format=somethingelse', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.content_type, 'text/plain') - self.assertEquals(resp.body, plain_body) + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.content_type, 'text/plain') + self.assertEqual(resp.body, plain_body) def test_GET_json_last_modified(self): # make a container @@ -1792,7 +1793,7 @@ class TestContainerController(unittest.TestCase): 'HTTP_X_SIZE': 0}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) # test format # last_modified format must be uniform, even when there are not msecs json_body = [{"name": "0", @@ -1810,9 +1811,9 @@ class TestContainerController(unittest.TestCase): '/sda1/p/a/jsonc?format=json', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.content_type, 'application/json') - self.assertEquals(simplejson.loads(resp.body), json_body) - self.assertEquals(resp.charset, 'utf-8') + self.assertEqual(resp.content_type, 'application/json') + self.assertEqual(simplejson.loads(resp.body), json_body) + self.assertEqual(resp.charset, 'utf-8') def test_GET_xml(self): # make a container @@ -1832,7 +1833,7 @@ class 
TestContainerController(unittest.TestCase): 'HTTP_X_SIZE': 0}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) xml_body = '\n' \ '' \ '0x0' \ @@ -1854,15 +1855,15 @@ class TestContainerController(unittest.TestCase): '/sda1/p/a/xmlc?format=xml', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.content_type, 'application/xml') - self.assertEquals(resp.body, xml_body) - self.assertEquals(resp.charset, 'utf-8') + self.assertEqual(resp.content_type, 'application/xml') + self.assertEqual(resp.body, xml_body) + self.assertEqual(resp.charset, 'utf-8') req = Request.blank( '/sda1/p/a/xmlc?format=xml', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.controller) - self.assertEquals(resp.content_type, 'application/xml') + self.assertEqual(resp.content_type, 'application/xml') for xml_accept in ( 'application/xml', 'application/xml;q=1.0,*/*;q=0.9', @@ -1872,10 +1873,10 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': 'GET'}) req.accept = xml_accept resp = req.get_response(self.controller) - self.assertEquals( + self.assertEqual( resp.body, xml_body, 'Invalid body for Accept: %s' % xml_accept) - self.assertEquals( + self.assertEqual( resp.content_type, 'application/xml', 'Invalid content_type for Accept: %s' % xml_accept) @@ -1884,7 +1885,7 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': 'HEAD'}) req.accept = xml_accept resp = req.get_response(self.controller) - self.assertEquals( + self.assertEqual( resp.content_type, 'application/xml', 'Invalid content_type for Accept: %s' % xml_accept) @@ -1893,8 +1894,8 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': 'GET'}) req.accept = 'text/xml' resp = req.get_response(self.controller) - self.assertEquals(resp.content_type, 'text/xml') - self.assertEquals(resp.body, 
xml_body) + self.assertEqual(resp.content_type, 'text/xml') + self.assertEqual(resp.body, xml_body) def test_GET_marker(self): # make a container @@ -1912,13 +1913,13 @@ class TestContainerController(unittest.TestCase): 'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) # test limit with marker req = Request.blank('/sda1/p/a/c?limit=2&marker=1', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) result = resp.body.split() - self.assertEquals(result, ['2', ]) + self.assertEqual(result, ['2', ]) def test_weird_content_types(self): snowman = u'\u2603' @@ -1935,12 +1936,12 @@ class TestContainerController(unittest.TestCase): 'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c?format=json', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) result = [x['content_type'] for x in simplejson.loads(resp.body)] - self.assertEquals(result, [u'\u2603', 'text/plain;charset="utf-8"']) + self.assertEqual(result, [u'\u2603', 'text/plain;charset="utf-8"']) def test_GET_accept_not_valid(self): req = Request.blank('/sda1/p/a/c', method='PUT', headers={ @@ -1950,7 +1951,7 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/sda1/p/a/c', method='GET') req.accept = 'application/xml*' resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 406) + self.assertEqual(resp.status_int, 406) def test_GET_limit(self): # make a container @@ -1970,13 +1971,13 @@ class TestContainerController(unittest.TestCase): 'HTTP_X_SIZE': 0}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 
201) # test limit req = Request.blank( '/sda1/p/a/c?limit=2', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) result = resp.body.split() - self.assertEquals(result, ['0', '1']) + self.assertEqual(result, ['0', '1']) def test_GET_prefix(self): req = Request.blank( @@ -1994,18 +1995,18 @@ class TestContainerController(unittest.TestCase): 'HTTP_X_SIZE': 0}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c?prefix=a', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.body.split(), ['a1', 'a2', 'a3']) + self.assertEqual(resp.body.split(), ['a1', 'a2', 'a3']) def test_GET_delimiter_too_long(self): req = Request.blank('/sda1/p/a/c?delimiter=xx', environ={'REQUEST_METHOD': 'GET', 'HTTP_X_TIMESTAMP': '0'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 412) + self.assertEqual(resp.status_int, 412) def test_GET_delimiter(self): req = Request.blank( @@ -2021,12 +2022,12 @@ class TestContainerController(unittest.TestCase): 'HTTP_X_SIZE': 0}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c?prefix=US-&delimiter=-&format=json', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals( + self.assertEqual( simplejson.loads(resp.body), [{"subdir": "US-OK-"}, {"subdir": "US-TX-"}, @@ -2046,12 +2047,12 @@ class TestContainerController(unittest.TestCase): 'HTTP_X_SIZE': 0}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c?prefix=US-&delimiter=-&format=xml', environ={'REQUEST_METHOD': 'GET'}) resp = 
req.get_response(self.controller) - self.assertEquals( + self.assertEqual( resp.body, '' '\n' 'US-OK-' @@ -2071,22 +2072,22 @@ class TestContainerController(unittest.TestCase): 'HTTP_X_SIZE': 0}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c?delimiter=/&format=xml', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) dom = minidom.parseString(resp.body) - self.assert_(len(dom.getElementsByTagName('container')) == 1) + self.assertTrue(len(dom.getElementsByTagName('container')) == 1) container = dom.getElementsByTagName('container')[0] - self.assert_(len(container.getElementsByTagName('subdir')) == 1) + self.assertTrue(len(container.getElementsByTagName('subdir')) == 1) subdir = container.getElementsByTagName('subdir')[0] - self.assertEquals(unicode(subdir.attributes['name'].value), - u'<\'sub\' "dir">/') - self.assert_(len(subdir.getElementsByTagName('name')) == 1) + self.assertEqual(unicode(subdir.attributes['name'].value), + u'<\'sub\' "dir">/') + self.assertTrue(len(subdir.getElementsByTagName('name')) == 1) name = subdir.getElementsByTagName('name')[0] - self.assertEquals(unicode(name.childNodes[0].data), - u'<\'sub\' "dir">/') + self.assertEqual(unicode(name.childNodes[0].data), + u'<\'sub\' "dir">/') def test_GET_path(self): req = Request.blank( @@ -2102,12 +2103,12 @@ class TestContainerController(unittest.TestCase): 'HTTP_X_SIZE': 0}) self._update_object_put_headers(req) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c?path=US&format=json', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals( + self.assertEqual( simplejson.loads(resp.body), [{"name": "US/OK", "hash": "x", "bytes": 0, "content_type": "text/plain", @@ -2123,10 +2124,10 @@ 
class TestContainerController(unittest.TestCase): '/sda-null/p/a/c', environ={'REQUEST_METHOD': 'GET', 'HTTP_X_TIMESTAMP': '1'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 507) + self.assertEqual(resp.status_int, 507) def test_through_call(self): - inbuf = StringIO() + inbuf = BytesIO() errbuf = StringIO() outbuf = StringIO() @@ -2148,11 +2149,11 @@ class TestContainerController(unittest.TestCase): 'wsgi.multiprocess': False, 'wsgi.run_once': False}, start_response) - self.assertEquals(errbuf.getvalue(), '') - self.assertEquals(outbuf.getvalue()[:4], '404 ') + self.assertEqual(errbuf.getvalue(), '') + self.assertEqual(outbuf.getvalue()[:4], '404 ') def test_through_call_invalid_path(self): - inbuf = StringIO() + inbuf = BytesIO() errbuf = StringIO() outbuf = StringIO() @@ -2174,11 +2175,11 @@ class TestContainerController(unittest.TestCase): 'wsgi.multiprocess': False, 'wsgi.run_once': False}, start_response) - self.assertEquals(errbuf.getvalue(), '') - self.assertEquals(outbuf.getvalue()[:4], '400 ') + self.assertEqual(errbuf.getvalue(), '') + self.assertEqual(outbuf.getvalue()[:4], '400 ') def test_through_call_invalid_path_utf8(self): - inbuf = StringIO() + inbuf = BytesIO() errbuf = StringIO() outbuf = StringIO() @@ -2200,8 +2201,8 @@ class TestContainerController(unittest.TestCase): 'wsgi.multiprocess': False, 'wsgi.run_once': False}, start_response) - self.assertEquals(errbuf.getvalue(), '') - self.assertEquals(outbuf.getvalue()[:4], '412 ') + self.assertEqual(errbuf.getvalue(), '') + self.assertEqual(outbuf.getvalue()[:4], '412 ') def test_invalid_method_doesnt_exist(self): errbuf = StringIO() @@ -2213,8 +2214,8 @@ class TestContainerController(unittest.TestCase): self.controller.__call__({'REQUEST_METHOD': 'method_doesnt_exist', 'PATH_INFO': '/sda1/p/a/c'}, start_response) - self.assertEquals(errbuf.getvalue(), '') - self.assertEquals(outbuf.getvalue()[:4], '405 ') + self.assertEqual(errbuf.getvalue(), '') + 
self.assertEqual(outbuf.getvalue()[:4], '405 ') def test_invalid_method_is_not_public(self): errbuf = StringIO() @@ -2226,8 +2227,8 @@ class TestContainerController(unittest.TestCase): self.controller.__call__({'REQUEST_METHOD': '__init__', 'PATH_INFO': '/sda1/p/a/c'}, start_response) - self.assertEquals(errbuf.getvalue(), '') - self.assertEquals(outbuf.getvalue()[:4], '405 ') + self.assertEqual(errbuf.getvalue(), '') + self.assertEqual(outbuf.getvalue()[:4], '405 ') def test_params_format(self): req = Request.blank( @@ -2238,7 +2239,7 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/sda1/p/a/c?format=%s' % format, method='GET') resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) def test_params_utf8(self): # Bad UTF8 sequence, all parameters should cause 400 error @@ -2247,14 +2248,14 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/sda1/p/a/c?%s=\xce' % param, environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 400, - "%d on param %s" % (resp.status_int, param)) + self.assertEqual(resp.status_int, 400, + "%d on param %s" % (resp.status_int, param)) # Good UTF8 sequence for delimiter, too long (1 byte delimiters only) req = Request.blank('/sda1/p/a/c?delimiter=\xce\xa9', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 412, - "%d on param delimiter" % (resp.status_int)) + self.assertEqual(resp.status_int, 412, + "%d on param delimiter" % (resp.status_int)) req = Request.blank('/sda1/p/a/c', method='PUT', headers={'X-Timestamp': Timestamp(1).internal}) req.get_response(self.controller) @@ -2264,8 +2265,8 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/sda1/p/a/c?%s=\xce\xa9' % param, environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 
204, - "%d on param %s" % (resp.status_int, param)) + self.assertEqual(resp.status_int, 204, + "%d on param %s" % (resp.status_int, param)) def test_put_auto_create(self): headers = {'x-timestamp': Timestamp(1).internal, @@ -2277,25 +2278,25 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': 'PUT'}, headers=dict(headers)) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) req = Request.blank('/sda1/p/.a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers=dict(headers)) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/.c/o', environ={'REQUEST_METHOD': 'PUT'}, headers=dict(headers)) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) req = Request.blank('/sda1/p/a/c/.o', environ={'REQUEST_METHOD': 'PUT'}, headers=dict(headers)) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_delete_auto_create(self): headers = {'x-timestamp': Timestamp(1).internal} @@ -2304,25 +2305,25 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': 'DELETE'}, headers=dict(headers)) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) req = Request.blank('/sda1/p/.a/c/o', environ={'REQUEST_METHOD': 'DELETE'}, headers=dict(headers)) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank('/sda1/p/a/.c/o', environ={'REQUEST_METHOD': 'DELETE'}, headers=dict(headers)) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) req = Request.blank('/sda1/p/a/.c/.o', environ={'REQUEST_METHOD': 
'DELETE'}, headers=dict(headers)) resp = req.get_response(self.controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_content_type_on_HEAD(self): Request.blank('/sda1/p/a/o', @@ -2334,30 +2335,30 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/sda1/p/a/o?format=xml', environ=env) resp = req.get_response(self.controller) - self.assertEquals(resp.content_type, 'application/xml') - self.assertEquals(resp.charset, 'utf-8') + self.assertEqual(resp.content_type, 'application/xml') + self.assertEqual(resp.charset, 'utf-8') req = Request.blank('/sda1/p/a/o?format=json', environ=env) resp = req.get_response(self.controller) - self.assertEquals(resp.content_type, 'application/json') - self.assertEquals(resp.charset, 'utf-8') + self.assertEqual(resp.content_type, 'application/json') + self.assertEqual(resp.charset, 'utf-8') req = Request.blank('/sda1/p/a/o', environ=env) resp = req.get_response(self.controller) - self.assertEquals(resp.content_type, 'text/plain') - self.assertEquals(resp.charset, 'utf-8') + self.assertEqual(resp.content_type, 'text/plain') + self.assertEqual(resp.charset, 'utf-8') req = Request.blank( '/sda1/p/a/o', headers={'Accept': 'application/json'}, environ=env) resp = req.get_response(self.controller) - self.assertEquals(resp.content_type, 'application/json') - self.assertEquals(resp.charset, 'utf-8') + self.assertEqual(resp.content_type, 'application/json') + self.assertEqual(resp.charset, 'utf-8') req = Request.blank( '/sda1/p/a/o', headers={'Accept': 'application/xml'}, environ=env) resp = req.get_response(self.controller) - self.assertEquals(resp.content_type, 'application/xml') - self.assertEquals(resp.charset, 'utf-8') + self.assertEqual(resp.content_type, 'application/xml') + self.assertEqual(resp.charset, 'utf-8') def test_updating_multiple_container_servers(self): http_connect_args = [] @@ -2402,8 +2403,8 @@ class TestContainerController(unittest.TestCase): 
http_connect_args.sort(key=operator.itemgetter('ipaddr')) - self.assertEquals(len(http_connect_args), 2) - self.assertEquals( + self.assertEqual(len(http_connect_args), 2) + self.assertEqual( http_connect_args[0], {'ipaddr': '1.2.3.4', 'port': '5', @@ -2421,7 +2422,7 @@ class TestContainerController(unittest.TestCase): 'referer': 'PUT http://localhost/sda1/p/a/c', 'user-agent': 'container-server %d' % os.getpid(), 'x-trans-id': '-'})}) - self.assertEquals( + self.assertEqual( http_connect_args[1], {'ipaddr': '6.7.8.9', 'port': '10', @@ -2444,7 +2445,7 @@ class TestContainerController(unittest.TestCase): # Test replication_server flag was set from configuration file. container_controller = container_server.ContainerController conf = {'devices': self.testdir, 'mount_check': 'false'} - self.assertEquals(container_controller(conf).replication_server, None) + self.assertEqual(container_controller(conf).replication_server, None) for val in [True, '1', 'True', 'true']: conf['replication_server'] = val self.assertTrue(container_controller(conf).replication_server) @@ -2461,12 +2462,12 @@ class TestContainerController(unittest.TestCase): self.assertFalse(hasattr(method, 'replication')) for method_name in repl_methods: method = getattr(self.controller, method_name) - self.assertEquals(method.replication, True) + self.assertEqual(method.replication, True) def test_correct_allowed_method(self): # Test correct work for allowed method using # swift.container.server.ContainerController.__call__ - inbuf = StringIO() + inbuf = BytesIO() errbuf = StringIO() outbuf = StringIO() self.controller = container_server.ContainerController( @@ -2503,7 +2504,7 @@ class TestContainerController(unittest.TestCase): def test_not_allowed_method(self): # Test correct work for NOT allowed method using # swift.container.server.ContainerController.__call__ - inbuf = StringIO() + inbuf = BytesIO() errbuf = StringIO() outbuf = StringIO() self.controller = container_server.ContainerController( @@ -2539,7 
+2540,7 @@ class TestContainerController(unittest.TestCase): self.assertEqual(response, answer) def test_call_incorrect_replication_method(self): - inbuf = StringIO() + inbuf = BytesIO() errbuf = StringIO() outbuf = StringIO() self.controller = container_server.ContainerController( @@ -2567,8 +2568,8 @@ class TestContainerController(unittest.TestCase): 'wsgi.multiprocess': False, 'wsgi.run_once': False} self.controller(env, start_response) - self.assertEquals(errbuf.getvalue(), '') - self.assertEquals(outbuf.getvalue()[:4], '405 ') + self.assertEqual(errbuf.getvalue(), '') + self.assertEqual(outbuf.getvalue()[:4], '405 ') def test_GET_log_requests_true(self): self.controller.logger = FakeLogger() diff --git a/test/unit/container/test_sync.py b/test/unit/container/test_sync.py index 9251e6c378..0a97e843e2 100644 --- a/test/unit/container/test_sync.py +++ b/test/unit/container/test_sync.py @@ -85,22 +85,22 @@ class TestContainerSync(unittest.TestCase): got = flo.read(2) self.assertTrue(len(got) <= 2) - self.assertEquals(got, expect[:len(got)]) + self.assertEqual(got, expect[:len(got)]) expect = expect[len(got):] got = flo.read(5) self.assertTrue(len(got) <= 5) - self.assertEquals(got, expect[:len(got)]) + self.assertEqual(got, expect[:len(got)]) expect = expect[len(got):] - self.assertEquals(flo.read(), expect) - self.assertEquals(flo.read(), '') - self.assertEquals(flo.read(2), '') + self.assertEqual(flo.read(), expect) + self.assertEqual(flo.read(), '') + self.assertEqual(flo.read(2), '') flo = sync.FileLikeIter(iter(['123', '4567', '89', '0'])) - self.assertEquals(flo.read(), '1234567890') - self.assertEquals(flo.read(), '') - self.assertEquals(flo.read(2), '') + self.assertEqual(flo.read(), '1234567890') + self.assertEqual(flo.read(), '') + self.assertEqual(flo.read(2), '') def assertLogMessage(self, msg_level, expected, skip=0): for line in self.logger.get_lines_for_level(msg_level)[skip:]: @@ -129,8 +129,8 @@ class TestContainerSync(unittest.TestCase): 
self.assertTrue(mock_ic.called) conf_path, name, retry = mock_ic.call_args[0] self.assertTrue(isinstance(conf_path, ConfigString)) - self.assertEquals(conf_path.contents.getvalue(), - dedent(sync.ic_conf_body)) + self.assertEqual(conf_path.contents.getvalue(), + dedent(sync.ic_conf_body)) self.assertLogMessage('warning', 'internal_client_conf_path') self.assertLogMessage('warning', 'internal-client.conf-sample') @@ -143,7 +143,7 @@ class TestContainerSync(unittest.TestCase): self.assertTrue(cs.container_ring is cring) self.assertTrue(mock_ic.called) conf_path, name, retry = mock_ic.call_args[0] - self.assertEquals(conf_path, ic_conf_path) + self.assertEqual(conf_path, ic_conf_path) sample_conf_filename = os.path.join( os.path.dirname(test.__file__), @@ -207,12 +207,12 @@ class TestContainerSync(unittest.TestCase): sync.audit_location_generator = orig_audit_location_generator sync.ContainerBroker = orig_ContainerBroker - self.assertEquals(time_calls, [9]) - self.assertEquals(len(sleep_calls), 2) + self.assertEqual(time_calls, [9]) + self.assertEqual(len(sleep_calls), 2) self.assertTrue(sleep_calls[0] <= cs.interval) self.assertTrue(sleep_calls[1] == cs.interval - 1) - self.assertEquals(audit_location_generator_calls, [2]) - self.assertEquals(cs.reported, 3602) + self.assertEqual(audit_location_generator_calls, [2]) + self.assertEqual(cs.reported, 3602) def test_run_once(self): # This runs runs_once with fakes twice, the first causing an interim @@ -255,9 +255,9 @@ class TestContainerSync(unittest.TestCase): cs = sync.ContainerSync({}, container_ring=FakeRing()) sync.audit_location_generator = fake_audit_location_generator cs.run_once(1, 2, a=3, b=4, verbose=True) - self.assertEquals(time_calls, [6]) - self.assertEquals(audit_location_generator_calls, [1]) - self.assertEquals(cs.reported, 3602) + self.assertEqual(time_calls, [6]) + self.assertEqual(audit_location_generator_calls, [1]) + self.assertEqual(cs.reported, 3602) cs.run_once() except Exception as err: if 
str(err) != 'we are now done': @@ -267,22 +267,22 @@ class TestContainerSync(unittest.TestCase): sync.audit_location_generator = orig_audit_location_generator sync.ContainerBroker = orig_ContainerBroker - self.assertEquals(time_calls, [10]) - self.assertEquals(audit_location_generator_calls, [2]) - self.assertEquals(cs.reported, 3604) + self.assertEqual(time_calls, [10]) + self.assertEqual(audit_location_generator_calls, [2]) + self.assertEqual(cs.reported, 3604) def test_container_sync_not_db(self): cring = FakeRing() with mock.patch('swift.container.sync.InternalClient'): cs = sync.ContainerSync({}, container_ring=cring) - self.assertEquals(cs.container_failures, 0) + self.assertEqual(cs.container_failures, 0) def test_container_sync_missing_db(self): cring = FakeRing() with mock.patch('swift.container.sync.InternalClient'): cs = sync.ContainerSync({}, container_ring=cring) cs.container_sync('isa.db') - self.assertEquals(cs.container_failures, 1) + self.assertEqual(cs.container_failures, 1) def test_container_sync_not_my_db(self): # Db could be there due to handoff replication so test that we ignore @@ -302,24 +302,24 @@ class TestContainerSync(unittest.TestCase): cs._myips = ['127.0.0.1'] # No match cs._myport = 1 # No match cs.container_sync('isa.db') - self.assertEquals(cs.container_failures, 0) + self.assertEqual(cs.container_failures, 0) cs._myips = ['10.0.0.0'] # Match cs._myport = 1 # No match cs.container_sync('isa.db') - self.assertEquals(cs.container_failures, 0) + self.assertEqual(cs.container_failures, 0) cs._myips = ['127.0.0.1'] # No match cs._myport = 1000 # Match cs.container_sync('isa.db') - self.assertEquals(cs.container_failures, 0) + self.assertEqual(cs.container_failures, 0) cs._myips = ['10.0.0.0'] # Match cs._myport = 1000 # Match # This complete match will cause the 1 container failure since the # broker's info doesn't contain sync point keys cs.container_sync('isa.db') - self.assertEquals(cs.container_failures, 1) + 
self.assertEqual(cs.container_failures, 1) finally: sync.ContainerBroker = orig_ContainerBroker @@ -337,7 +337,7 @@ class TestContainerSync(unittest.TestCase): # This complete match will cause the 1 container failure since the # broker's info doesn't contain sync point keys cs.container_sync('isa.db') - self.assertEquals(cs.container_failures, 1) + self.assertEqual(cs.container_failures, 1) sync.ContainerBroker = lambda p: FakeContainerBroker( p, info={'account': 'a', 'container': 'c', @@ -345,7 +345,7 @@ class TestContainerSync(unittest.TestCase): # This complete match will not cause any more container failures # since the broker indicates deletion cs.container_sync('isa.db') - self.assertEquals(cs.container_failures, 1) + self.assertEqual(cs.container_failures, 1) finally: sync.ContainerBroker = orig_ContainerBroker @@ -365,8 +365,8 @@ class TestContainerSync(unittest.TestCase): # This complete match will be skipped since the broker's metadata # has no x-container-sync-to or x-container-sync-key cs.container_sync('isa.db') - self.assertEquals(cs.container_failures, 0) - self.assertEquals(cs.container_skips, 1) + self.assertEqual(cs.container_failures, 0) + self.assertEqual(cs.container_skips, 1) sync.ContainerBroker = lambda p: FakeContainerBroker( p, info={'account': 'a', 'container': 'c', @@ -379,8 +379,8 @@ class TestContainerSync(unittest.TestCase): # This complete match will be skipped since the broker's metadata # has no x-container-sync-key cs.container_sync('isa.db') - self.assertEquals(cs.container_failures, 0) - self.assertEquals(cs.container_skips, 2) + self.assertEqual(cs.container_failures, 0) + self.assertEqual(cs.container_skips, 2) sync.ContainerBroker = lambda p: FakeContainerBroker( p, info={'account': 'a', 'container': 'c', @@ -393,8 +393,8 @@ class TestContainerSync(unittest.TestCase): # This complete match will be skipped since the broker's metadata # has no x-container-sync-to cs.container_sync('isa.db') - 
self.assertEquals(cs.container_failures, 0) - self.assertEquals(cs.container_skips, 3) + self.assertEqual(cs.container_failures, 0) + self.assertEqual(cs.container_skips, 3) sync.ContainerBroker = lambda p: FakeContainerBroker( p, info={'account': 'a', 'container': 'c', @@ -409,8 +409,8 @@ class TestContainerSync(unittest.TestCase): # This complete match will cause a container failure since the # sync-to won't validate as allowed. cs.container_sync('isa.db') - self.assertEquals(cs.container_failures, 1) - self.assertEquals(cs.container_skips, 3) + self.assertEqual(cs.container_failures, 1) + self.assertEqual(cs.container_skips, 3) sync.ContainerBroker = lambda p: FakeContainerBroker( p, info={'account': 'a', 'container': 'c', @@ -425,8 +425,8 @@ class TestContainerSync(unittest.TestCase): # This complete match will succeed completely since the broker # get_items_since will return no new rows. cs.container_sync('isa.db') - self.assertEquals(cs.container_failures, 1) - self.assertEquals(cs.container_skips, 3) + self.assertEqual(cs.container_failures, 1) + self.assertEqual(cs.container_skips, 3) finally: sync.ContainerBroker = orig_ContainerBroker @@ -450,8 +450,8 @@ class TestContainerSync(unittest.TestCase): cs.allowed_sync_hosts = ['127.0.0.1'] # This sync will fail since the items_since data is bad. cs.container_sync('isa.db') - self.assertEquals(cs.container_failures, 1) - self.assertEquals(cs.container_skips, 0) + self.assertEqual(cs.container_failures, 1) + self.assertEqual(cs.container_skips, 0) # Set up fake times to make the sync short-circuit as having taken # too long @@ -468,8 +468,8 @@ class TestContainerSync(unittest.TestCase): # as to be time to move on (before it ever actually tries to do # anything). 
cs.container_sync('isa.db') - self.assertEquals(cs.container_failures, 1) - self.assertEquals(cs.container_skips, 0) + self.assertEqual(cs.container_failures, 1) + self.assertEqual(cs.container_skips, 0) finally: sync.ContainerBroker = orig_ContainerBroker sync.time = orig_time @@ -501,10 +501,10 @@ class TestContainerSync(unittest.TestCase): cs.allowed_sync_hosts = ['127.0.0.1'] cs.container_sync('isa.db') # Succeeds because no rows match - self.assertEquals(cs.container_failures, 1) - self.assertEquals(cs.container_skips, 0) - self.assertEquals(fcb.sync_point1, None) - self.assertEquals(fcb.sync_point2, -1) + self.assertEqual(cs.container_failures, 1) + self.assertEqual(cs.container_skips, 0) + self.assertEqual(fcb.sync_point1, None) + self.assertEqual(fcb.sync_point2, -1) def fake_hash_path(account, container, obj, raw_digest=False): # Ensures that all rows match for full syncing, ordinal is 0 @@ -529,10 +529,10 @@ class TestContainerSync(unittest.TestCase): cs.allowed_sync_hosts = ['127.0.0.1'] cs.container_sync('isa.db') # Succeeds because the two sync points haven't deviated yet - self.assertEquals(cs.container_failures, 1) - self.assertEquals(cs.container_skips, 0) - self.assertEquals(fcb.sync_point1, -1) - self.assertEquals(fcb.sync_point2, -1) + self.assertEqual(cs.container_failures, 1) + self.assertEqual(cs.container_skips, 0) + self.assertEqual(fcb.sync_point1, -1) + self.assertEqual(fcb.sync_point2, -1) fcb = FakeContainerBroker( 'path', @@ -550,10 +550,10 @@ class TestContainerSync(unittest.TestCase): cs.container_sync('isa.db') # Fails because container_sync_row will fail since the row has no # 'deleted' key - self.assertEquals(cs.container_failures, 2) - self.assertEquals(cs.container_skips, 0) - self.assertEquals(fcb.sync_point1, None) - self.assertEquals(fcb.sync_point2, -1) + self.assertEqual(cs.container_failures, 2) + self.assertEqual(cs.container_skips, 0) + self.assertEqual(fcb.sync_point1, None) + self.assertEqual(fcb.sync_point2, -1) def 
fake_delete_object(*args, **kwargs): raise ClientException @@ -577,10 +577,10 @@ class TestContainerSync(unittest.TestCase): cs.allowed_sync_hosts = ['127.0.0.1'] cs.container_sync('isa.db') # Fails because delete_object fails - self.assertEquals(cs.container_failures, 3) - self.assertEquals(cs.container_skips, 0) - self.assertEquals(fcb.sync_point1, None) - self.assertEquals(fcb.sync_point2, -1) + self.assertEqual(cs.container_failures, 3) + self.assertEqual(cs.container_skips, 0) + self.assertEqual(fcb.sync_point1, None) + self.assertEqual(fcb.sync_point2, -1) fcb = FakeContainerBroker( 'path', @@ -602,10 +602,10 @@ class TestContainerSync(unittest.TestCase): cs.allowed_sync_hosts = ['127.0.0.1'] cs.container_sync('isa.db') # Succeeds because delete_object succeeds - self.assertEquals(cs.container_failures, 3) - self.assertEquals(cs.container_skips, 0) - self.assertEquals(fcb.sync_point1, None) - self.assertEquals(fcb.sync_point2, 1) + self.assertEqual(cs.container_failures, 3) + self.assertEqual(cs.container_skips, 0) + self.assertEqual(fcb.sync_point1, None) + self.assertEqual(fcb.sync_point2, 1) def test_container_second_loop(self): cring = FakeRing() @@ -640,10 +640,10 @@ class TestContainerSync(unittest.TestCase): cs.allowed_sync_hosts = ['127.0.0.1'] cs.container_sync('isa.db') # Succeeds because no rows match - self.assertEquals(cs.container_failures, 0) - self.assertEquals(cs.container_skips, 0) - self.assertEquals(fcb.sync_point1, 1) - self.assertEquals(fcb.sync_point2, None) + self.assertEqual(cs.container_failures, 0) + self.assertEqual(cs.container_skips, 0) + self.assertEqual(fcb.sync_point1, 1) + self.assertEqual(fcb.sync_point2, None) def fake_hash_path(account, container, obj, raw_digest=False): # Ensures that all rows match for second loop, ordinal is 0 and @@ -671,10 +671,10 @@ class TestContainerSync(unittest.TestCase): cs.container_sync('isa.db') # Fails because row is missing 'deleted' key # Nevertheless the fault is skipped - 
self.assertEquals(cs.container_failures, 1) - self.assertEquals(cs.container_skips, 0) - self.assertEquals(fcb.sync_point1, 1) - self.assertEquals(fcb.sync_point2, None) + self.assertEqual(cs.container_failures, 1) + self.assertEqual(cs.container_skips, 0) + self.assertEqual(fcb.sync_point1, 1) + self.assertEqual(fcb.sync_point2, None) fcb = FakeContainerBroker( 'path', @@ -693,10 +693,10 @@ class TestContainerSync(unittest.TestCase): cs.container_sync('isa.db') # Succeeds because row now has 'deleted' key and delete_object # succeeds - self.assertEquals(cs.container_failures, 1) - self.assertEquals(cs.container_skips, 0) - self.assertEquals(fcb.sync_point1, 1) - self.assertEquals(fcb.sync_point2, None) + self.assertEqual(cs.container_failures, 1) + self.assertEqual(cs.container_skips, 0) + self.assertEqual(fcb.sync_point1, 1) + self.assertEqual(fcb.sync_point2, None) finally: sync.ContainerBroker = orig_ContainerBroker sync.hash_path = orig_hash_path @@ -720,18 +720,18 @@ class TestContainerSync(unittest.TestCase): def fake_delete_object(path, name=None, headers=None, proxy=None, logger=None, timeout=None): - self.assertEquals(path, 'http://sync/to/path') - self.assertEquals(name, 'object') + self.assertEqual(path, 'http://sync/to/path') + self.assertEqual(name, 'object') if realm: - self.assertEquals(headers, { + self.assertEqual(headers, { 'x-container-sync-auth': 'US abcdef 90e95aabb45a6cdc0892a3db5535e7f918428c90', 'x-timestamp': '1.2'}) else: - self.assertEquals( + self.assertEqual( headers, {'x-container-sync-key': 'key', 'x-timestamp': '1.2'}) - self.assertEquals(proxy, 'http://proxy') + self.assertEqual(proxy, 'http://proxy') self.assertEqual(timeout, 5.0) self.assertEqual(logger, self.logger) @@ -749,7 +749,7 @@ class TestContainerSync(unittest.TestCase): 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) - self.assertEquals(cs.container_deletes, 1) + 
self.assertEqual(cs.container_deletes, 1) exc = [] @@ -766,9 +766,9 @@ class TestContainerSync(unittest.TestCase): 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) - self.assertEquals(cs.container_deletes, 1) - self.assertEquals(len(exc), 1) - self.assertEquals(str(exc[-1]), 'test exception') + self.assertEqual(cs.container_deletes, 1) + self.assertEqual(len(exc), 1) + self.assertEqual(str(exc[-1]), 'test exception') def fake_delete_object(*args, **kwargs): exc.append(ClientException('test client exception')) @@ -783,9 +783,9 @@ class TestContainerSync(unittest.TestCase): 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) - self.assertEquals(cs.container_deletes, 1) - self.assertEquals(len(exc), 2) - self.assertEquals(str(exc[-1]), 'test client exception') + self.assertEqual(cs.container_deletes, 1) + self.assertEqual(len(exc), 2) + self.assertEqual(str(exc[-1]), 'test client exception') def fake_delete_object(*args, **kwargs): exc.append(ClientException('test client exception', @@ -801,9 +801,9 @@ class TestContainerSync(unittest.TestCase): 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) - self.assertEquals(cs.container_deletes, 2) - self.assertEquals(len(exc), 3) - self.assertEquals(str(exc[-1]), 'test client exception: 404') + self.assertEqual(cs.container_deletes, 2) + self.assertEqual(len(exc), 3) + self.assertEqual(str(exc[-1]), 'test client exception: 404') finally: sync.uuid = orig_uuid sync.delete_object = orig_delete_object @@ -829,8 +829,8 @@ class TestContainerSync(unittest.TestCase): def fake_put_object(sync_to, name=None, headers=None, contents=None, proxy=None, logger=None, timeout=None): - self.assertEquals(sync_to, 'http://sync/to/path') - self.assertEquals(name, 'object') + self.assertEqual(sync_to, 'http://sync/to/path') + 
self.assertEqual(name, 'object') if realm: self.assertEqual(headers, { 'x-container-sync-auth': @@ -840,14 +840,14 @@ class TestContainerSync(unittest.TestCase): 'other-header': 'other header value', 'content-type': 'text/plain'}) else: - self.assertEquals(headers, { + self.assertEqual(headers, { 'x-container-sync-key': 'key', 'x-timestamp': '1.2', 'other-header': 'other header value', 'etag': 'etagvalue', 'content-type': 'text/plain'}) - self.assertEquals(contents.read(), 'contents') - self.assertEquals(proxy, 'http://proxy') + self.assertEqual(contents.read(), 'contents') + self.assertEqual(proxy, 'http://proxy') self.assertEqual(timeout, 5.0) self.assertEqual(logger, self.logger) @@ -861,9 +861,10 @@ class TestContainerSync(unittest.TestCase): def fake_get_object(acct, con, obj, headers, acceptable_statuses): self.assertEqual(headers['X-Backend-Storage-Policy-Index'], '0') - return (200, {'other-header': 'other header value', - 'etag': '"etagvalue"', 'x-timestamp': '1.2', - 'content-type': 'text/plain; swift_bytes=123'}, + return (200, + {'other-header': 'other header value', + 'etag': '"etagvalue"', 'x-timestamp': '1.2', + 'content-type': 'text/plain; swift_bytes=123'}, iter('contents')) cs.swift.get_object = fake_get_object @@ -875,18 +876,19 @@ class TestContainerSync(unittest.TestCase): 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) - self.assertEquals(cs.container_puts, 1) + self.assertEqual(cs.container_puts, 1) def fake_get_object(acct, con, obj, headers, acceptable_statuses): - self.assertEquals(headers['X-Newest'], True) - self.assertEquals(headers['X-Backend-Storage-Policy-Index'], - '0') - return (200, {'date': 'date value', - 'last-modified': 'last modified value', - 'x-timestamp': '1.2', - 'other-header': 'other header value', - 'etag': '"etagvalue"', - 'content-type': 'text/plain; swift_bytes=123'}, + self.assertEqual(headers['X-Newest'], True) + 
self.assertEqual(headers['X-Backend-Storage-Policy-Index'], + '0') + return (200, + {'date': 'date value', + 'last-modified': 'last modified value', + 'x-timestamp': '1.2', + 'other-header': 'other header value', + 'etag': '"etagvalue"', + 'content-type': 'text/plain; swift_bytes=123'}, iter('contents')) cs.swift.get_object = fake_get_object @@ -900,14 +902,14 @@ class TestContainerSync(unittest.TestCase): 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) - self.assertEquals(cs.container_puts, 2) + self.assertEqual(cs.container_puts, 2) exc = [] def fake_get_object(acct, con, obj, headers, acceptable_statuses): - self.assertEquals(headers['X-Newest'], True) - self.assertEquals(headers['X-Backend-Storage-Policy-Index'], - '0') + self.assertEqual(headers['X-Newest'], True) + self.assertEqual(headers['X-Backend-Storage-Policy-Index'], + '0') exc.append(Exception('test exception')) raise exc[-1] @@ -920,16 +922,16 @@ class TestContainerSync(unittest.TestCase): 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) - self.assertEquals(cs.container_puts, 2) - self.assertEquals(len(exc), 1) - self.assertEquals(str(exc[-1]), 'test exception') + self.assertEqual(cs.container_puts, 2) + self.assertEqual(len(exc), 1) + self.assertEqual(str(exc[-1]), 'test exception') exc = [] def fake_get_object(acct, con, obj, headers, acceptable_statuses): - self.assertEquals(headers['X-Newest'], True) - self.assertEquals(headers['X-Backend-Storage-Policy-Index'], - '0') + self.assertEqual(headers['X-Newest'], True) + self.assertEqual(headers['X-Backend-Storage-Policy-Index'], + '0') exc.append(ClientException('test client exception')) raise exc[-1] @@ -943,14 +945,14 @@ class TestContainerSync(unittest.TestCase): 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) - 
self.assertEquals(cs.container_puts, 2) - self.assertEquals(len(exc), 1) - self.assertEquals(str(exc[-1]), 'test client exception') + self.assertEqual(cs.container_puts, 2) + self.assertEqual(len(exc), 1) + self.assertEqual(str(exc[-1]), 'test client exception') def fake_get_object(acct, con, obj, headers, acceptable_statuses): - self.assertEquals(headers['X-Newest'], True) - self.assertEquals(headers['X-Backend-Storage-Policy-Index'], - '0') + self.assertEqual(headers['X-Newest'], True) + self.assertEqual(headers['X-Backend-Storage-Policy-Index'], + '0') return (200, {'other-header': 'other header value', 'x-timestamp': '1.2', 'etag': '"etagvalue"'}, iter('contents')) @@ -968,7 +970,7 @@ class TestContainerSync(unittest.TestCase): 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) - self.assertEquals(cs.container_puts, 2) + self.assertEqual(cs.container_puts, 2) self.assertLogMessage('info', 'Unauth') def fake_put_object(*args, **kwargs): @@ -983,7 +985,7 @@ class TestContainerSync(unittest.TestCase): 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) - self.assertEquals(cs.container_puts, 2) + self.assertEqual(cs.container_puts, 2) self.assertLogMessage('info', 'Not found', 1) def fake_put_object(*args, **kwargs): @@ -998,7 +1000,7 @@ class TestContainerSync(unittest.TestCase): 'key', FakeContainerBroker('broker'), {'account': 'a', 'container': 'c', 'storage_policy_index': 0}, realm, realm_key)) - self.assertEquals(cs.container_puts, 2) + self.assertEqual(cs.container_puts, 2) self.assertLogMessage('error', 'ERROR Syncing') finally: sync.uuid = orig_uuid diff --git a/test/unit/container/test_updater.py b/test/unit/container/test_updater.py index 0b3d33c56c..c2ad673236 100644 --- a/test/unit/container/test_updater.py +++ b/test/unit/container/test_updater.py @@ -13,7 +13,7 @@ # See the License for the specific language governing 
permissions and # limitations under the License. -import cPickle as pickle +import six.moves.cPickle as pickle import mock import os import unittest @@ -66,13 +66,58 @@ class TestContainerUpdater(unittest.TestCase): 'concurrency': '2', 'node_timeout': '5', }) - self.assert_(hasattr(cu, 'logger')) - self.assert_(cu.logger is not None) - self.assertEquals(cu.devices, self.devices_dir) - self.assertEquals(cu.interval, 1) - self.assertEquals(cu.concurrency, 2) - self.assertEquals(cu.node_timeout, 5) - self.assert_(cu.get_account_ring() is not None) + self.assertTrue(hasattr(cu, 'logger')) + self.assertTrue(cu.logger is not None) + self.assertEqual(cu.devices, self.devices_dir) + self.assertEqual(cu.interval, 1) + self.assertEqual(cu.concurrency, 2) + self.assertEqual(cu.node_timeout, 5) + self.assertTrue(cu.get_account_ring() is not None) + + @mock.patch.object(container_updater, 'ismount') + @mock.patch.object(container_updater.ContainerUpdater, 'container_sweep') + def test_run_once_with_device_unmounted(self, mock_sweep, mock_ismount): + + mock_ismount.return_value = False + cu = container_updater.ContainerUpdater({ + 'devices': self.devices_dir, + 'mount_check': 'false', + 'swift_dir': self.testdir, + 'interval': '1', + 'concurrency': '1', + 'node_timeout': '15', + 'account_suppression_time': 0 + }) + containers_dir = os.path.join(self.sda1, DATADIR) + os.mkdir(containers_dir) + partition_dir = os.path.join(containers_dir, "a") + os.mkdir(partition_dir) + + cu.run_once() + self.assertTrue(os.path.exists(containers_dir)) # sanity check + + # only called if a partition dir exists + self.assertTrue(mock_sweep.called) + + mock_sweep.reset_mock() + + cu = container_updater.ContainerUpdater({ + 'devices': self.devices_dir, + 'mount_check': 'true', + 'swift_dir': self.testdir, + 'interval': '1', + 'concurrency': '1', + 'node_timeout': '15', + 'account_suppression_time': 0 + }) + cu.logger = FakeLogger() + cu.run_once() + log_lines = 
cu.logger.get_lines_for_level('warning') + self.assertTrue(len(log_lines) > 0) + msg = 'sda1 is not mounted' + self.assertEqual(log_lines[0], msg) + # Ensure that the container_sweep did not run + self.assertFalse(mock_sweep.called) def test_run_once(self): cu = container_updater.ContainerUpdater({ @@ -88,7 +133,7 @@ class TestContainerUpdater(unittest.TestCase): containers_dir = os.path.join(self.sda1, DATADIR) os.mkdir(containers_dir) cu.run_once() - self.assert_(os.path.exists(containers_dir)) + self.assertTrue(os.path.exists(containers_dir)) subdir = os.path.join(containers_dir, 'subdir') os.mkdir(subdir) cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a', @@ -96,19 +141,19 @@ class TestContainerUpdater(unittest.TestCase): cb.initialize(normalize_timestamp(1), 0) cu.run_once() info = cb.get_info() - self.assertEquals(info['object_count'], 0) - self.assertEquals(info['bytes_used'], 0) - self.assertEquals(info['reported_object_count'], 0) - self.assertEquals(info['reported_bytes_used'], 0) + self.assertEqual(info['object_count'], 0) + self.assertEqual(info['bytes_used'], 0) + self.assertEqual(info['reported_object_count'], 0) + self.assertEqual(info['reported_bytes_used'], 0) cb.put_object('o', normalize_timestamp(2), 3, 'text/plain', '68b329da9893e34099c7d8ad5cb9c940') cu.run_once() info = cb.get_info() - self.assertEquals(info['object_count'], 1) - self.assertEquals(info['bytes_used'], 3) - self.assertEquals(info['reported_object_count'], 0) - self.assertEquals(info['reported_bytes_used'], 0) + self.assertEqual(info['object_count'], 1) + self.assertEqual(info['bytes_used'], 3) + self.assertEqual(info['reported_object_count'], 0) + self.assertEqual(info['reported_bytes_used'], 0) def accept(sock, addr, return_code): try: @@ -118,18 +163,18 @@ class TestContainerUpdater(unittest.TestCase): out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' % return_code) out.flush() - self.assertEquals(inc.readline(), - 'PUT /sda1/0/a/c HTTP/1.1\r\n') + 
self.assertEqual(inc.readline(), + 'PUT /sda1/0/a/c HTTP/1.1\r\n') headers = {} line = inc.readline() while line and line != '\r\n': headers[line.split(':')[0].lower()] = \ line.split(':')[1].strip() line = inc.readline() - self.assert_('x-put-timestamp' in headers) - self.assert_('x-delete-timestamp' in headers) - self.assert_('x-object-count' in headers) - self.assert_('x-bytes-used' in headers) + self.assertTrue('x-put-timestamp' in headers) + self.assertTrue('x-delete-timestamp' in headers) + self.assertTrue('x-object-count' in headers) + self.assertTrue('x-bytes-used' in headers) except BaseException as err: import traceback traceback.print_exc() @@ -154,10 +199,10 @@ class TestContainerUpdater(unittest.TestCase): if err: raise err info = cb.get_info() - self.assertEquals(info['object_count'], 1) - self.assertEquals(info['bytes_used'], 3) - self.assertEquals(info['reported_object_count'], 1) - self.assertEquals(info['reported_bytes_used'], 3) + self.assertEqual(info['object_count'], 1) + self.assertEqual(info['bytes_used'], 3) + self.assertEqual(info['reported_object_count'], 1) + self.assertEqual(info['reported_bytes_used'], 3) @mock.patch('os.listdir') def test_listdir_with_exception(self, mock_listdir): @@ -250,11 +295,10 @@ class TestContainerUpdater(unittest.TestCase): if err: raise err info = cb.get_info() - self.assertEquals(info['object_count'], 1) - self.assertEquals(info['bytes_used'], 3) - self.assertEquals(info['reported_object_count'], 1) - self.assertEquals(info['reported_bytes_used'], 3) - + self.assertEqual(info['object_count'], 1) + self.assertEqual(info['bytes_used'], 3) + self.assertEqual(info['reported_object_count'], 1) + self.assertEqual(info['reported_bytes_used'], 3) if __name__ == '__main__': unittest.main() diff --git a/test/unit/obj/test_auditor.py b/test/unit/obj/test_auditor.py index 3cfcb47573..2429879e5d 100644 --- a/test/unit/obj/test_auditor.py +++ b/test/unit/obj/test_auditor.py @@ -22,12 +22,11 @@ import string from shutil 
import rmtree from hashlib import md5 from tempfile import mkdtemp -from test.unit import FakeLogger, patch_policies +from test.unit import FakeLogger, patch_policies, make_timestamp_iter from swift.obj import auditor from swift.obj.diskfile import DiskFile, write_metadata, invalidate_hash, \ get_data_dir, DiskFileManager, AuditLocation -from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \ - storage_directory +from swift.common.utils import mkdirs, normalize_timestamp, Timestamp from swift.common.storage_policy import StoragePolicy, POLICIES @@ -85,8 +84,8 @@ class TestAuditor(unittest.TestCase): def test_worker_conf_parms(self): def check_common_defaults(): - self.assertEquals(auditor_worker.max_bytes_per_second, 10000000) - self.assertEquals(auditor_worker.log_time, 3600) + self.assertEqual(auditor_worker.max_bytes_per_second, 10000000) + self.assertEqual(auditor_worker.log_time, 3600) # test default values conf = dict( @@ -96,9 +95,9 @@ class TestAuditor(unittest.TestCase): auditor_worker = auditor.AuditorWorker(conf, self.logger, self.rcache, self.devices) check_common_defaults() - self.assertEquals(auditor_worker.diskfile_mgr.disk_chunk_size, 65536) - self.assertEquals(auditor_worker.max_files_per_second, 20) - self.assertEquals(auditor_worker.zero_byte_only_at_fps, 0) + self.assertEqual(auditor_worker.diskfile_mgr.disk_chunk_size, 65536) + self.assertEqual(auditor_worker.max_files_per_second, 20) + self.assertEqual(auditor_worker.zero_byte_only_at_fps, 0) # test specified audit value overrides conf.update({'disk_chunk_size': 4096}) @@ -106,9 +105,9 @@ class TestAuditor(unittest.TestCase): self.rcache, self.devices, zero_byte_only_at_fps=50) check_common_defaults() - self.assertEquals(auditor_worker.diskfile_mgr.disk_chunk_size, 4096) - self.assertEquals(auditor_worker.max_files_per_second, 50) - self.assertEquals(auditor_worker.zero_byte_only_at_fps, 50) + self.assertEqual(auditor_worker.diskfile_mgr.disk_chunk_size, 4096) + 
self.assertEqual(auditor_worker.max_files_per_second, 50) + self.assertEqual(auditor_worker.zero_byte_only_at_fps, 50) def test_object_audit_extra_data(self): def run_tests(disk_file): @@ -132,15 +131,15 @@ class TestAuditor(unittest.TestCase): auditor_worker.object_audit( AuditLocation(disk_file._datadir, 'sda', '0', policy=POLICIES.legacy)) - self.assertEquals(auditor_worker.quarantines, pre_quarantines) + self.assertEqual(auditor_worker.quarantines, pre_quarantines) os.write(writer._fd, 'extra_data') auditor_worker.object_audit( AuditLocation(disk_file._datadir, 'sda', '0', policy=POLICIES.legacy)) - self.assertEquals(auditor_worker.quarantines, - pre_quarantines + 1) + self.assertEqual(auditor_worker.quarantines, + pre_quarantines + 1) run_tests(self.disk_file) run_tests(self.disk_file_p1) @@ -169,7 +168,7 @@ class TestAuditor(unittest.TestCase): auditor_worker.object_audit( AuditLocation(self.disk_file._datadir, 'sda', '0', policy=POLICIES.legacy)) - self.assertEquals(auditor_worker.quarantines, pre_quarantines) + self.assertEqual(auditor_worker.quarantines, pre_quarantines) etag = md5() etag.update('1' + '0' * 1023) etag = etag.hexdigest() @@ -182,7 +181,7 @@ class TestAuditor(unittest.TestCase): auditor_worker.object_audit( AuditLocation(self.disk_file._datadir, 'sda', '0', policy=POLICIES.legacy)) - self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1) + self.assertEqual(auditor_worker.quarantines, pre_quarantines + 1) def test_object_audit_no_meta(self): timestamp = str(normalize_timestamp(time.time())) @@ -198,7 +197,7 @@ class TestAuditor(unittest.TestCase): auditor_worker.object_audit( AuditLocation(self.disk_file._datadir, 'sda', '0', policy=POLICIES.legacy)) - self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1) + self.assertEqual(auditor_worker.quarantines, pre_quarantines + 1) def test_object_audit_will_not_swallow_errors_in_tests(self): timestamp = str(normalize_timestamp(time.time())) @@ -233,7 +232,7 @@ class 
TestAuditor(unittest.TestCase): auditor_worker.failsafe_object_audit( AuditLocation(os.path.dirname(path), 'sda', '0', policy=POLICIES.legacy)) - self.assertEquals(auditor_worker.errors, 1) + self.assertEqual(auditor_worker.errors, 1) def test_generic_exception_handling(self): auditor_worker = auditor.AuditorWorker(self.conf, self.logger, @@ -257,7 +256,7 @@ class TestAuditor(unittest.TestCase): with mock.patch('swift.obj.diskfile.DiskFileManager.diskfile_cls', lambda *_: 1 / 0): auditor_worker.audit_all_objects() - self.assertEquals(auditor_worker.errors, pre_errors + 1) + self.assertEqual(auditor_worker.errors, pre_errors + 1) def test_object_run_once_pass(self): auditor_worker = auditor.AuditorWorker(self.conf, self.logger, @@ -286,10 +285,10 @@ class TestAuditor(unittest.TestCase): write_file(self.disk_file_p1) auditor_worker.audit_all_objects() - self.assertEquals(auditor_worker.quarantines, pre_quarantines) + self.assertEqual(auditor_worker.quarantines, pre_quarantines) # 1 object per policy falls into 1024 bucket - self.assertEquals(auditor_worker.stats_buckets[1024], 2) - self.assertEquals(auditor_worker.stats_buckets[10240], 0) + self.assertEqual(auditor_worker.stats_buckets[1024], 2) + self.assertEqual(auditor_worker.stats_buckets[10240], 0) # pick up some additional code coverage, large file data = '0' * 1024 * 1024 @@ -305,22 +304,22 @@ class TestAuditor(unittest.TestCase): } writer.put(metadata) auditor_worker.audit_all_objects(device_dirs=['sda', 'sdb']) - self.assertEquals(auditor_worker.quarantines, pre_quarantines) + self.assertEqual(auditor_worker.quarantines, pre_quarantines) # still have the 1024 byte object left in policy-1 (plus the # stats from the original 2) - self.assertEquals(auditor_worker.stats_buckets[1024], 3) - self.assertEquals(auditor_worker.stats_buckets[10240], 0) + self.assertEqual(auditor_worker.stats_buckets[1024], 3) + self.assertEqual(auditor_worker.stats_buckets[10240], 0) # and then policy-0 disk_file was re-written as a 
larger object - self.assertEquals(auditor_worker.stats_buckets['OVER'], 1) + self.assertEqual(auditor_worker.stats_buckets['OVER'], 1) # pick up even more additional code coverage, misc paths auditor_worker.log_time = -1 auditor_worker.stats_sizes = [] auditor_worker.audit_all_objects(device_dirs=['sda', 'sdb']) - self.assertEquals(auditor_worker.quarantines, pre_quarantines) - self.assertEquals(auditor_worker.stats_buckets[1024], 3) - self.assertEquals(auditor_worker.stats_buckets[10240], 0) - self.assertEquals(auditor_worker.stats_buckets['OVER'], 1) + self.assertEqual(auditor_worker.quarantines, pre_quarantines) + self.assertEqual(auditor_worker.stats_buckets[1024], 3) + self.assertEqual(auditor_worker.stats_buckets[10240], 0) + self.assertEqual(auditor_worker.stats_buckets['OVER'], 1) def test_object_run_logging(self): logger = FakeLogger() @@ -361,7 +360,7 @@ class TestAuditor(unittest.TestCase): writer.put(metadata) os.write(writer._fd, 'extra_data') auditor_worker.audit_all_objects() - self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1) + self.assertEqual(auditor_worker.quarantines, pre_quarantines + 1) def test_object_run_once_multi_devices(self): auditor_worker = auditor.AuditorWorker(self.conf, self.logger, @@ -399,7 +398,7 @@ class TestAuditor(unittest.TestCase): writer.put(metadata) os.write(writer._fd, 'extra_data') auditor_worker.audit_all_objects() - self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1) + self.assertEqual(auditor_worker.quarantines, pre_quarantines + 1) def test_object_run_fast_track_non_zero(self): self.auditor = auditor.ObjectAuditor(self.conf) @@ -432,28 +431,17 @@ class TestAuditor(unittest.TestCase): self.auditor.run_audit(**kwargs) self.assertTrue(os.path.isdir(quarantine_path)) - def setup_bad_zero_byte(self, with_ts=False): + def setup_bad_zero_byte(self, timestamp=None): + if timestamp is None: + timestamp = Timestamp(time.time()) self.auditor = auditor.ObjectAuditor(self.conf) 
self.auditor.log_time = 0 - ts_file_path = '' - if with_ts: - name_hash = hash_path('a', 'c', 'o') - dir_path = os.path.join( - self.devices, 'sda', - storage_directory(get_data_dir(POLICIES[0]), '0', name_hash)) - ts_file_path = os.path.join(dir_path, '99999.ts') - if not os.path.exists(dir_path): - mkdirs(dir_path) - fp = open(ts_file_path, 'w') - write_metadata(fp, {'X-Timestamp': '99999', 'name': '/a/c/o'}) - fp.close() - etag = md5() with self.disk_file.create() as writer: etag = etag.hexdigest() metadata = { 'ETag': etag, - 'X-Timestamp': str(normalize_timestamp(time.time())), + 'X-Timestamp': timestamp.internal, 'Content-Length': 10, } writer.put(metadata) @@ -461,7 +449,6 @@ class TestAuditor(unittest.TestCase): etag = etag.hexdigest() metadata['ETag'] = etag write_metadata(writer._fd, metadata) - return ts_file_path def test_object_run_fast_track_all(self): self.setup_bad_zero_byte() @@ -512,12 +499,36 @@ class TestAuditor(unittest.TestCase): self.auditor = auditor.ObjectAuditor(self.conf) self.assertRaises(SystemExit, self.auditor.fork_child, self) - def test_with_tombstone(self): - ts_file_path = self.setup_bad_zero_byte(with_ts=True) - self.assertTrue(ts_file_path.endswith('ts')) + def test_with_only_tombstone(self): + # sanity check that auditor doesn't touch solitary tombstones + ts_iter = make_timestamp_iter() + self.setup_bad_zero_byte(timestamp=ts_iter.next()) + self.disk_file.delete(ts_iter.next()) + files = os.listdir(self.disk_file._datadir) + self.assertEqual(1, len(files)) + self.assertTrue(files[0].endswith('ts')) kwargs = {'mode': 'once'} self.auditor.run_audit(**kwargs) - self.assertTrue(os.path.exists(ts_file_path)) + files_after = os.listdir(self.disk_file._datadir) + self.assertEqual(files, files_after) + + def test_with_tombstone_and_data(self): + # rsync replication could leave a tombstone and data file in object + # dir - verify they are both removed during audit + ts_iter = make_timestamp_iter() + ts_tomb = ts_iter.next() + ts_data = 
ts_iter.next() + self.setup_bad_zero_byte(timestamp=ts_data) + tomb_file_path = os.path.join(self.disk_file._datadir, + '%s.ts' % ts_tomb.internal) + with open(tomb_file_path, 'wb') as fd: + write_metadata(fd, {'X-Timestamp': ts_tomb.internal}) + files = os.listdir(self.disk_file._datadir) + self.assertEqual(2, len(files)) + self.assertTrue(os.path.basename(tomb_file_path) in files, files) + kwargs = {'mode': 'once'} + self.auditor.run_audit(**kwargs) + self.assertFalse(os.path.exists(self.disk_file._datadir)) def test_sleeper(self): with mock.patch( @@ -535,6 +546,8 @@ class TestAuditor(unittest.TestCase): class Bogus(Exception): pass + loop_error = Bogus('exception') + class ObjectAuditorMock(object): check_args = () check_kwargs = {} @@ -557,7 +570,7 @@ class TestAuditor(unittest.TestCase): def mock_audit_loop_error(self, parent, zbo_fps, override_devices=None, **kwargs): - raise Bogus('exception') + raise loop_error def mock_fork(self): self.fork_called += 1 @@ -591,23 +604,23 @@ class TestAuditor(unittest.TestCase): my_auditor._sleep = mocker.mock_sleep_stop my_auditor.run_once(zero_byte_fps=50) my_auditor.logger.exception.assert_called_once_with( - 'ERROR auditing: exception') + 'ERROR auditing: %s', loop_error) my_auditor.logger.exception.reset_mock() self.assertRaises(StopForever, my_auditor.run_forever) my_auditor.logger.exception.assert_called_once_with( - 'ERROR auditing: exception') + 'ERROR auditing: %s', loop_error) my_auditor.audit_loop = real_audit_loop self.assertRaises(StopForever, my_auditor.run_forever, zero_byte_fps=50) - self.assertEquals(mocker.check_kwargs['zero_byte_fps'], 50) - self.assertEquals(mocker.fork_called, 0) + self.assertEqual(mocker.check_kwargs['zero_byte_fps'], 50) + self.assertEqual(mocker.fork_called, 0) self.assertRaises(SystemExit, my_auditor.run_once) - self.assertEquals(mocker.fork_called, 1) - self.assertEquals(mocker.check_kwargs['zero_byte_fps'], 89) - self.assertEquals(mocker.check_device_dir, []) - 
self.assertEquals(mocker.check_args, ()) + self.assertEqual(mocker.fork_called, 1) + self.assertEqual(mocker.check_kwargs['zero_byte_fps'], 89) + self.assertEqual(mocker.check_device_dir, []) + self.assertEqual(mocker.check_args, ()) device_list = ['sd%s' % i for i in string.ascii_letters[2:10]] device_string = ','.join(device_list) @@ -616,9 +629,9 @@ class TestAuditor(unittest.TestCase): mocker.fork_called = 0 self.assertRaises(SystemExit, my_auditor.run_once, devices=device_string_bogus) - self.assertEquals(mocker.fork_called, 1) - self.assertEquals(mocker.check_kwargs['zero_byte_fps'], 89) - self.assertEquals(sorted(mocker.check_device_dir), device_list) + self.assertEqual(mocker.fork_called, 1) + self.assertEqual(mocker.check_kwargs['zero_byte_fps'], 89) + self.assertEqual(sorted(mocker.check_device_dir), device_list) mocker.master = 1 @@ -627,8 +640,8 @@ class TestAuditor(unittest.TestCase): # Fork is called 2 times since the zbf process is forked just # once before self._sleep() is called and StopForever is raised # Also wait is called just once before StopForever is raised - self.assertEquals(mocker.fork_called, 2) - self.assertEquals(mocker.wait_called, 1) + self.assertEqual(mocker.fork_called, 2) + self.assertEqual(mocker.wait_called, 1) my_auditor._sleep = mocker.mock_sleep_continue @@ -639,10 +652,10 @@ class TestAuditor(unittest.TestCase): # Fork is called no. of devices + (no. 
of devices)/2 + 1 times # since zbf process is forked (no.of devices)/2 + 1 times no_devices = len(os.listdir(self.devices)) - self.assertEquals(mocker.fork_called, no_devices + no_devices / 2 - + 1) - self.assertEquals(mocker.wait_called, no_devices + no_devices / 2 - + 1) + self.assertEqual(mocker.fork_called, no_devices + no_devices / 2 + + 1) + self.assertEqual(mocker.wait_called, no_devices + no_devices / 2 + + 1) finally: os.fork = was_fork diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index c2bb41db6d..47ef9b102d 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -16,7 +16,7 @@ """Tests for swift.obj.diskfile""" -import cPickle as pickle +import six.moves.cPickle as pickle import os import errno import itertools @@ -206,28 +206,28 @@ class TestDiskFileModuleMethods(unittest.TestCase): self.devices, qbit) def test_get_data_dir(self): - self.assertEquals(diskfile.get_data_dir(POLICIES[0]), - diskfile.DATADIR_BASE) - self.assertEquals(diskfile.get_data_dir(POLICIES[1]), - diskfile.DATADIR_BASE + "-1") + self.assertEqual(diskfile.get_data_dir(POLICIES[0]), + diskfile.DATADIR_BASE) + self.assertEqual(diskfile.get_data_dir(POLICIES[1]), + diskfile.DATADIR_BASE + "-1") self.assertRaises(ValueError, diskfile.get_data_dir, 'junk') self.assertRaises(ValueError, diskfile.get_data_dir, 99) def test_get_async_dir(self): - self.assertEquals(diskfile.get_async_dir(POLICIES[0]), - diskfile.ASYNCDIR_BASE) - self.assertEquals(diskfile.get_async_dir(POLICIES[1]), - diskfile.ASYNCDIR_BASE + "-1") + self.assertEqual(diskfile.get_async_dir(POLICIES[0]), + diskfile.ASYNCDIR_BASE) + self.assertEqual(diskfile.get_async_dir(POLICIES[1]), + diskfile.ASYNCDIR_BASE + "-1") self.assertRaises(ValueError, diskfile.get_async_dir, 'junk') self.assertRaises(ValueError, diskfile.get_async_dir, 99) def test_get_tmp_dir(self): - self.assertEquals(diskfile.get_tmp_dir(POLICIES[0]), - diskfile.TMP_BASE) - 
self.assertEquals(diskfile.get_tmp_dir(POLICIES[1]), - diskfile.TMP_BASE + "-1") + self.assertEqual(diskfile.get_tmp_dir(POLICIES[0]), + diskfile.TMP_BASE) + self.assertEqual(diskfile.get_tmp_dir(POLICIES[1]), + diskfile.TMP_BASE + "-1") self.assertRaises(ValueError, diskfile.get_tmp_dir, 'junk') self.assertRaises(ValueError, diskfile.get_tmp_dir, 99) @@ -512,7 +512,9 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): chosen = dict((f[1], os.path.join(class_under_test._datadir, f[0])) for f in test if f[1]) expected = tuple(chosen.get(ext) for ext in returned_ext_order) - files = list(zip(*test)[0]) + # list(zip(...)) for py3 compatibility (zip is lazy there) + files = list(list(zip(*test))[0]) + for _order in ('ordered', 'shuffled', 'shuffled'): class_under_test = self._get_diskfile(policy, frag_index) try: @@ -531,7 +533,8 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): # check that expected files are left in hashdir after cleanup for test in scenarios: class_under_test = self.df_router[policy] - files = list(zip(*test)[0]) + # list(zip(...)) for py3 compatibility (zip is lazy there) + files = list(list(zip(*test))[0]) hashdir = os.path.join(self.testdir, str(uuid.uuid4())) os.mkdir(hashdir) for fname in files: @@ -557,7 +560,8 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): # same scenarios as passed to _test_hash_cleanup_listdir_files for test in scenarios: class_under_test = self.df_router[policy] - files = list(zip(*test)[0]) + # list(zip(...)) for py3 compatibility (zip is lazy there) + files = list(list(zip(*test))[0]) dev_path = os.path.join(self.testdir, str(uuid.uuid4())) hashdir = os.path.join( dev_path, diskfile.get_data_dir(policy), @@ -570,8 +574,8 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin): with mock.patch('swift.obj.diskfile.time') as mock_time: # don't reclaim anything mock_time.time.return_value = 0.0 - mock_func = 'swift.obj.diskfile.DiskFileManager.get_dev_path' - with mock.patch(mock_func) as mock_path: + mocked = 
'swift.obj.diskfile.BaseDiskFileManager.get_dev_path' + with mock.patch(mocked) as mock_path: mock_path.return_value = dev_path for _ in class_under_test.yield_hashes( 'ignored', '0', policy, suffixes=['abc']): @@ -1015,6 +1019,39 @@ class TestDiskFileManager(DiskFileManagerMixin, unittest.TestCase): class_under_test.manager.get_ondisk_files, files, self.testdir) + def test_hash_cleanup_listdir_reclaim(self): + # Each scenario specifies a list of (filename, extension, [survives]) + # tuples. If extension is set or 'survives' is True, the filename + # should still be in the dir after cleanup. + much_older = Timestamp(time() - 2000).internal + older = Timestamp(time() - 1001).internal + newer = Timestamp(time() - 900).internal + scenarios = [[('%s.ts' % older, False, False)], + + # fresh tombstone is preserved + [('%s.ts' % newer, '.ts', True)], + + # .data files are not reclaimed, ever + [('%s.data' % older, '.data', True)], + [('%s.data' % newer, '.data', True)], + + # ... and we could have a mixture of fresh and stale .data + [('%s.data' % newer, '.data', True), + ('%s.data' % older, False, False)], + + # tombstone reclaimed despite newer data + [('%s.data' % newer, '.data', True), + ('%s.data' % older, False, False), + ('%s.ts' % much_older, '.ts', False)], + + # tombstone reclaimed despite junk file + [('junk', False, True), + ('%s.ts' % much_older, '.ts', False)], + ] + + self._test_hash_cleanup_listdir_files(scenarios, POLICIES.default, + reclaim_age=1000) + def test_yield_hashes(self): old_ts = '1383180000.12345' fresh_ts = Timestamp(time() - 10).internal @@ -1279,7 +1316,7 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): # ...even when other older files are in dir [('%s.durable' % older, False, False), - ('%s.ts' % much_older, False, False)], + ('%s.ts' % much_older, False, False)], # isolated .data files are cleaned up when stale [('%s#2.data' % older, False, False), @@ -1300,16 +1337,12 @@ class 
TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): [('%s#2.data' % newer, False, True), ('%s#4.data' % older, False, False)], - # TODO these remaining scenarios exhibit different - # behavior than the legacy replication DiskFileManager - # behavior... - # tombstone reclaimed despite newer non-durable data [('%s#2.data' % newer, False, True), ('%s#4.data' % older, False, False), ('%s.ts' % much_older, '.ts', False)], - # tombstone reclaimed despite newer non-durable data + # tombstone reclaimed despite much older durable [('%s.ts' % older, '.ts', False), ('%s.durable' % much_older, False, False)], @@ -1329,11 +1362,11 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase): '0000000006.00000.durable'], ['0000000007.00000.meta', - '0000000006.00000#1.data'], + '0000000006.00000#1.data'], ['0000000007.00000.meta', - '0000000006.00000.durable', - '0000000005.00000#1.data'] + '0000000006.00000.durable', + '0000000005.00000#1.data'] ] for files in scenarios: class_under_test = self._get_diskfile(POLICIES.default) @@ -1949,7 +1982,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): df = self._get_open_disk_file(ts=self.ts().internal, extra_metadata=orig_metadata) with df.open(): - self.assertEquals('1024', df._metadata['Content-Length']) + self.assertEqual('1024', df._metadata['Content-Length']) # write some new metadata (fast POST, don't send orig meta, at t0+1) df = self._simple_get_diskfile() df.write_metadata({'X-Timestamp': self.ts().internal, @@ -1957,11 +1990,11 @@ class DiskFileMixin(BaseDiskFileTestMixin): df = self._simple_get_diskfile() with df.open(): # non-fast-post updateable keys are preserved - self.assertEquals('text/garbage', df._metadata['Content-Type']) + self.assertEqual('text/garbage', df._metadata['Content-Type']) # original fast-post updateable keys are removed - self.assert_('X-Object-Meta-Key1' not in df._metadata) + self.assertTrue('X-Object-Meta-Key1' not in df._metadata) # new fast-post updateable keys are added - 
self.assertEquals('Value2', df._metadata['X-Object-Meta-Key2']) + self.assertEqual('Value2', df._metadata['X-Object-Meta-Key2']) def test_disk_file_preserves_sysmeta(self): # build an object with some meta (at t0) @@ -1970,7 +2003,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): df = self._get_open_disk_file(ts=self.ts().internal, extra_metadata=orig_metadata) with df.open(): - self.assertEquals('1024', df._metadata['Content-Length']) + self.assertEqual('1024', df._metadata['Content-Length']) # write some new metadata (fast POST, don't send orig meta, at t0+1s) df = self._simple_get_diskfile() df.write_metadata({'X-Timestamp': self.ts().internal, @@ -1979,9 +2012,9 @@ class DiskFileMixin(BaseDiskFileTestMixin): df = self._simple_get_diskfile() with df.open(): # non-fast-post updateable keys are preserved - self.assertEquals('text/garbage', df._metadata['Content-Type']) + self.assertEqual('text/garbage', df._metadata['Content-Type']) # original sysmeta keys are preserved - self.assertEquals('Value1', df._metadata['X-Object-Sysmeta-Key1']) + self.assertEqual('Value1', df._metadata['X-Object-Sysmeta-Key1']) def test_disk_file_reader_iter(self): df = self._create_test_file('1234567890') @@ -2004,9 +2037,9 @@ class DiskFileMixin(BaseDiskFileTestMixin): df = self._create_test_file('1234567890') quarantine_msgs = [] reader = df.reader(_quarantine_hook=quarantine_msgs.append) - self.assertEquals(''.join(reader.app_iter_range(0, None)), - '1234567890') - self.assertEquals(quarantine_msgs, []) + self.assertEqual(''.join(reader.app_iter_range(0, None)), + '1234567890') + self.assertEqual(quarantine_msgs, []) df = self._simple_get_diskfile() with df.open(): reader = df.reader() @@ -2099,7 +2132,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): '5e816ff8b8b8e9a5d355497e5d9e0301\r\n']) value = header + ''.join(it) - self.assertEquals(quarantine_msgs, []) + self.assertEqual(quarantine_msgs, []) parts = map(lambda p: p.get_payload(decode=True), 
email.message_from_string(value).walk())[1:3] @@ -2131,7 +2164,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): os.rmdir(tmpdir) df = self._simple_get_diskfile(policy=policy) with df.create(): - self.assert_(os.path.exists(tmpdir)) + self.assertTrue(os.path.exists(tmpdir)) def _get_open_disk_file(self, invalid_type=None, obj_name='o', fsize=1024, csize=8, mark_deleted=False, prealloc=False, @@ -2538,7 +2571,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): metadata = {'X-Timestamp': timestamp, 'X-Object-Meta-test': 'data'} df.write_metadata(metadata) dl = os.listdir(df._datadir) - self.assertEquals(len(dl), file_count + 1) + self.assertEqual(len(dl), file_count + 1) exp_name = '%s.meta' % timestamp self.assertTrue(exp_name in set(dl)) @@ -2605,8 +2638,8 @@ class DiskFileMixin(BaseDiskFileTestMixin): if policy.policy_type == EC_POLICY: expected = ['%s#2.data' % timestamp, '%s.durable' % timestamp] - self.assertEquals(len(dl), len(expected), - 'Unexpected dir listing %s' % dl) + self.assertEqual(len(dl), len(expected), + 'Unexpected dir listing %s' % dl) self.assertEqual(sorted(expected), sorted(dl)) def test_write_cleanup(self): @@ -2624,8 +2657,8 @@ class DiskFileMixin(BaseDiskFileTestMixin): if policy.policy_type == EC_POLICY: expected = ['%s#2.data' % timestamp_2, '%s.durable' % timestamp_2] - self.assertEquals(len(dl), len(expected), - 'Unexpected dir listing %s' % dl) + self.assertEqual(len(dl), len(expected), + 'Unexpected dir listing %s' % dl) self.assertEqual(sorted(expected), sorted(dl)) def test_commit_fsync(self): @@ -2680,8 +2713,8 @@ class DiskFileMixin(BaseDiskFileTestMixin): expected = ['%s#2.data' % timestamp.internal, '%s.durable' % timestamp.internal] dl = os.listdir(df._datadir) - self.assertEquals(len(dl), len(expected), - 'Unexpected dir listing %s' % dl) + self.assertEqual(len(dl), len(expected), + 'Unexpected dir listing %s' % dl) self.assertEqual(sorted(expected), sorted(dl)) def test_number_calls_to_hash_cleanup_listdir_during_create(self): 
@@ -2756,7 +2789,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): df.delete(ts) exp_name = '%s.ts' % ts.internal dl = os.listdir(df._datadir) - self.assertEquals(len(dl), 1) + self.assertEqual(len(dl), 1) self.assertTrue(exp_name in set(dl), 'Expected file %s missing in %s' % (exp_name, dl)) # cleanup before next policy @@ -2768,7 +2801,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): df.delete(ts) exp_name = '%s.ts' % str(Timestamp(ts).internal) dl = os.listdir(df._datadir) - self.assertEquals(len(dl), 1) + self.assertEqual(len(dl), 1) self.assertTrue(exp_name in set(dl)) df = self._simple_get_diskfile() self.assertRaises(DiskFileDeleted, df.open) @@ -2779,7 +2812,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): df.delete(ts) exp_name = '%s.ts' % str(Timestamp(ts).internal) dl = os.listdir(df._datadir) - self.assertEquals(len(dl), 1) + self.assertEqual(len(dl), 1) self.assertTrue(exp_name in set(dl)) # it's pickle-format, so removing the last byte is sufficient to # corrupt it @@ -2829,7 +2862,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): for chunk in reader: pass # close is called at the end of the iterator - self.assertEquals(reader._fp, None) + self.assertEqual(reader._fp, None) error_lines = df._logger.get_lines_for_level('error') self.assertEqual(len(error_lines), 1) self.assertTrue('close failure' in error_lines[0]) @@ -2858,7 +2891,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): try: df.open() except DiskFileDeleted as d: - self.assertEquals(d.timestamp, Timestamp(10).internal) + self.assertEqual(d.timestamp, Timestamp(10).internal) else: self.fail("Expected DiskFileDeleted exception") @@ -2874,7 +2907,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): try: df.open() except DiskFileDeleted as d: - self.assertEquals(d.timestamp, Timestamp(8).internal) + self.assertEqual(d.timestamp, Timestamp(8).internal) else: self.fail("Expected DiskFileDeleted exception") @@ -2892,8 +2925,8 @@ class DiskFileMixin(BaseDiskFileTestMixin): df = self._simple_get_diskfile() 
with df.open(): self.assertTrue('X-Timestamp' in df._metadata) - self.assertEquals(df._metadata['X-Timestamp'], - Timestamp(10).internal) + self.assertEqual(df._metadata['X-Timestamp'], + Timestamp(10).internal) self.assertTrue('deleted' not in df._metadata) def test_ondisk_search_loop_data_meta_ts(self): @@ -2910,8 +2943,8 @@ class DiskFileMixin(BaseDiskFileTestMixin): df = self._simple_get_diskfile() with df.open(): self.assertTrue('X-Timestamp' in df._metadata) - self.assertEquals(df._metadata['X-Timestamp'], - Timestamp(10).internal) + self.assertEqual(df._metadata['X-Timestamp'], + Timestamp(10).internal) self.assertTrue('deleted' not in df._metadata) def test_ondisk_search_loop_wayward_files_ignored(self): @@ -2929,8 +2962,8 @@ class DiskFileMixin(BaseDiskFileTestMixin): df = self._simple_get_diskfile() with df.open(): self.assertTrue('X-Timestamp' in df._metadata) - self.assertEquals(df._metadata['X-Timestamp'], - Timestamp(10).internal) + self.assertEqual(df._metadata['X-Timestamp'], + Timestamp(10).internal) self.assertTrue('deleted' not in df._metadata) def test_ondisk_search_loop_listdir_error(self): @@ -2965,7 +2998,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): pass reader.close() log_lines = df._logger.get_lines_for_level('error') - self.assert_('a very special error' in log_lines[-1]) + self.assertTrue('a very special error' in log_lines[-1]) def test_diskfile_names(self): df = self._simple_get_diskfile() @@ -2988,7 +3021,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): df.delete(ts) exp_name = '%s.ts' % str(Timestamp(ts).internal) dl = os.listdir(df._datadir) - self.assertEquals(len(dl), 1) + self.assertEqual(len(dl), 1) self.assertTrue(exp_name in set(dl)) df = self._simple_get_diskfile() exc = None @@ -3020,7 +3053,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): df.delete(ts) exp_name = '%s.ts' % str(Timestamp(ts).internal) dl = os.listdir(df._datadir) - self.assertEquals(len(dl), 1) + self.assertEqual(len(dl), 1) self.assertTrue(exp_name in 
set(dl)) df = self._simple_get_diskfile() exc = None @@ -3053,7 +3086,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): self.fail("OSError raised when it should have been swallowed") exp_name = '%s.ts' % str(Timestamp(ts).internal) dl = os.listdir(df._datadir) - self.assertEquals(len(dl), file_count + 1) + self.assertEqual(len(dl), file_count + 1) self.assertTrue(exp_name in set(dl)) def _system_can_zero_copy(self): @@ -3097,7 +3130,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): self.assertFalse(reader.can_zero_copy_send()) log_lines = df_mgr.logger.get_lines_for_level('warning') - self.assert_('MD5 sockets' in log_lines[-1]) + self.assertTrue('MD5 sockets' in log_lines[-1]) def test_tee_to_md5_pipe_length_mismatch(self): if not self._system_can_zero_copy(): @@ -3565,7 +3598,7 @@ class TestECDiskFile(DiskFileMixin, unittest.TestCase): ts.internal + '.durable', ]) - def test_purge_non_existant_fragment_index(self): + def test_purge_non_existent_fragment_index(self): ts = self.ts() frag_index = 7 df = self._simple_get_diskfile(frag_index=frag_index) @@ -3875,7 +3908,7 @@ class TestSuffixHashes(unittest.TestCase): df_mgr.hash_cleanup_listdir, path) return files = df_mgr.hash_cleanup_listdir('/whatever') - self.assertEquals(files, output_files) + self.assertEqual(files, output_files) # hash_cleanup_listdir tests - behaviors @@ -4015,14 +4048,7 @@ class TestSuffixHashes(unittest.TestCase): for policy in self.iter_policies(): file1, file2 = [self.ts().internal + '.meta' for i in range(2)] file_list = [file1, file2] - if policy.policy_type == EC_POLICY: - # EC policy does tolerate only .meta's in dir when cleaning up - expected = [file2] - else: - # the get_ondisk_files contract validation doesn't allow a - # directory with only .meta files - expected = AssertionError() - self.check_hash_cleanup_listdir(policy, file_list, expected) + self.check_hash_cleanup_listdir(policy, file_list, [file2]) def test_hash_cleanup_listdir_ignore_orphaned_ts(self): for policy in 
self.iter_policies(): @@ -4056,13 +4082,7 @@ class TestSuffixHashes(unittest.TestCase): file1 = Timestamp(old_float).internal + '.ts' file2 = Timestamp(time() + 2).internal + '.meta' file_list = [file1, file2] - if policy.policy_type == EC_POLICY: - # EC will clean up old .ts despite a .meta - expected = [file2] - else: - # An orphaned .meta will not clean up a very old .ts - expected = [file2, file1] - self.check_hash_cleanup_listdir(policy, file_list, expected) + self.check_hash_cleanup_listdir(policy, file_list, [file2]) def test_hash_cleanup_listdir_keep_single_old_data(self): for policy in self.iter_policies(): @@ -4127,13 +4147,7 @@ class TestSuffixHashes(unittest.TestCase): file1 = self._datafilename(Timestamp(1), policy) file2 = '0000000002.00000.ts' file_list = [file1, file2] - if policy.policy_type == EC_POLICY: - # the .ts gets reclaimed up despite failed .data delete - expected = [] - else: - # the .ts isn't reclaimed because there were two files in dir - expected = [file2] - self.check_hash_cleanup_listdir(policy, file_list, expected) + self.check_hash_cleanup_listdir(policy, file_list, []) # invalidate_hash tests - behavior @@ -4230,21 +4244,45 @@ class TestSuffixHashes(unittest.TestCase): df_mgr = self.df_router[policy] df = df_mgr.get_diskfile( 'sda1', '0', 'a', 'c', 'o', policy=policy) - suffix = os.path.basename(os.path.dirname(df._datadir)) # scale back this tests manager's reclaim age a bit df_mgr.reclaim_age = 1000 # write a tombstone that's just a *little* older old_time = time() - 1001 timestamp = Timestamp(old_time) df.delete(timestamp.internal) - tombstone_hash = md5(timestamp.internal + '.ts').hexdigest() hashes = df_mgr.get_hashes('sda1', '0', [], policy) + self.assertEqual(hashes, {}) + + def test_hash_suffix_one_reclaim_and_one_valid_tombstone(self): + for policy in self.iter_policies(): + paths, suffix = find_paths_with_matching_suffixes(2, 1) + df_mgr = self.df_router[policy] + a, c, o = paths[suffix][0] + df1 = df_mgr.get_diskfile( + 
'sda1', '0', a, c, o, policy=policy) + # scale back this tests manager's reclaim age a bit + df_mgr.reclaim_age = 1000 + # write one tombstone that's just a *little* older + df1.delete(Timestamp(time() - 1001)) + # create another tombstone in same suffix dir that's newer + a, c, o = paths[suffix][1] + df2 = df_mgr.get_diskfile( + 'sda1', '0', a, c, o, policy=policy) + t_df2 = Timestamp(time() - 900) + df2.delete(t_df2) + + hashes = df_mgr.get_hashes('sda1', '0', [], policy) + + suffix = os.path.basename(os.path.dirname(df1._datadir)) + df2_tombstone_hash = md5(t_df2.internal + '.ts').hexdigest() expected = { - # repl is broken, it doesn't use self.reclaim_age - REPL_POLICY: tombstone_hash, - EC_POLICY: {}, + REPL_POLICY: {suffix: df2_tombstone_hash}, + EC_POLICY: {suffix: { + # fi is None here because we have a tombstone + None: df2_tombstone_hash}}, }[policy.policy_type] - self.assertEqual(hashes, {suffix: expected}) + + self.assertEqual(hashes, expected) def test_hash_suffix_one_datafile(self): for policy in self.iter_policies(): @@ -4420,20 +4458,17 @@ class TestSuffixHashes(unittest.TestCase): hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy) # suffix dir cleaned up by get_hashes self.assertFalse(os.path.exists(suffix_path)) - expected = { - EC_POLICY: {'123': {}}, - REPL_POLICY: {'123': EMPTY_ETAG}, - }[policy.policy_type] - msg = 'expected %r != %r for policy %r' % (expected, hashes, - policy) + expected = {} + msg = 'expected %r != %r for policy %r' % ( + expected, hashes, policy) self.assertEqual(hashes, expected, msg) # now make the suffix path a file open(suffix_path, 'w').close() hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy) expected = {} - msg = 'expected %r != %r for policy %r' % (expected, hashes, - policy) + msg = 'expected %r != %r for policy %r' % ( + expected, hashes, policy) self.assertEqual(hashes, expected, msg) def test_hash_suffix_listdir_enoent(self): @@ -4481,11 +4516,7 @@ class TestSuffixHashes(unittest.TestCase): 
df_mgr = self.df_router[policy] hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix], policy) - expected = { - REPL_POLICY: {suffix: EMPTY_ETAG}, - EC_POLICY: {suffix: {}}, - }[policy.policy_type] - self.assertEqual(hashes, expected) + self.assertEqual(hashes, {}) # and hash path is quarantined self.assertFalse(os.path.exists(df._datadir)) # each device a quarantined directory @@ -4693,12 +4724,9 @@ class TestSuffixHashes(unittest.TestCase): self.assertNotEqual(new_hashes, hashes) # and the empty suffix path is removed self.assertFalse(os.path.exists(suffix_path)) - # ... but is hashed as "empty" - expected = { - EC_POLICY: {}, - REPL_POLICY: md5().hexdigest(), - }[policy.policy_type] - self.assertEqual({suffix: expected}, hashes) + # ... and the suffix key is removed + expected = {} + self.assertEqual(expected, hashes) def test_get_hashes_multi_file_multi_suffix(self): paths, suffix = find_paths_with_matching_suffixes(needed_matches=2, @@ -4875,10 +4903,7 @@ class TestSuffixHashes(unittest.TestCase): self.assertTrue(os.path.exists(suffix_path)) # sanity hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix], policy) - expected = { - EC_POLICY: {'123': {}}, - REPL_POLICY: {'123': EMPTY_ETAG}, - }[policy.policy_type] + expected = {} msg = 'expected %r != %r for policy %r' % (expected, hashes, policy) self.assertEqual(hashes, expected, msg) diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py index f753de00ce..7aa5ebc60d 100755 --- a/test/unit/obj/test_reconstructor.py +++ b/test/unit/obj/test_reconstructor.py @@ -17,14 +17,14 @@ import unittest import os from hashlib import md5 import mock -import cPickle as pickle +import six.moves.cPickle as pickle import tempfile import time import shutil import re import random import struct -from eventlet import Timeout +from eventlet import Timeout, sleep from contextlib import closing, nested, contextmanager from gzip import GzipFile @@ -599,10 +599,74 @@ class 
TestGlobalSetupObjectReconstructor(unittest.TestCase): self.assertFalse(jobs) # that should be all of them check_jobs(part_num) - def test_run_once(self): - with mocked_http_conn(*[200] * 12, body=pickle.dumps({})): + def _run_once(self, http_count, extra_devices, override_devices=None): + ring_devs = list(self.policy.object_ring.devs) + for device, parts in extra_devices.items(): + device_path = os.path.join(self.devices, device) + os.mkdir(device_path) + for part in range(parts): + os.makedirs(os.path.join(device_path, 'objects-1', str(part))) + # we update the ring to make is_local happy + devs = [dict(d) for d in ring_devs] + for d in devs: + d['device'] = device + self.policy.object_ring.devs.extend(devs) + self.reconstructor.stats_interval = 0 + self.process_job = lambda j: sleep(0) + with mocked_http_conn(*[200] * http_count, body=pickle.dumps({})): with mock_ssync_sender(): - self.reconstructor.run_once() + self.reconstructor.run_once(devices=override_devices) + + def test_run_once(self): + # sda1: 3 is done in setup + extra_devices = { + 'sdb1': 4, + 'sdc1': 1, + 'sdd1': 0, + } + self._run_once(18, extra_devices) + stats_lines = set() + for line in self.logger.get_lines_for_level('info'): + if 'devices reconstructed in' not in line: + continue + stat_line = line.split('of', 1)[0].strip() + stats_lines.add(stat_line) + acceptable = set([ + '0/3 (0.00%) partitions', + '8/8 (100.00%) partitions', + ]) + matched = stats_lines & acceptable + self.assertEqual(matched, acceptable, + 'missing some expected acceptable:\n%s' % ( + '\n'.join(sorted(acceptable - matched)))) + self.assertEqual(self.reconstructor.reconstruction_device_count, 4) + self.assertEqual(self.reconstructor.reconstruction_part_count, 8) + self.assertEqual(self.reconstructor.part_count, 8) + + def test_run_once_override_devices(self): + # sda1: 3 is done in setup + extra_devices = { + 'sdb1': 4, + 'sdc1': 1, + 'sdd1': 0, + } + self._run_once(2, extra_devices, 'sdc1') + stats_lines = set() + for 
line in self.logger.get_lines_for_level('info'): + if 'devices reconstructed in' not in line: + continue + stat_line = line.split('of', 1)[0].strip() + stats_lines.add(stat_line) + acceptable = set([ + '1/1 (100.00%) partitions', + ]) + matched = stats_lines & acceptable + self.assertEqual(matched, acceptable, + 'missing some expected acceptable:\n%s' % ( + '\n'.join(sorted(acceptable - matched)))) + self.assertEqual(self.reconstructor.reconstruction_device_count, 1) + self.assertEqual(self.reconstructor.reconstruction_part_count, 1) + self.assertEqual(self.reconstructor.part_count, 1) def test_get_response(self): part = self.part_nums[0] @@ -621,6 +685,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): def test_reconstructor_skips_bogus_partition_dirs(self): # A directory in the wrong place shouldn't crash the reconstructor + self.reconstructor._reset_stats() rmtree(self.objects_1) os.mkdir(self.objects_1) @@ -699,6 +764,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): self.assertEqual(expected_partners, sorted(got_partners)) def test_collect_parts(self): + self.reconstructor._reset_stats() parts = [] for part_info in self.reconstructor.collect_parts(): parts.append(part_info['partition']) @@ -709,6 +775,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): def blowup_mkdirs(path): raise OSError('Ow!') + self.reconstructor._reset_stats() with mock.patch.object(object_reconstructor, 'mkdirs', blowup_mkdirs): rmtree(self.objects_1, ignore_errors=1) parts = [] @@ -717,7 +784,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): error_lines = self.logger.get_lines_for_level('error') self.assertEqual(len(error_lines), 1) log_args, log_kwargs = self.logger.log_dict['error'][0] - self.assertEquals(str(log_kwargs['exc_info'][1]), 'Ow!') + self.assertEqual(str(log_kwargs['exc_info'][1]), 'Ow!') def test_removes_zbf(self): # After running xfs_repair, a partition directory could become a @@ -734,6 +801,7 @@ class 
TestGlobalSetupObjectReconstructor(unittest.TestCase): # since our collect_parts job is a generator, that yields directly # into build_jobs and then spawns it's safe to do the remove_files # without making reconstructor startup slow + self.reconstructor._reset_stats() for part_info in self.reconstructor.collect_parts(): self.assertNotEqual(pol_1_part_1_path, part_info['part_path']) self.assertFalse(os.path.exists(pol_1_part_1_path)) @@ -1033,6 +1101,7 @@ class TestObjectReconstructor(unittest.TestCase): self.reconstructor.job_count = 1 def tearDown(self): + self.reconstructor._reset_stats() self.reconstructor.stats_line() shutil.rmtree(self.testdir) diff --git a/test/unit/obj/test_replicator.py b/test/unit/obj/test_replicator.py index 08eb88b9aa..018f470c3b 100644 --- a/test/unit/obj/test_replicator.py +++ b/test/unit/obj/test_replicator.py @@ -18,19 +18,21 @@ import os import mock from gzip import GzipFile from shutil import rmtree -import cPickle as pickle +import six.moves.cPickle as pickle import time import tempfile from contextlib import contextmanager, closing +from collections import defaultdict from errno import ENOENT, ENOTEMPTY, ENOTDIR from eventlet.green import subprocess from eventlet import Timeout, tpool -from test.unit import debug_logger, patch_policies +from test.unit import (debug_logger, patch_policies, make_timestamp_iter, + mocked_http_conn) from swift.common import utils -from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \ - storage_directory +from swift.common.utils import (hash_path, mkdirs, normalize_timestamp, + storage_directory) from swift.common import ring from swift.obj import diskfile, replicator as object_replicator from swift.common.storage_policy import StoragePolicy, POLICIES @@ -76,6 +78,7 @@ class MockProcess(object): ret_code = None ret_log = None check_args = None + captured_log = None class Stream(object): @@ -99,20 +102,32 @@ class MockProcess(object): if targ not in args[0]: 
process_errors.append("Invalid: %s not in %s" % (targ, args)) + self.captured_info = { + 'rsync_args': args[0], + } self.stdout = self.Stream() def wait(self): - return next(self.ret_code) + # the _mock_process context manager assures this class attribute is a + # mutable list and takes care of resetting it + rv = next(self.ret_code) + if self.captured_log is not None: + self.captured_info['ret_code'] = rv + self.captured_log.append(self.captured_info) + return rv @contextmanager def _mock_process(ret): + captured_log = [] + MockProcess.captured_log = captured_log orig_process = subprocess.Popen MockProcess.ret_code = (i[0] for i in ret) MockProcess.ret_log = (i[1] for i in ret) MockProcess.check_args = (i[2] for i in ret) object_replicator.subprocess.Popen = MockProcess - yield + yield captured_log + MockProcess.captured_log = None object_replicator.subprocess.Popen = orig_process @@ -180,10 +195,40 @@ class TestObjectReplicator(unittest.TestCase): swift_dir=self.testdir, devices=self.devices, mount_check='false', timeout='300', stats_interval='1', sync_method='rsync') self._create_replicator() + self.ts = make_timestamp_iter() def tearDown(self): + self.assertFalse(process_errors) rmtree(self.testdir, ignore_errors=1) + def test_handoff_replication_setting_warnings(self): + conf_tests = [ + # (config, expected_warning) + ({}, False), + ({'handoff_delete': 'auto'}, False), + ({'handoffs_first': 'no'}, False), + ({'handoff_delete': '2'}, True), + ({'handoffs_first': 'yes'}, True), + ({'handoff_delete': '1', 'handoffs_first': 'yes'}, True), + ] + log_message = 'Handoff only mode is not intended for normal ' \ + 'operation, please disable handoffs_first and ' \ + 'handoff_delete before the next normal rebalance' + for config, expected_warning in conf_tests: + self.logger.clear() + object_replicator.ObjectReplicator(config, logger=self.logger) + warning_log_lines = self.logger.get_lines_for_level('warning') + if expected_warning: + expected_log_lines = [log_message] + 
else: + expected_log_lines = [] + self.assertEqual(expected_log_lines, warning_log_lines, + 'expected %s != %s for config %r' % ( + expected_log_lines, + warning_log_lines, + config, + )) + def _write_disk_data(self, disk_name): os.mkdir(os.path.join(self.devices, disk_name)) objects = os.path.join(self.devices, disk_name, @@ -205,6 +250,8 @@ class TestObjectReplicator(unittest.TestCase): def _create_replicator(self): self.replicator = object_replicator.ObjectReplicator(self.conf) self.replicator.logger = self.logger + self.replicator._zero_stats() + self.replicator.all_devs_info = set() self.df_mgr = diskfile.DiskFileManager(self.conf, self.logger) def test_run_once(self): @@ -314,36 +361,36 @@ class TestObjectReplicator(unittest.TestCase): jobs_by_pol_part = {} for job in jobs: jobs_by_pol_part[str(int(job['policy'])) + job['partition']] = job - self.assertEquals(len(jobs_to_delete), 2) + self.assertEqual(len(jobs_to_delete), 2) self.assertTrue('1', jobs_to_delete[0]['partition']) - self.assertEquals( + self.assertEqual( [node['id'] for node in jobs_by_pol_part['00']['nodes']], [1, 2]) - self.assertEquals( + self.assertEqual( [node['id'] for node in jobs_by_pol_part['01']['nodes']], [1, 2, 3]) - self.assertEquals( + self.assertEqual( [node['id'] for node in jobs_by_pol_part['02']['nodes']], [2, 3]) - self.assertEquals( + self.assertEqual( [node['id'] for node in jobs_by_pol_part['03']['nodes']], [3, 1]) - self.assertEquals( + self.assertEqual( [node['id'] for node in jobs_by_pol_part['10']['nodes']], [1, 2]) - self.assertEquals( + self.assertEqual( [node['id'] for node in jobs_by_pol_part['11']['nodes']], [1, 2, 3]) - self.assertEquals( + self.assertEqual( [node['id'] for node in jobs_by_pol_part['12']['nodes']], [2, 3]) - self.assertEquals( + self.assertEqual( [node['id'] for node in jobs_by_pol_part['13']['nodes']], [3, 1]) for part in ['00', '01', '02', '03']: for node in jobs_by_pol_part[part]['nodes']: - self.assertEquals(node['device'], 'sda') - 
self.assertEquals(jobs_by_pol_part[part]['path'], - os.path.join(self.objects, part[1:])) + self.assertEqual(node['device'], 'sda') + self.assertEqual(jobs_by_pol_part[part]['path'], + os.path.join(self.objects, part[1:])) for part in ['10', '11', '12', '13']: for node in jobs_by_pol_part[part]['nodes']: - self.assertEquals(node['device'], 'sda') - self.assertEquals(jobs_by_pol_part[part]['path'], - os.path.join(self.objects_1, part[1:])) + self.assertEqual(node['device'], 'sda') + self.assertEqual(jobs_by_pol_part[part]['path'], + os.path.join(self.objects_1, part[1:])) @mock.patch('swift.obj.replicator.random.shuffle', side_effect=lambda l: l) def test_collect_jobs_multi_disk(self, mock_shuffle): @@ -373,7 +420,7 @@ class TestObjectReplicator(unittest.TestCase): self.assertEqual([mock.call(jobs)], mock_shuffle.mock_calls) jobs_to_delete = [j for j in jobs if j['delete']] - self.assertEquals(len(jobs_to_delete), 4) + self.assertEqual(len(jobs_to_delete), 4) self.assertEqual([ '1', '2', # policy 0; 1 not on sda, 2 not on sdb '1', '2', # policy 1; 1 not on sda, 2 not on sdb @@ -387,64 +434,64 @@ class TestObjectReplicator(unittest.TestCase): str(int(job['policy'])) + job['partition'] + job['device'] ] = job - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['00sda']['nodes']], - [1, 2]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['00sdb']['nodes']], - [0, 2]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['01sda']['nodes']], - [1, 2, 3]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['01sdb']['nodes']], - [2, 3]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['02sda']['nodes']], - [2, 3]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['02sdb']['nodes']], - [2, 3, 0]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['03sda']['nodes']], - [3, 1]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['03sdb']['nodes']], - 
[3, 0]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['10sda']['nodes']], - [1, 2]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['10sdb']['nodes']], - [0, 2]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['11sda']['nodes']], - [1, 2, 3]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['11sdb']['nodes']], - [2, 3]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['12sda']['nodes']], - [2, 3]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['12sdb']['nodes']], - [2, 3, 0]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['13sda']['nodes']], - [3, 1]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['13sdb']['nodes']], - [3, 0]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['00sda']['nodes']], + [1, 2]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['00sdb']['nodes']], + [0, 2]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['01sda']['nodes']], + [1, 2, 3]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['01sdb']['nodes']], + [2, 3]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['02sda']['nodes']], + [2, 3]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['02sdb']['nodes']], + [2, 3, 0]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['03sda']['nodes']], + [3, 1]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['03sdb']['nodes']], + [3, 0]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['10sda']['nodes']], + [1, 2]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['10sdb']['nodes']], + [0, 2]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['11sda']['nodes']], + [1, 2, 3]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['11sdb']['nodes']], + [2, 3]) + self.assertEqual([node['id'] + for node in 
jobs_by_pol_part_dev['12sda']['nodes']], + [2, 3]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['12sdb']['nodes']], + [2, 3, 0]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['13sda']['nodes']], + [3, 1]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['13sdb']['nodes']], + [3, 0]) for part in ['00', '01', '02', '03']: - self.assertEquals(jobs_by_pol_part_dev[part + 'sda']['path'], - os.path.join(self.objects, part[1:])) - self.assertEquals(jobs_by_pol_part_dev[part + 'sdb']['path'], - os.path.join(objects_sdb, part[1:])) + self.assertEqual(jobs_by_pol_part_dev[part + 'sda']['path'], + os.path.join(self.objects, part[1:])) + self.assertEqual(jobs_by_pol_part_dev[part + 'sdb']['path'], + os.path.join(objects_sdb, part[1:])) for part in ['10', '11', '12', '13']: - self.assertEquals(jobs_by_pol_part_dev[part + 'sda']['path'], - os.path.join(self.objects_1, part[1:])) - self.assertEquals(jobs_by_pol_part_dev[part + 'sdb']['path'], - os.path.join(objects_1_sdb, part[1:])) + self.assertEqual(jobs_by_pol_part_dev[part + 'sda']['path'], + os.path.join(self.objects_1, part[1:])) + self.assertEqual(jobs_by_pol_part_dev[part + 'sdb']['path'], + os.path.join(objects_1_sdb, part[1:])) @mock.patch('swift.obj.replicator.random.shuffle', side_effect=lambda l: l) def test_collect_jobs_multi_disk_diff_ports_normal(self, mock_shuffle): @@ -480,7 +527,7 @@ class TestObjectReplicator(unittest.TestCase): self.assertEqual([mock.call(jobs)], mock_shuffle.mock_calls) jobs_to_delete = [j for j in jobs if j['delete']] - self.assertEquals(len(jobs_to_delete), 2) + self.assertEqual(len(jobs_to_delete), 2) self.assertEqual([ '3', # policy 0; 3 not on sdc '3', # policy 1; 3 not on sdc @@ -494,36 +541,36 @@ class TestObjectReplicator(unittest.TestCase): str(int(job['policy'])) + job['partition'] + job['device'] ] = job - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['00sdc']['nodes']], - [0, 1]) - 
self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['01sdc']['nodes']], - [1, 3]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['02sdc']['nodes']], - [3, 0]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['03sdc']['nodes']], - [3, 0, 1]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['10sdc']['nodes']], - [0, 1]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['11sdc']['nodes']], - [1, 3]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['12sdc']['nodes']], - [3, 0]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['13sdc']['nodes']], - [3, 0, 1]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['00sdc']['nodes']], + [0, 1]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['01sdc']['nodes']], + [1, 3]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['02sdc']['nodes']], + [3, 0]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['03sdc']['nodes']], + [3, 0, 1]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['10sdc']['nodes']], + [0, 1]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['11sdc']['nodes']], + [1, 3]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['12sdc']['nodes']], + [3, 0]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['13sdc']['nodes']], + [3, 0, 1]) for part in ['00', '01', '02', '03']: - self.assertEquals(jobs_by_pol_part_dev[part + 'sdc']['path'], - os.path.join(objects_sdc, part[1:])) + self.assertEqual(jobs_by_pol_part_dev[part + 'sdc']['path'], + os.path.join(objects_sdc, part[1:])) for part in ['10', '11', '12', '13']: - self.assertEquals(jobs_by_pol_part_dev[part + 'sdc']['path'], - os.path.join(objects_1_sdc, part[1:])) + self.assertEqual(jobs_by_pol_part_dev[part + 'sdc']['path'], + os.path.join(objects_1_sdc, part[1:])) @mock.patch('swift.obj.replicator.random.shuffle', 
side_effect=lambda l: l) def test_collect_jobs_multi_disk_servers_per_port(self, mock_shuffle): @@ -561,7 +608,7 @@ class TestObjectReplicator(unittest.TestCase): self.assertEqual([mock.call(jobs)], mock_shuffle.mock_calls) jobs_to_delete = [j for j in jobs if j['delete']] - self.assertEquals(len(jobs_to_delete), 4) + self.assertEqual(len(jobs_to_delete), 4) self.assertEqual([ '3', '0', # policy 0; 3 not on sdc, 0 not on sdd '3', '0', # policy 1; 3 not on sdc, 0 not on sdd @@ -575,70 +622,153 @@ class TestObjectReplicator(unittest.TestCase): str(int(job['policy'])) + job['partition'] + job['device'] ] = job - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['00sdc']['nodes']], - [0, 1]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['00sdd']['nodes']], - [0, 1, 2]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['01sdc']['nodes']], - [1, 3]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['01sdd']['nodes']], - [1, 2]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['02sdc']['nodes']], - [3, 0]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['02sdd']['nodes']], - [2, 0]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['03sdc']['nodes']], - [3, 0, 1]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['03sdd']['nodes']], - [0, 1]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['10sdc']['nodes']], - [0, 1]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['10sdd']['nodes']], - [0, 1, 2]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['11sdc']['nodes']], - [1, 3]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['11sdd']['nodes']], - [1, 2]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['12sdc']['nodes']], - [3, 0]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['12sdd']['nodes']], - [2, 0]) - 
self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['13sdc']['nodes']], - [3, 0, 1]) - self.assertEquals([node['id'] - for node in jobs_by_pol_part_dev['13sdd']['nodes']], - [0, 1]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['00sdc']['nodes']], + [0, 1]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['00sdd']['nodes']], + [0, 1, 2]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['01sdc']['nodes']], + [1, 3]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['01sdd']['nodes']], + [1, 2]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['02sdc']['nodes']], + [3, 0]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['02sdd']['nodes']], + [2, 0]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['03sdc']['nodes']], + [3, 0, 1]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['03sdd']['nodes']], + [0, 1]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['10sdc']['nodes']], + [0, 1]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['10sdd']['nodes']], + [0, 1, 2]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['11sdc']['nodes']], + [1, 3]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['11sdd']['nodes']], + [1, 2]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['12sdc']['nodes']], + [3, 0]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['12sdd']['nodes']], + [2, 0]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['13sdc']['nodes']], + [3, 0, 1]) + self.assertEqual([node['id'] + for node in jobs_by_pol_part_dev['13sdd']['nodes']], + [0, 1]) for part in ['00', '01', '02', '03']: - self.assertEquals(jobs_by_pol_part_dev[part + 'sdc']['path'], - os.path.join(objects_sdc, part[1:])) - self.assertEquals(jobs_by_pol_part_dev[part + 'sdd']['path'], - os.path.join(objects_sdd, part[1:])) + 
self.assertEqual(jobs_by_pol_part_dev[part + 'sdc']['path'], + os.path.join(objects_sdc, part[1:])) + self.assertEqual(jobs_by_pol_part_dev[part + 'sdd']['path'], + os.path.join(objects_sdd, part[1:])) for part in ['10', '11', '12', '13']: - self.assertEquals(jobs_by_pol_part_dev[part + 'sdc']['path'], - os.path.join(objects_1_sdc, part[1:])) - self.assertEquals(jobs_by_pol_part_dev[part + 'sdd']['path'], - os.path.join(objects_1_sdd, part[1:])) + self.assertEqual(jobs_by_pol_part_dev[part + 'sdc']['path'], + os.path.join(objects_1_sdc, part[1:])) + self.assertEqual(jobs_by_pol_part_dev[part + 'sdd']['path'], + os.path.join(objects_1_sdd, part[1:])) def test_collect_jobs_handoffs_first(self): self.replicator.handoffs_first = True jobs = self.replicator.collect_jobs() self.assertTrue(jobs[0]['delete']) - self.assertEquals('1', jobs[0]['partition']) + self.assertEqual('1', jobs[0]['partition']) + + def test_handoffs_first_mode_will_process_all_jobs_after_handoffs(self): + # make a object in the handoff & primary partition + expected_suffix_paths = [] + for policy in POLICIES: + # primary + ts = next(self.ts) + df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o', policy) + with df.create() as w: + w.write('asdf') + w.put({'X-Timestamp': ts.internal}) + w.commit(ts) + expected_suffix_paths.append(os.path.dirname(df._datadir)) + # handoff + ts = next(self.ts) + df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o', policy) + with df.create() as w: + w.write('asdf') + w.put({'X-Timestamp': ts.internal}) + w.commit(ts) + expected_suffix_paths.append(os.path.dirname(df._datadir)) + + # rsync will be called for all parts we created objects in + process_arg_checker = [ + # (return_code, stdout, ) + (0, '', []), + (0, '', []), + (0, '', []), # handoff job "first" policy + (0, '', []), + (0, '', []), + (0, '', []), # handoff job "second" policy + (0, '', []), + (0, '', []), # update job "first" policy + (0, '', []), + (0, '', []), # update job "second" policy + ] + # 
each handoff partition node gets one replicate request for after + # rsync (2 * 3), each primary partition with objects gets two + # replicate requests (pre-flight and post sync) to each of each + # partners (2 * 2 * 2), the 2 remaining empty parts (2 & 3) get a + # pre-flight replicate request per node for each storage policy + # (2 * 2 * 2) - so 6 + 8 + 8 == 22 + replicate_responses = [200] * 22 + stub_body = pickle.dumps({}) + with _mock_process(process_arg_checker) as rsync_log, \ + mock.patch('swift.obj.replicator.whataremyips', + side_effect=_ips), \ + mocked_http_conn(*replicate_responses, + body=stub_body) as conn_log: + self.replicator.handoffs_first = True + self.replicator.replicate() + # all jobs processed! + self.assertEqual(self.replicator.job_count, + self.replicator.replication_count) + + # sanity, all the handoffs suffixes we filled in were rsync'd + found_rsync_suffix_paths = set() + for subprocess_info in rsync_log: + local_path, remote_path = subprocess_info['rsync_args'][-2:] + found_rsync_suffix_paths.add(local_path) + self.assertEqual(set(expected_suffix_paths), found_rsync_suffix_paths) + # sanity, all nodes got replicated + found_replicate_calls = defaultdict(int) + for req in conn_log.requests: + self.assertEqual(req['method'], 'REPLICATE') + found_replicate_key = ( + int(req['headers']['X-Backend-Storage-Policy-Index']), + req['path']) + found_replicate_calls[found_replicate_key] += 1 + expected_replicate_calls = { + (0, '/sda/1/a83'): 3, + (1, '/sda/1/a83'): 3, + (0, '/sda/0'): 2, + (0, '/sda/0/a83'): 2, + (1, '/sda/0'): 2, + (1, '/sda/0/a83'): 2, + (0, '/sda/2'): 2, + (1, '/sda/2'): 2, + (0, '/sda/3'): 2, + (1, '/sda/3'): 2, + } + self.assertEqual(dict(found_replicate_calls), + expected_replicate_calls) def test_replicator_skips_bogus_partition_dirs(self): # A directory in the wrong place shouldn't crash the replicator @@ -771,6 +901,7 @@ class TestObjectReplicator(unittest.TestCase): self.conf['sync_method'] = 'ssync' self.replicator
= object_replicator.ObjectReplicator(self.conf) self.replicator.logger = debug_logger() + self.replicator._zero_stats() with mock.patch('swift.obj.replicator.http_connect', mock_http_connect(200)): @@ -1269,7 +1400,7 @@ class TestObjectReplicator(unittest.TestCase): self.assertFalse(process_errors) for i, result in [('0', True), ('1', False), ('2', True), ('3', True)]: - self.assertEquals(os.access( + self.assertEqual(os.access( os.path.join(self.objects, i, diskfile.HASH_FILE), os.F_OK), result) @@ -1282,7 +1413,7 @@ class TestObjectReplicator(unittest.TestCase): mount_check='false', timeout='300', stats_interval='1') replicator = object_replicator.ObjectReplicator(conf) was_connector = object_replicator.http_connect - was_get_hashes = object_replicator.get_hashes + was_get_hashes = object_replicator.DiskFileManager._get_hashes was_execute = tpool.execute self.get_hash_count = 0 try: @@ -1300,7 +1431,7 @@ class TestObjectReplicator(unittest.TestCase): self.i_failed = False object_replicator.http_connect = mock_http_connect(200) - object_replicator.get_hashes = fake_get_hashes + object_replicator.DiskFileManager._get_hashes = fake_get_hashes replicator.logger.exception = \ lambda *args, **kwargs: fake_exc(self, *args, **kwargs) # Write some files into '1' and run replicate- they should be moved @@ -1337,7 +1468,7 @@ class TestObjectReplicator(unittest.TestCase): self.assertFalse(self.i_failed) finally: object_replicator.http_connect = was_connector - object_replicator.get_hashes = was_get_hashes + object_replicator.DiskFileManager._get_hashes = was_get_hashes tpool.execute = was_execute def test_run(self): @@ -1391,15 +1522,15 @@ class TestObjectReplicator(unittest.TestCase): self.replicator.update(job) self.assertTrue(error in mock_logger.error.call_args[0][0]) self.assertTrue(expect in mock_logger.exception.call_args[0][0]) - self.assertEquals(len(self.replicator.partition_times), 1) - self.assertEquals(mock_http.call_count, len(ring._devs) - 1) + 
self.assertEqual(len(self.replicator.partition_times), 1) + self.assertEqual(mock_http.call_count, len(ring._devs) - 1) reqs = [] for node in job['nodes']: reqs.append(mock.call(node['ip'], node['port'], node['device'], job['partition'], 'REPLICATE', '', headers=self.headers)) if job['partition'] == '0': - self.assertEquals(self.replicator.suffix_hash, 0) + self.assertEqual(self.replicator.suffix_hash, 0) mock_http.assert_has_calls(reqs, any_order=True) mock_http.reset_mock() mock_logger.reset_mock() @@ -1411,7 +1542,7 @@ class TestObjectReplicator(unittest.TestCase): set_default(self) self.replicator.update(job) self.assertTrue(error in mock_logger.error.call_args[0][0]) - self.assertEquals(len(self.replicator.partition_times), 1) + self.assertEqual(len(self.replicator.partition_times), 1) mock_logger.reset_mock() # Check successful http_connection and exception with @@ -1422,7 +1553,7 @@ class TestObjectReplicator(unittest.TestCase): set_default(self) self.replicator.update(job) self.assertTrue(expect in mock_logger.exception.call_args[0][0]) - self.assertEquals(len(self.replicator.partition_times), 1) + self.assertEqual(len(self.replicator.partition_times), 1) mock_logger.reset_mock() # Check successful http_connection and correct @@ -1437,12 +1568,12 @@ class TestObjectReplicator(unittest.TestCase): local_job = job.copy() continue self.replicator.update(job) - self.assertEquals(mock_logger.exception.call_count, 0) - self.assertEquals(mock_logger.error.call_count, 0) - self.assertEquals(len(self.replicator.partition_times), 1) - self.assertEquals(self.replicator.suffix_hash, 0) - self.assertEquals(self.replicator.suffix_sync, 0) - self.assertEquals(self.replicator.suffix_count, 0) + self.assertEqual(mock_logger.exception.call_count, 0) + self.assertEqual(mock_logger.error.call_count, 0) + self.assertEqual(len(self.replicator.partition_times), 1) + self.assertEqual(self.replicator.suffix_hash, 0) + self.assertEqual(self.replicator.suffix_sync, 0) + 
self.assertEqual(self.replicator.suffix_count, 0) mock_logger.reset_mock() # Check successful http_connect and sync for local node @@ -1458,11 +1589,11 @@ class TestObjectReplicator(unittest.TestCase): for node in local_job['nodes']: reqs.append(mock.call(node, local_job, ['a83'])) fake_func.assert_has_calls(reqs, any_order=True) - self.assertEquals(fake_func.call_count, 2) - self.assertEquals(self.replicator.replication_count, 1) - self.assertEquals(self.replicator.suffix_sync, 2) - self.assertEquals(self.replicator.suffix_hash, 1) - self.assertEquals(self.replicator.suffix_count, 1) + self.assertEqual(fake_func.call_count, 2) + self.assertEqual(self.replicator.replication_count, 1) + self.assertEqual(self.replicator.suffix_sync, 2) + self.assertEqual(self.replicator.suffix_hash, 1) + self.assertEqual(self.replicator.suffix_count, 1) # Efficient Replication Case set_default(self) @@ -1477,11 +1608,11 @@ class TestObjectReplicator(unittest.TestCase): # The candidate nodes to replicate (i.e. 
dev1 and dev3) # belong to another region self.replicator.update(job) - self.assertEquals(fake_func.call_count, 1) - self.assertEquals(self.replicator.replication_count, 1) - self.assertEquals(self.replicator.suffix_sync, 1) - self.assertEquals(self.replicator.suffix_hash, 1) - self.assertEquals(self.replicator.suffix_count, 1) + self.assertEqual(fake_func.call_count, 1) + self.assertEqual(self.replicator.replication_count, 1) + self.assertEqual(self.replicator.suffix_sync, 1) + self.assertEqual(self.replicator.suffix_hash, 1) + self.assertEqual(self.replicator.suffix_count, 1) mock_http.reset_mock() mock_logger.reset_mock() diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index bff913cb57..937d9f4106 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -1,4 +1,4 @@ -#-*- coding:utf-8 -*- +# coding: utf-8 # Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,25 +16,27 @@ """Tests for swift.obj.server""" -import cPickle as pickle +import six.moves.cPickle as pickle import datetime import json import errno import operator import os import mock +import six +from six import StringIO import unittest import math import random from shutil import rmtree -from StringIO import StringIO from time import gmtime, strftime, time, struct_time from tempfile import mkdtemp from hashlib import md5 import itertools import tempfile +from contextlib import contextmanager -from eventlet import sleep, spawn, wsgi, listen, Timeout, tpool +from eventlet import sleep, spawn, wsgi, listen, Timeout, tpool, greenthread from eventlet.green import httplib from nose import SkipTest @@ -49,7 +51,7 @@ from swift.common import utils, bufferedhttp from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \ NullLogger, storage_directory, public, replication from swift.common import constraints -from swift.common.swob import Request, HeaderKeyDict, WsgiStringIO +from 
swift.common.swob import Request, HeaderKeyDict, WsgiBytesIO from swift.common.splice import splice from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy, POLICIES, EC_POLICY) @@ -67,6 +69,30 @@ test_policies = [ ] +@contextmanager +def fake_spawn(): + """ + Spawn and capture the result so we can later wait on it. This means we can + test code executing in a greenthread but still wait() on the result to + ensure that the method has completed. + """ + + greenlets = [] + + def _inner_fake_spawn(func, *a, **kw): + gt = greenthread.spawn(func, *a, **kw) + greenlets.append(gt) + return gt + + object_server.spawn = _inner_fake_spawn + with mock.patch('swift.obj.server.spawn', _inner_fake_spawn): + try: + yield + finally: + for gt in greenlets: + gt.wait() + + @patch_policies(test_policies) class TestObjectController(unittest.TestCase): """Test swift.obj.server.ObjectController""" @@ -79,7 +105,8 @@ class TestObjectController(unittest.TestCase): self.testdir = os.path.join(self.tmpdir, 'tmp_test_object_server_ObjectController') mkdirs(os.path.join(self.testdir, 'sda1')) - self.conf = {'devices': self.testdir, 'mount_check': 'false'} + self.conf = {'devices': self.testdir, 'mount_check': 'false', + 'container_update_timeout': 0.0} self.object_controller = object_server.ObjectController( self.conf, logger=debug_logger()) self.object_controller.bytes_per_sync = 1 @@ -159,7 +186,7 @@ class TestObjectController(unittest.TestCase): 'X-Object-Meta-Two': 'Two'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) timestamp = normalize_timestamp(time()) req = Request.blank('/sda1/p/a/c/o', @@ -172,32 +199,32 @@ class TestObjectController(unittest.TestCase): 'Bar': 'barheader', 'Content-Type': 'application/x-test'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 202) + self.assertEqual(resp.status_int, 202) req = 
Request.blank('/sda1/p/a/c/o') resp = req.get_response(self.object_controller) - self.assert_("X-Object-Meta-1" not in resp.headers and - "X-Object-Meta-Two" not in resp.headers and - "X-Object-Meta-3" in resp.headers and - "X-Object-Meta-4" in resp.headers and - "Foo" in resp.headers and - "Bar" in resp.headers and - "Baz" not in resp.headers and - "Content-Encoding" in resp.headers) - self.assertEquals(resp.headers['Content-Type'], 'application/x-test') + self.assertTrue("X-Object-Meta-1" not in resp.headers and + "X-Object-Meta-Two" not in resp.headers and + "X-Object-Meta-3" in resp.headers and + "X-Object-Meta-4" in resp.headers and + "Foo" in resp.headers and + "Bar" in resp.headers and + "Baz" not in resp.headers and + "Content-Encoding" in resp.headers) + self.assertEqual(resp.headers['Content-Type'], 'application/x-test') req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.object_controller) - self.assert_("X-Object-Meta-1" not in resp.headers and - "X-Object-Meta-Two" not in resp.headers and - "X-Object-Meta-3" in resp.headers and - "X-Object-Meta-4" in resp.headers and - "Foo" in resp.headers and - "Bar" in resp.headers and - "Baz" not in resp.headers and - "Content-Encoding" in resp.headers) - self.assertEquals(resp.headers['Content-Type'], 'application/x-test') + self.assertTrue("X-Object-Meta-1" not in resp.headers and + "X-Object-Meta-Two" not in resp.headers and + "X-Object-Meta-3" in resp.headers and + "X-Object-Meta-4" in resp.headers and + "Foo" in resp.headers and + "Bar" in resp.headers and + "Baz" not in resp.headers and + "Content-Encoding" in resp.headers) + self.assertEqual(resp.headers['Content-Type'], 'application/x-test') timestamp = normalize_timestamp(time()) req = Request.blank('/sda1/p/a/c/o', @@ -205,15 +232,15 @@ class TestObjectController(unittest.TestCase): headers={'X-Timestamp': timestamp, 'Content-Type': 'application/x-test'}) resp = req.get_response(self.object_controller) - 
self.assertEquals(resp.status_int, 202) + self.assertEqual(resp.status_int, 202) req = Request.blank('/sda1/p/a/c/o') resp = req.get_response(self.object_controller) - self.assert_("X-Object-Meta-3" not in resp.headers and - "X-Object-Meta-4" not in resp.headers and - "Foo" not in resp.headers and - "Bar" not in resp.headers and - "Content-Encoding" not in resp.headers) - self.assertEquals(resp.headers['Content-Type'], 'application/x-test') + self.assertTrue("X-Object-Meta-3" not in resp.headers and + "X-Object-Meta-4" not in resp.headers and + "Foo" not in resp.headers and + "Bar" not in resp.headers and + "Content-Encoding" not in resp.headers) + self.assertEqual(resp.headers['Content-Type'], 'application/x-test') # test defaults self.object_controller.allowed_headers = original_headers @@ -226,18 +253,20 @@ class TestObjectController(unittest.TestCase): 'X-Object-Manifest': 'c/bar', 'Content-Encoding': 'gzip', 'Content-Disposition': 'bar', + 'X-Static-Large-Object': 'True', }) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c/o') resp = req.get_response(self.object_controller) - self.assert_("X-Object-Meta-1" in resp.headers and - "Foo" not in resp.headers and - "Content-Encoding" in resp.headers and - "X-Object-Manifest" in resp.headers and - "Content-Disposition" in resp.headers) - self.assertEquals(resp.headers['Content-Type'], 'application/x-test') + self.assertTrue("X-Object-Meta-1" in resp.headers and + "Foo" not in resp.headers and + "Content-Encoding" in resp.headers and + "X-Object-Manifest" in resp.headers and + "Content-Disposition" in resp.headers and + "X-Static-Large-Object" in resp.headers) + self.assertEqual(resp.headers['Content-Type'], 'application/x-test') timestamp = normalize_timestamp(time()) req = Request.blank('/sda1/p/a/c/o', @@ -247,16 +276,17 @@ class TestObjectController(unittest.TestCase): 'Foo': 
'fooheader', 'Content-Type': 'application/x-test'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 202) + self.assertEqual(resp.status_int, 202) req = Request.blank('/sda1/p/a/c/o') resp = req.get_response(self.object_controller) - self.assert_("X-Object-Meta-1" not in resp.headers and - "Foo" not in resp.headers and - "Content-Encoding" not in resp.headers and - "X-Object-Manifest" not in resp.headers and - "Content-Disposition" not in resp.headers and - "X-Object-Meta-3" in resp.headers) - self.assertEquals(resp.headers['Content-Type'], 'application/x-test') + self.assertTrue("X-Object-Meta-1" not in resp.headers and + "Foo" not in resp.headers and + "Content-Encoding" not in resp.headers and + "X-Object-Manifest" not in resp.headers and + "Content-Disposition" not in resp.headers and + "X-Object-Meta-3" in resp.headers and + "X-Static-Large-Object" in resp.headers) + self.assertEqual(resp.headers['Content-Type'], 'application/x-test') # Test for empty metadata timestamp = normalize_timestamp(time()) @@ -269,7 +299,7 @@ class TestObjectController(unittest.TestCase): self.assertEqual(resp.status_int, 202) req = Request.blank('/sda1/p/a/c/o') resp = req.get_response(self.object_controller) - self.assertEquals(resp.headers["x-object-meta-3"], '') + self.assertEqual(resp.headers["x-object-meta-3"], '') def test_POST_old_timestamp(self): ts = time() @@ -281,7 +311,7 @@ class TestObjectController(unittest.TestCase): 'X-Object-Meta-Two': 'Two'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) # Same timestamp should result in 409 req = Request.blank('/sda1/p/a/c/o', @@ -292,7 +322,7 @@ class TestObjectController(unittest.TestCase): 'Content-Encoding': 'gzip', 'Content-Type': 'application/x-test'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 409) + self.assertEqual(resp.status_int, 409) 
self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp) # Earlier timestamp should result in 409 @@ -305,7 +335,7 @@ class TestObjectController(unittest.TestCase): 'Content-Encoding': 'gzip', 'Content-Type': 'application/x-test'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 409) + self.assertEqual(resp.status_int, 409) self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp) def test_POST_not_exist(self): @@ -317,7 +347,7 @@ class TestObjectController(unittest.TestCase): 'X-Object-Meta-2': 'Two', 'Content-Type': 'text/plain'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_POST_invalid_path(self): timestamp = normalize_timestamp(time()) @@ -327,7 +357,7 @@ class TestObjectController(unittest.TestCase): 'X-Object-Meta-2': 'Two', 'Content-Type': 'text/plain'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_POST_no_timestamp(self): req = Request.blank('/sda1/p/a/c/o', @@ -371,55 +401,54 @@ class TestObjectController(unittest.TestCase): return lambda *args, **kwargs: FakeConn(response, with_exc) - old_http_connect = object_server.http_connect - try: - ts = time() - timestamp = normalize_timestamp(ts) - req = Request.blank( - '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={'X-Timestamp': timestamp, - 'Content-Type': 'text/plain', - 'Content-Length': '0'}) + ts = time() + timestamp = normalize_timestamp(ts) + req = Request.blank( + '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': timestamp, + 'Content-Type': 'text/plain', + 'Content-Length': '0'}) + resp = req.get_response(self.object_controller) + self.assertEqual(resp.status_int, 201) + req = Request.blank( + '/sda1/p/a/c/o', + environ={'REQUEST_METHOD': 'POST'}, + headers={'X-Timestamp': normalize_timestamp(ts + 1), + 
'X-Container-Host': '1.2.3.4:0', + 'X-Container-Partition': '3', + 'X-Container-Device': 'sda1', + 'X-Container-Timestamp': '1', + 'Content-Type': 'application/new1'}) + with mock.patch.object(object_server, 'http_connect', + mock_http_connect(202)): resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) - req = Request.blank( - '/sda1/p/a/c/o', - environ={'REQUEST_METHOD': 'POST'}, - headers={'X-Timestamp': normalize_timestamp(ts + 1), - 'X-Container-Host': '1.2.3.4:0', - 'X-Container-Partition': '3', - 'X-Container-Device': 'sda1', - 'X-Container-Timestamp': '1', - 'Content-Type': 'application/new1'}) - object_server.http_connect = mock_http_connect(202) + self.assertEqual(resp.status_int, 202) + req = Request.blank( + '/sda1/p/a/c/o', + environ={'REQUEST_METHOD': 'POST'}, + headers={'X-Timestamp': normalize_timestamp(ts + 2), + 'X-Container-Host': '1.2.3.4:0', + 'X-Container-Partition': '3', + 'X-Container-Device': 'sda1', + 'X-Container-Timestamp': '1', + 'Content-Type': 'application/new1'}) + with mock.patch.object(object_server, 'http_connect', + mock_http_connect(202, with_exc=True)): resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 202) - req = Request.blank( - '/sda1/p/a/c/o', - environ={'REQUEST_METHOD': 'POST'}, - headers={'X-Timestamp': normalize_timestamp(ts + 2), - 'X-Container-Host': '1.2.3.4:0', - 'X-Container-Partition': '3', - 'X-Container-Device': 'sda1', - 'X-Container-Timestamp': '1', - 'Content-Type': 'application/new1'}) - object_server.http_connect = mock_http_connect(202, with_exc=True) + self.assertEqual(resp.status_int, 202) + req = Request.blank( + '/sda1/p/a/c/o', + environ={'REQUEST_METHOD': 'POST'}, + headers={'X-Timestamp': normalize_timestamp(ts + 3), + 'X-Container-Host': '1.2.3.4:0', + 'X-Container-Partition': '3', + 'X-Container-Device': 'sda1', + 'X-Container-Timestamp': '1', + 'Content-Type': 'application/new2'}) + with mock.patch.object(object_server, 
'http_connect', + mock_http_connect(500)): resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 202) - req = Request.blank( - '/sda1/p/a/c/o', - environ={'REQUEST_METHOD': 'POST'}, - headers={'X-Timestamp': normalize_timestamp(ts + 3), - 'X-Container-Host': '1.2.3.4:0', - 'X-Container-Partition': '3', - 'X-Container-Device': 'sda1', - 'X-Container-Timestamp': '1', - 'Content-Type': 'application/new2'}) - object_server.http_connect = mock_http_connect(500) - resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 202) - finally: - object_server.http_connect = old_http_connect + self.assertEqual(resp.status_int, 202) def test_POST_quarantine_zbyte(self): timestamp = normalize_timestamp(time()) @@ -428,7 +457,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/x-test'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) objfile = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o', policy=POLICIES.legacy) @@ -439,30 +468,30 @@ class TestObjectController(unittest.TestCase): os.unlink(objfile._data_file) with open(objfile._data_file, 'w') as fp: diskfile.write_metadata(fp, metadata) - self.assertEquals(os.listdir(objfile._datadir)[0], file_name) + self.assertEqual(os.listdir(objfile._datadir)[0], file_name) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': normalize_timestamp(time())}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) quar_dir = os.path.join( self.testdir, 'sda1', 'quarantined', 'objects', os.path.basename(os.path.dirname(objfile._data_file))) - self.assertEquals(os.listdir(quar_dir)[0], file_name) + self.assertEqual(os.listdir(quar_dir)[0], file_name) def test_PUT_invalid_path(self): req = Request.blank('/sda1/p/a/c', 
environ={'REQUEST_METHOD': 'PUT'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_PUT_no_timestamp(self): req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT', 'CONTENT_LENGTH': '0'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_PUT_no_content_type(self): req = Request.blank( @@ -471,7 +500,7 @@ class TestObjectController(unittest.TestCase): 'Content-Length': '6'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_PUT_invalid_content_type(self): req = Request.blank( @@ -481,8 +510,8 @@ class TestObjectController(unittest.TestCase): 'Content-Type': '\xff\xff'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 400) - self.assert_('Content-Type' in resp.body) + self.assertEqual(resp.status_int, 400) + self.assertTrue('Content-Type' in resp.body) def test_PUT_no_content_length(self): req = Request.blank( @@ -492,7 +521,7 @@ class TestObjectController(unittest.TestCase): req.body = 'VERIFY' del req.headers['Content-Length'] resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 411) + self.assertEqual(resp.status_int, 411) def test_PUT_zero_content_length(self): req = Request.blank( @@ -500,9 +529,9 @@ class TestObjectController(unittest.TestCase): headers={'X-Timestamp': normalize_timestamp(time()), 'Content-Type': 'application/octet-stream'}) req.body = '' - self.assertEquals(req.headers['Content-Length'], '0') + self.assertEqual(req.headers['Content-Length'], '0') resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_PUT_bad_transfer_encoding(self): req = Request.blank( @@ -525,7 +554,7 @@ 
class TestObjectController(unittest.TestCase): 'If-None-Match': '*'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) # File should already exist so it should fail timestamp = normalize_timestamp(time()) req = Request.blank( @@ -536,7 +565,7 @@ class TestObjectController(unittest.TestCase): 'If-None-Match': '*'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 412) + self.assertEqual(resp.status_int, 412) def test_PUT_if_none_match(self): # PUT with if-none-match set and nothing there should succeed @@ -549,7 +578,7 @@ class TestObjectController(unittest.TestCase): 'If-None-Match': 'notthere'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) # PUT with if-none-match of the object etag should fail timestamp = normalize_timestamp(time()) req = Request.blank( @@ -560,7 +589,7 @@ class TestObjectController(unittest.TestCase): 'If-None-Match': '0b4c12d7e0a73840c1c4f148fda3b037'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 412) + self.assertEqual(resp.status_int, 412) def test_PUT_common(self): timestamp = normalize_timestamp(time()) @@ -579,22 +608,22 @@ class TestObjectController(unittest.TestCase): self.object_controller.allowed_headers = ['Custom-Header'] resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) objfile = os.path.join( self.testdir, 'sda1', storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p', hash_path('a', 'c', 'o')), utils.Timestamp(timestamp).internal + '.data') - self.assert_(os.path.isfile(objfile)) - self.assertEquals(open(objfile).read(), 'VERIFY') - self.assertEquals(diskfile.read_metadata(objfile), - {'X-Timestamp': 
utils.Timestamp(timestamp).internal, - 'Content-Length': '6', - 'ETag': '0b4c12d7e0a73840c1c4f148fda3b037', - 'Content-Type': 'application/octet-stream', - 'name': '/a/c/o', - 'X-Object-Meta-Test': 'one', - 'Custom-Header': '*'}) + self.assertTrue(os.path.isfile(objfile)) + self.assertEqual(open(objfile).read(), 'VERIFY') + self.assertEqual(diskfile.read_metadata(objfile), + {'X-Timestamp': utils.Timestamp(timestamp).internal, + 'Content-Length': '6', + 'ETag': '0b4c12d7e0a73840c1c4f148fda3b037', + 'Content-Type': 'application/octet-stream', + 'name': '/a/c/o', + 'X-Object-Meta-Test': 'one', + 'Custom-Header': '*'}) def test_PUT_overwrite(self): req = Request.blank( @@ -604,7 +633,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) sleep(.00001) timestamp = normalize_timestamp(time()) req = Request.blank( @@ -614,21 +643,21 @@ class TestObjectController(unittest.TestCase): 'Content-Encoding': 'gzip'}) req.body = 'VERIFY TWO' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) objfile = os.path.join( self.testdir, 'sda1', storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p', hash_path('a', 'c', 'o')), utils.Timestamp(timestamp).internal + '.data') - self.assert_(os.path.isfile(objfile)) - self.assertEquals(open(objfile).read(), 'VERIFY TWO') - self.assertEquals(diskfile.read_metadata(objfile), - {'X-Timestamp': utils.Timestamp(timestamp).internal, - 'Content-Length': '10', - 'ETag': 'b381a4c5dab1eaa1eb9711fa647cd039', - 'Content-Type': 'text/plain', - 'name': '/a/c/o', - 'Content-Encoding': 'gzip'}) + self.assertTrue(os.path.isfile(objfile)) + self.assertEqual(open(objfile).read(), 'VERIFY TWO') + self.assertEqual(diskfile.read_metadata(objfile), + {'X-Timestamp': 
utils.Timestamp(timestamp).internal, + 'Content-Length': '10', + 'ETag': 'b381a4c5dab1eaa1eb9711fa647cd039', + 'Content-Type': 'text/plain', + 'name': '/a/c/o', + 'Content-Encoding': 'gzip'}) def test_PUT_overwrite_w_delete_at(self): req = Request.blank( @@ -675,7 +704,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': normalize_timestamp(ts), @@ -683,7 +712,7 @@ class TestObjectController(unittest.TestCase): 'Content-Encoding': 'gzip'}) req.body = 'VERIFY TWO' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 409) + self.assertEqual(resp.status_int, 409) self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -693,7 +722,7 @@ class TestObjectController(unittest.TestCase): 'Content-Encoding': 'gzip'}) req.body = 'VERIFY THREE' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 409) + self.assertEqual(resp.status_int, 409) self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp) def test_PUT_no_etag(self): @@ -703,7 +732,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'text/plain'}) req.body = 'test' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_PUT_invalid_etag(self): req = Request.blank( @@ -713,7 +742,7 @@ class TestObjectController(unittest.TestCase): 'ETag': 'invalid'}) req.body = 'test' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 422) + self.assertEqual(resp.status_int, 422) def test_PUT_user_metadata(self): timestamp = normalize_timestamp(time()) @@ 
-726,22 +755,22 @@ class TestObjectController(unittest.TestCase): 'X-Object-Meta-Two': 'Two'}) req.body = 'VERIFY THREE' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) objfile = os.path.join( self.testdir, 'sda1', storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p', hash_path('a', 'c', 'o')), utils.Timestamp(timestamp).internal + '.data') - self.assert_(os.path.isfile(objfile)) - self.assertEquals(open(objfile).read(), 'VERIFY THREE') - self.assertEquals(diskfile.read_metadata(objfile), - {'X-Timestamp': utils.Timestamp(timestamp).internal, - 'Content-Length': '12', - 'ETag': 'b114ab7b90d9ccac4bd5d99cc7ebb568', - 'Content-Type': 'text/plain', - 'name': '/a/c/o', - 'X-Object-Meta-1': 'One', - 'X-Object-Meta-Two': 'Two'}) + self.assertTrue(os.path.isfile(objfile)) + self.assertEqual(open(objfile).read(), 'VERIFY THREE') + self.assertEqual(diskfile.read_metadata(objfile), + {'X-Timestamp': utils.Timestamp(timestamp).internal, + 'Content-Length': '12', + 'ETag': 'b114ab7b90d9ccac4bd5d99cc7ebb568', + 'Content-Type': 'text/plain', + 'name': '/a/c/o', + 'X-Object-Meta-1': 'One', + 'X-Object-Meta-Two': 'Two'}) def test_PUT_etag_in_footer(self): timestamp = normalize_timestamp(time()) @@ -997,7 +1026,7 @@ class TestObjectController(unittest.TestCase): with mock.patch('xattr.getxattr', mock_get_and_setxattr): with mock.patch('xattr.setxattr', mock_get_and_setxattr): resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 507) + self.assertEqual(resp.status_int, 507) def test_PUT_client_timeout(self): class FakeTimeout(BaseException): @@ -1017,9 +1046,9 @@ class TestObjectController(unittest.TestCase): headers={'X-Timestamp': timestamp, 'Content-Type': 'text/plain', 'Content-Length': '6'}) - req.environ['wsgi.input'] = WsgiStringIO('VERIFY') + req.environ['wsgi.input'] = WsgiBytesIO(b'VERIFY') resp = req.get_response(self.object_controller) - 
self.assertEquals(resp.status_int, 408) + self.assertEqual(resp.status_int, 408) def test_PUT_system_metadata(self): # check that sysmeta is stored in diskfile @@ -1034,23 +1063,23 @@ class TestObjectController(unittest.TestCase): 'X-Object-Sysmeta-Two': 'Two'}) req.body = 'VERIFY SYSMETA' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) objfile = os.path.join( self.testdir, 'sda1', storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p', hash_path('a', 'c', 'o')), timestamp + '.data') - self.assert_(os.path.isfile(objfile)) - self.assertEquals(open(objfile).read(), 'VERIFY SYSMETA') - self.assertEquals(diskfile.read_metadata(objfile), - {'X-Timestamp': timestamp, - 'Content-Length': '14', - 'Content-Type': 'text/plain', - 'ETag': '1000d172764c9dbc3a5798a67ec5bb76', - 'name': '/a/c/o', - 'X-Object-Meta-1': 'One', - 'X-Object-Sysmeta-1': 'One', - 'X-Object-Sysmeta-Two': 'Two'}) + self.assertTrue(os.path.isfile(objfile)) + self.assertEqual(open(objfile).read(), 'VERIFY SYSMETA') + self.assertEqual(diskfile.read_metadata(objfile), + {'X-Timestamp': timestamp, + 'Content-Length': '14', + 'Content-Type': 'text/plain', + 'ETag': '1000d172764c9dbc3a5798a67ec5bb76', + 'name': '/a/c/o', + 'X-Object-Meta-1': 'One', + 'X-Object-Sysmeta-1': 'One', + 'X-Object-Sysmeta-Two': 'Two'}) def test_POST_system_metadata(self): # check that diskfile sysmeta is not changed by a POST @@ -1065,7 +1094,7 @@ class TestObjectController(unittest.TestCase): 'X-Object-Sysmeta-Two': 'Two'}) req.body = 'VERIFY SYSMETA' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) timestamp2 = normalize_timestamp(time()) req = Request.blank( @@ -1075,7 +1104,7 @@ class TestObjectController(unittest.TestCase): 'X-Object-Sysmeta-1': 'Not One', 'X-Object-Sysmeta-Two': 'Not Two'}) resp = req.get_response(self.object_controller) - 
self.assertEquals(resp.status_int, 202) + self.assertEqual(resp.status_int, 202) # original .data file metadata should be unchanged objfile = os.path.join( @@ -1083,17 +1112,17 @@ class TestObjectController(unittest.TestCase): storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p', hash_path('a', 'c', 'o')), timestamp1 + '.data') - self.assert_(os.path.isfile(objfile)) - self.assertEquals(open(objfile).read(), 'VERIFY SYSMETA') - self.assertEquals(diskfile.read_metadata(objfile), - {'X-Timestamp': timestamp1, - 'Content-Length': '14', - 'Content-Type': 'text/plain', - 'ETag': '1000d172764c9dbc3a5798a67ec5bb76', - 'name': '/a/c/o', - 'X-Object-Meta-1': 'One', - 'X-Object-Sysmeta-1': 'One', - 'X-Object-Sysmeta-Two': 'Two'}) + self.assertTrue(os.path.isfile(objfile)) + self.assertEqual(open(objfile).read(), 'VERIFY SYSMETA') + self.assertEqual(diskfile.read_metadata(objfile), + {'X-Timestamp': timestamp1, + 'Content-Length': '14', + 'Content-Type': 'text/plain', + 'ETag': '1000d172764c9dbc3a5798a67ec5bb76', + 'name': '/a/c/o', + 'X-Object-Meta-1': 'One', + 'X-Object-Sysmeta-1': 'One', + 'X-Object-Sysmeta-Two': 'Two'}) # .meta file metadata should have only user meta items metafile = os.path.join( @@ -1101,11 +1130,11 @@ class TestObjectController(unittest.TestCase): storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p', hash_path('a', 'c', 'o')), timestamp2 + '.meta') - self.assert_(os.path.isfile(metafile)) - self.assertEquals(diskfile.read_metadata(metafile), - {'X-Timestamp': timestamp2, - 'name': '/a/c/o', - 'X-Object-Meta-1': 'Not One'}) + self.assertTrue(os.path.isfile(metafile)) + self.assertEqual(diskfile.read_metadata(metafile), + {'X-Timestamp': timestamp2, + 'name': '/a/c/o', + 'X-Object-Meta-1': 'Not One'}) def test_PUT_then_fetch_system_metadata(self): timestamp = normalize_timestamp(time()) @@ -1119,22 +1148,22 @@ class TestObjectController(unittest.TestCase): 'X-Object-Sysmeta-Two': 'Two'}) req.body = 'VERIFY SYSMETA' resp = 
req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def check_response(resp): - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.content_length, 14) - self.assertEquals(resp.content_type, 'text/plain') - self.assertEquals(resp.headers['content-type'], 'text/plain') - self.assertEquals( + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.content_length, 14) + self.assertEqual(resp.content_type, 'text/plain') + self.assertEqual(resp.headers['content-type'], 'text/plain') + self.assertEqual( resp.headers['last-modified'], strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(math.ceil(float(timestamp))))) - self.assertEquals(resp.headers['etag'], - '"1000d172764c9dbc3a5798a67ec5bb76"') - self.assertEquals(resp.headers['x-object-meta-1'], 'One') - self.assertEquals(resp.headers['x-object-sysmeta-1'], 'One') - self.assertEquals(resp.headers['x-object-sysmeta-two'], 'Two') + self.assertEqual(resp.headers['etag'], + '"1000d172764c9dbc3a5798a67ec5bb76"') + self.assertEqual(resp.headers['x-object-meta-1'], 'One') + self.assertEqual(resp.headers['x-object-sysmeta-1'], 'One') + self.assertEqual(resp.headers['x-object-sysmeta-two'], 'Two') req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) @@ -1158,7 +1187,7 @@ class TestObjectController(unittest.TestCase): 'X-Object-Sysmeta-Two': 'Two'}) req.body = 'VERIFY SYSMETA' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) timestamp2 = normalize_timestamp(time()) req = Request.blank( @@ -1168,23 +1197,23 @@ class TestObjectController(unittest.TestCase): 'X-Object-Sysmeta-1': 'Not One', 'X-Object-Sysmeta-Two': 'Not Two'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 202) + self.assertEqual(resp.status_int, 202) def check_response(resp): # user meta should be updated but not sysmeta - 
self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.content_length, 14) - self.assertEquals(resp.content_type, 'text/plain') - self.assertEquals(resp.headers['content-type'], 'text/plain') - self.assertEquals( + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.content_length, 14) + self.assertEqual(resp.content_type, 'text/plain') + self.assertEqual(resp.headers['content-type'], 'text/plain') + self.assertEqual( resp.headers['last-modified'], strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(math.ceil(float(timestamp2))))) - self.assertEquals(resp.headers['etag'], - '"1000d172764c9dbc3a5798a67ec5bb76"') - self.assertEquals(resp.headers['x-object-meta-1'], 'Not One') - self.assertEquals(resp.headers['x-object-sysmeta-1'], 'One') - self.assertEquals(resp.headers['x-object-sysmeta-two'], 'Two') + self.assertEqual(resp.headers['etag'], + '"1000d172764c9dbc3a5798a67ec5bb76"') + self.assertEqual(resp.headers['x-object-meta-1'], 'Not One') + self.assertEqual(resp.headers['x-object-sysmeta-1'], 'One') + self.assertEqual(resp.headers['x-object-sysmeta-two'], 'Two') req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) @@ -1196,6 +1225,81 @@ class TestObjectController(unittest.TestCase): resp = req.get_response(self.object_controller) check_response(resp) + def test_PUT_with_replication_headers(self): + # check that otherwise disallowed headers are accepted when specified + # by X-Backend-Replication-Headers + + # first PUT object + timestamp1 = normalize_timestamp(time()) + req = Request.blank( + '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': timestamp1, + 'Content-Type': 'text/plain', + 'Content-Length': '14', + 'Etag': '1000d172764c9dbc3a5798a67ec5bb76', + 'Custom-Header': 'custom1', + 'X-Object-Meta-1': 'meta1', + 'X-Static-Large-Object': 'False'}) + req.body = 'VERIFY SYSMETA' + + # restrict set of allowed headers on this server + with mock.patch.object(self.object_controller, 'allowed_headers', + 
['Custom-Header']): + resp = req.get_response(self.object_controller) + self.assertEqual(resp.status_int, 201) + + objfile = os.path.join( + self.testdir, 'sda1', + storage_directory(diskfile.get_data_dir(0), 'p', + hash_path('a', 'c', 'o')), + timestamp1 + '.data') + # X-Static-Large-Object is disallowed. + self.assertEqual(diskfile.read_metadata(objfile), + {'X-Timestamp': timestamp1, + 'Content-Type': 'text/plain', + 'Content-Length': '14', + 'ETag': '1000d172764c9dbc3a5798a67ec5bb76', + 'name': '/a/c/o', + 'Custom-Header': 'custom1', + 'X-Object-Meta-1': 'meta1'}) + + # PUT object again with X-Backend-Replication-Headers + timestamp2 = normalize_timestamp(time()) + req = Request.blank( + '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': timestamp2, + 'Content-Type': 'text/plain', + 'Content-Length': '14', + 'Etag': '1000d172764c9dbc3a5798a67ec5bb76', + 'Custom-Header': 'custom1', + 'X-Object-Meta-1': 'meta1', + 'X-Static-Large-Object': 'False', + 'X-Backend-Replication-Headers': + 'X-Static-Large-Object'}) + req.body = 'VERIFY SYSMETA' + + with mock.patch.object(self.object_controller, 'allowed_headers', + ['Custom-Header']): + resp = req.get_response(self.object_controller) + self.assertEqual(resp.status_int, 201) + + objfile = os.path.join( + self.testdir, 'sda1', + storage_directory(diskfile.get_data_dir(0), 'p', + hash_path('a', 'c', 'o')), + timestamp2 + '.data') + # X-Static-Large-Object should be copied since it is now allowed by + # replication headers. 
+ self.assertEqual(diskfile.read_metadata(objfile), + {'X-Timestamp': timestamp2, + 'Content-Type': 'text/plain', + 'Content-Length': '14', + 'ETag': '1000d172764c9dbc3a5798a67ec5bb76', + 'name': '/a/c/o', + 'Custom-Header': 'custom1', + 'X-Object-Meta-1': 'meta1', + 'X-Static-Large-Object': 'False'}) + def test_PUT_container_connection(self): def mock_http_connect(response, with_exc=False): @@ -1219,52 +1323,54 @@ class TestObjectController(unittest.TestCase): return lambda *args, **kwargs: FakeConn(response, with_exc) - old_http_connect = object_server.http_connect - try: - timestamp = normalize_timestamp(time()) - req = Request.blank( - '/sda1/p/a/c/o', - environ={'REQUEST_METHOD': 'PUT'}, - headers={'X-Timestamp': timestamp, - 'X-Container-Host': '1.2.3.4:0', - 'X-Container-Partition': '3', - 'X-Container-Device': 'sda1', - 'X-Container-Timestamp': '1', - 'Content-Type': 'application/new1', - 'Content-Length': '0'}) - object_server.http_connect = mock_http_connect(201) + timestamp = normalize_timestamp(time()) + req = Request.blank( + '/sda1/p/a/c/o', + environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': timestamp, + 'X-Container-Host': '1.2.3.4:0', + 'X-Container-Partition': '3', + 'X-Container-Device': 'sda1', + 'X-Container-Timestamp': '1', + 'Content-Type': 'application/new1', + 'Content-Length': '0'}) + with fake_spawn(), mock.patch.object( + object_server, 'http_connect', + mock_http_connect(201)): resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) - timestamp = normalize_timestamp(time()) - req = Request.blank( - '/sda1/p/a/c/o', - environ={'REQUEST_METHOD': 'PUT'}, - headers={'X-Timestamp': timestamp, - 'X-Container-Host': '1.2.3.4:0', - 'X-Container-Partition': '3', - 'X-Container-Device': 'sda1', - 'X-Container-Timestamp': '1', - 'Content-Type': 'application/new1', - 'Content-Length': '0'}) - object_server.http_connect = mock_http_connect(500) + self.assertEqual(resp.status_int, 201) + timestamp = 
normalize_timestamp(time()) + req = Request.blank( + '/sda1/p/a/c/o', + environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': timestamp, + 'X-Container-Host': '1.2.3.4:0', + 'X-Container-Partition': '3', + 'X-Container-Device': 'sda1', + 'X-Container-Timestamp': '1', + 'Content-Type': 'application/new1', + 'Content-Length': '0'}) + with fake_spawn(), mock.patch.object( + object_server, 'http_connect', + mock_http_connect(500)): resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) - timestamp = normalize_timestamp(time()) - req = Request.blank( - '/sda1/p/a/c/o', - environ={'REQUEST_METHOD': 'PUT'}, - headers={'X-Timestamp': timestamp, - 'X-Container-Host': '1.2.3.4:0', - 'X-Container-Partition': '3', - 'X-Container-Device': 'sda1', - 'X-Container-Timestamp': '1', - 'Content-Type': 'application/new1', - 'Content-Length': '0'}) - object_server.http_connect = mock_http_connect(500, with_exc=True) + self.assertEqual(resp.status_int, 201) + timestamp = normalize_timestamp(time()) + req = Request.blank( + '/sda1/p/a/c/o', + environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': timestamp, + 'X-Container-Host': '1.2.3.4:0', + 'X-Container-Partition': '3', + 'X-Container-Device': 'sda1', + 'X-Container-Timestamp': '1', + 'Content-Type': 'application/new1', + 'Content-Length': '0'}) + with fake_spawn(), mock.patch.object( + object_server, 'http_connect', + mock_http_connect(500, with_exc=True)): resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) - finally: - object_server.http_connect = old_http_connect + self.assertEqual(resp.status_int, 201) def test_PUT_ssync_multi_frag(self): timestamp = utils.Timestamp(time()).internal @@ -1283,7 +1389,7 @@ class TestObjectController(unittest.TestCase): req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals( + self.assertEqual( resp.status_int, expected_rsp, 'got %s != %s for frag_index=%s node_index=%s' % ( 
resp.status_int, expected_rsp, @@ -1339,7 +1445,7 @@ class TestObjectController(unittest.TestCase): req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) obj_dir = os.path.join( self.testdir, 'sda1', storage_directory(diskfile.get_data_dir(int(policy)), @@ -1360,12 +1466,12 @@ class TestObjectController(unittest.TestCase): # Test swift.obj.server.ObjectController.HEAD req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) self.assertFalse('X-Backend-Timestamp' in resp.headers) timestamp = normalize_timestamp(time()) @@ -1377,23 +1483,23 @@ class TestObjectController(unittest.TestCase): 'X-Object-Meta-Two': 'Two'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.content_length, 6) - self.assertEquals(resp.content_type, 'application/x-test') - self.assertEquals(resp.headers['content-type'], 'application/x-test') - self.assertEquals( + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.content_length, 6) + self.assertEqual(resp.content_type, 'application/x-test') + self.assertEqual(resp.headers['content-type'], 'application/x-test') + self.assertEqual( resp.headers['last-modified'], strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(math.ceil(float(timestamp))))) - self.assertEquals(resp.headers['etag'], - 
'"0b4c12d7e0a73840c1c4f148fda3b037"') - self.assertEquals(resp.headers['x-object-meta-1'], 'One') - self.assertEquals(resp.headers['x-object-meta-two'], 'Two') + self.assertEqual(resp.headers['etag'], + '"0b4c12d7e0a73840c1c4f148fda3b037"') + self.assertEqual(resp.headers['x-object-meta-1'], 'One') + self.assertEqual(resp.headers['x-object-meta-two'], 'Two') objfile = os.path.join( self.testdir, 'sda1', @@ -1404,7 +1510,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) sleep(.00001) timestamp = normalize_timestamp(time()) @@ -1415,7 +1521,7 @@ class TestObjectController(unittest.TestCase): 'Content-length': '6'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) sleep(.00001) timestamp = normalize_timestamp(time()) @@ -1423,14 +1529,14 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': timestamp}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) - self.assertEquals(resp.headers['X-Backend-Timestamp'], - utils.Timestamp(timestamp).internal) + self.assertEqual(resp.status_int, 404) + self.assertEqual(resp.headers['X-Backend-Timestamp'], + utils.Timestamp(timestamp).internal) def test_HEAD_quarantine_zbyte(self): # Test swift.obj.server.ObjectController.GET @@ -1440,7 +1546,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/x-test'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 
201) + self.assertEqual(resp.status_int, 201) disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o', policy=POLICIES.legacy) disk_file.open() @@ -1453,16 +1559,16 @@ class TestObjectController(unittest.TestCase): diskfile.write_metadata(fp, metadata) file_name = os.path.basename(disk_file._data_file) - self.assertEquals(os.listdir(disk_file._datadir)[0], file_name) + self.assertEqual(os.listdir(disk_file._datadir)[0], file_name) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) quar_dir = os.path.join( self.testdir, 'sda1', 'quarantined', 'objects', os.path.basename(os.path.dirname(disk_file._data_file))) - self.assertEquals(os.listdir(quar_dir)[0], file_name) + self.assertEqual(os.listdir(quar_dir)[0], file_name) def test_OPTIONS(self): conf = {'devices': self.testdir, 'mount_check': 'false'} @@ -1471,24 +1577,24 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/sda1/p/a/c/o', {'REQUEST_METHOD': 'OPTIONS'}) req.content_length = 0 resp = server_handler.OPTIONS(req) - self.assertEquals(200, resp.status_int) + self.assertEqual(200, resp.status_int) for verb in 'OPTIONS GET POST PUT DELETE HEAD REPLICATE \ SSYNC'.split(): self.assertTrue( verb in resp.headers['Allow'].split(', ')) - self.assertEquals(len(resp.headers['Allow'].split(', ')), 8) - self.assertEquals(resp.headers['Server'], - (server_handler.server_type + '/' + swift_version)) + self.assertEqual(len(resp.headers['Allow'].split(', ')), 8) + self.assertEqual(resp.headers['Server'], + (server_handler.server_type + '/' + swift_version)) def test_GET(self): # Test swift.obj.server.ObjectController.GET req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) req = 
Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) self.assertFalse('X-Backend-Timestamp' in resp.headers) timestamp = normalize_timestamp(time()) @@ -1499,45 +1605,45 @@ class TestObjectController(unittest.TestCase): 'X-Object-Meta-Two': 'Two'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.body, 'VERIFY') - self.assertEquals(resp.content_length, 6) - self.assertEquals(resp.content_type, 'application/x-test') - self.assertEquals(resp.headers['content-length'], '6') - self.assertEquals(resp.headers['content-type'], 'application/x-test') - self.assertEquals( + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.body, 'VERIFY') + self.assertEqual(resp.content_length, 6) + self.assertEqual(resp.content_type, 'application/x-test') + self.assertEqual(resp.headers['content-length'], '6') + self.assertEqual(resp.headers['content-type'], 'application/x-test') + self.assertEqual( resp.headers['last-modified'], strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(math.ceil(float(timestamp))))) - self.assertEquals(resp.headers['etag'], - '"0b4c12d7e0a73840c1c4f148fda3b037"') - self.assertEquals(resp.headers['x-object-meta-1'], 'One') - self.assertEquals(resp.headers['x-object-meta-two'], 'Two') + self.assertEqual(resp.headers['etag'], + '"0b4c12d7e0a73840c1c4f148fda3b037"') + self.assertEqual(resp.headers['x-object-meta-1'], 'One') + self.assertEqual(resp.headers['x-object-meta-two'], 'Two') req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}) req.range = 'bytes=1-3' resp = req.get_response(self.object_controller) - 
self.assertEquals(resp.status_int, 206) - self.assertEquals(resp.body, 'ERI') - self.assertEquals(resp.headers['content-length'], '3') + self.assertEqual(resp.status_int, 206) + self.assertEqual(resp.body, 'ERI') + self.assertEqual(resp.headers['content-length'], '3') req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}) req.range = 'bytes=1-' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 206) - self.assertEquals(resp.body, 'ERIFY') - self.assertEquals(resp.headers['content-length'], '5') + self.assertEqual(resp.status_int, 206) + self.assertEqual(resp.body, 'ERIFY') + self.assertEqual(resp.headers['content-length'], '5') req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}) req.range = 'bytes=-2' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 206) - self.assertEquals(resp.body, 'FY') - self.assertEquals(resp.headers['content-length'], '2') + self.assertEqual(resp.status_int, 206) + self.assertEqual(resp.body, 'FY') + self.assertEqual(resp.headers['content-length'], '2') objfile = os.path.join( self.testdir, 'sda1', @@ -1547,7 +1653,7 @@ class TestObjectController(unittest.TestCase): os.unlink(objfile) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) sleep(.00001) timestamp = normalize_timestamp(time()) @@ -1558,7 +1664,7 @@ class TestObjectController(unittest.TestCase): 'Content-Length': '6'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) sleep(.00001) timestamp = normalize_timestamp(time()) @@ -1566,13 +1672,13 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': timestamp}) resp = req.get_response(self.object_controller) - 
self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) - self.assertEquals(resp.headers['X-Backend-Timestamp'], - utils.Timestamp(timestamp).internal) + self.assertEqual(resp.status_int, 404) + self.assertEqual(resp.headers['X-Backend-Timestamp'], + utils.Timestamp(timestamp).internal) def test_GET_if_match(self): req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -1582,44 +1688,44 @@ class TestObjectController(unittest.TestCase): 'Content-Length': '4'}) req.body = 'test' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) etag = resp.etag req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.etag, etag) + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.etag, etag) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'If-Match': '*'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.etag, etag) + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.etag, etag) req = Request.blank('/sda1/p/a/c/o2', environ={'REQUEST_METHOD': 'GET'}, headers={'If-Match': '*'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 412) + self.assertEqual(resp.status_int, 412) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'If-Match': '"%s"' % etag}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.etag, etag) + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.etag, etag) req = Request.blank( '/sda1/p/a/c/o', 
environ={'REQUEST_METHOD': 'GET'}, headers={'If-Match': '"11111111111111111111111111111111"'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 412) + self.assertEqual(resp.status_int, 412) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={ 'If-Match': '"11111111111111111111111111111111", "%s"' % etag}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, @@ -1628,7 +1734,7 @@ class TestObjectController(unittest.TestCase): '"11111111111111111111111111111111", ' '"22222222222222222222222222222222"'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 412) + self.assertEqual(resp.status_int, 412) def test_GET_if_match_etag_is_at(self): headers = { @@ -1640,7 +1746,7 @@ class TestObjectController(unittest.TestCase): headers=headers) req.body = 'test' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) real_etag = resp.etag # match x-backend-etag-is-at @@ -1690,47 +1796,47 @@ class TestObjectController(unittest.TestCase): 'Content-Length': '4'}) req.body = 'test' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) etag = resp.etag req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.etag, etag) + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.etag, etag) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}, headers={'If-Match': '*'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.etag, etag) + self.assertEqual(resp.status_int, 200) 
+ self.assertEqual(resp.etag, etag) req = Request.blank('/sda1/p/a/c/o2', environ={'REQUEST_METHOD': 'HEAD'}, headers={'If-Match': '*'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 412) + self.assertEqual(resp.status_int, 412) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}, headers={'If-Match': '"%s"' % etag}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.etag, etag) + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.etag, etag) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}, headers={'If-Match': '"11111111111111111111111111111111"'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 412) + self.assertEqual(resp.status_int, 412) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}, headers={ 'If-Match': '"11111111111111111111111111111111", "%s"' % etag}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}, @@ -1739,7 +1845,7 @@ class TestObjectController(unittest.TestCase): '"11111111111111111111111111111111", ' '"22222222222222222222222222222222"'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 412) + self.assertEqual(resp.status_int, 412) def test_GET_if_none_match(self): req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -1750,40 +1856,40 @@ class TestObjectController(unittest.TestCase): 'Content-Length': '4'}) req.body = 'test' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) etag = resp.etag req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.object_controller) - 
self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.etag, etag) + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.etag, etag) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'If-None-Match': '*'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 304) - self.assertEquals(resp.etag, etag) - self.assertEquals(resp.headers['Content-Type'], 'application/fizzbuzz') - self.assertEquals(resp.headers['X-Object-Meta-Soup'], 'gazpacho') + self.assertEqual(resp.status_int, 304) + self.assertEqual(resp.etag, etag) + self.assertEqual(resp.headers['Content-Type'], 'application/fizzbuzz') + self.assertEqual(resp.headers['X-Object-Meta-Soup'], 'gazpacho') req = Request.blank('/sda1/p/a/c/o2', environ={'REQUEST_METHOD': 'GET'}, headers={'If-None-Match': '*'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'If-None-Match': '"%s"' % etag}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 304) - self.assertEquals(resp.etag, etag) + self.assertEqual(resp.status_int, 304) + self.assertEqual(resp.etag, etag) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'If-None-Match': '"11111111111111111111111111111111"'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.etag, etag) + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.etag, etag) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, @@ -1791,8 +1897,8 @@ class TestObjectController(unittest.TestCase): '"11111111111111111111111111111111", ' '"%s"' % etag}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 304) - self.assertEquals(resp.etag, etag) + self.assertEqual(resp.status_int, 
304) + self.assertEqual(resp.etag, etag) def test_HEAD_if_none_match(self): req = Request.blank('/sda1/p/a/c/o', @@ -1803,41 +1909,41 @@ class TestObjectController(unittest.TestCase): 'Content-Length': '4'}) req.body = 'test' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) etag = resp.etag req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.etag, etag) + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.etag, etag) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}, headers={'If-None-Match': '*'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 304) - self.assertEquals(resp.etag, etag) + self.assertEqual(resp.status_int, 304) + self.assertEqual(resp.etag, etag) req = Request.blank('/sda1/p/a/c/o2', environ={'REQUEST_METHOD': 'HEAD'}, headers={'If-None-Match': '*'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}, headers={'If-None-Match': '"%s"' % etag}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 304) - self.assertEquals(resp.etag, etag) + self.assertEqual(resp.status_int, 304) + self.assertEqual(resp.etag, etag) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}, headers={'If-None-Match': '"11111111111111111111111111111111"'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.etag, etag) + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.etag, etag) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}, @@ -1845,8 +1951,8 @@ class 
TestObjectController(unittest.TestCase): '"11111111111111111111111111111111", ' '"%s"' % etag}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 304) - self.assertEquals(resp.etag, etag) + self.assertEqual(resp.status_int, 304) + self.assertEqual(resp.etag, etag) def test_GET_if_modified_since(self): timestamp = normalize_timestamp(time()) @@ -1857,44 +1963,44 @@ class TestObjectController(unittest.TestCase): 'Content-Length': '4'}) req.body = 'test' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) since = strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) + 1)) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'If-Modified-Since': since}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 304) + self.assertEqual(resp.status_int, 304) since = \ strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) - 1)) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'If-Modified-Since': since}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) since = \ strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) + 1)) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'If-Modified-Since': since}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 304) + self.assertEqual(resp.status_int, 304) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.object_controller) since = resp.headers['Last-Modified'] - self.assertEquals(since, strftime('%a, %d %b %Y %H:%M:%S 
GMT', - gmtime(math.ceil(float(timestamp))))) + self.assertEqual(since, strftime('%a, %d %b %Y %H:%M:%S GMT', + gmtime(math.ceil(float(timestamp))))) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'If-Modified-Since': since}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 304) + self.assertEqual(resp.status_int, 304) timestamp = normalize_timestamp(int(time())) req = Request.blank('/sda1/p/a/c/o2', @@ -1905,7 +2011,7 @@ class TestObjectController(unittest.TestCase): 'Content-Length': '4'}) req.body = 'test' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) since = strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp))) @@ -1913,7 +2019,7 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'If-Modified-Since': since}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 304) + self.assertEqual(resp.status_int, 304) def test_HEAD_if_modified_since(self): timestamp = normalize_timestamp(time()) @@ -1924,12 +2030,12 @@ class TestObjectController(unittest.TestCase): 'Content-Length': '4'}) req.body = 'test' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) since = strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) + 1)) @@ -1937,7 +2043,7 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'HEAD'}, headers={'If-Modified-Since': since}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 304) + self.assertEqual(resp.status_int, 304) since = \ strftime('%a, %d %b %Y %H:%M:%S GMT', 
gmtime(float(timestamp) - 1)) @@ -1945,7 +2051,7 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'HEAD'}, headers={'If-Modified-Since': since}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) since = \ strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) + 1)) @@ -1953,20 +2059,20 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'HEAD'}, headers={'If-Modified-Since': since}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 304) + self.assertEqual(resp.status_int, 304) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.object_controller) since = resp.headers['Last-Modified'] - self.assertEquals(since, strftime('%a, %d %b %Y %H:%M:%S GMT', - gmtime(math.ceil(float(timestamp))))) + self.assertEqual(since, strftime('%a, %d %b %Y %H:%M:%S GMT', + gmtime(math.ceil(float(timestamp))))) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}, headers={'If-Modified-Since': since}) resp = self.object_controller.GET(req) - self.assertEquals(resp.status_int, 304) + self.assertEqual(resp.status_int, 304) timestamp = normalize_timestamp(int(time())) req = Request.blank('/sda1/p/a/c/o2', @@ -1977,7 +2083,7 @@ class TestObjectController(unittest.TestCase): 'Content-Length': '4'}) req.body = 'test' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) since = strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp))) @@ -1985,7 +2091,7 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'HEAD'}, headers={'If-Modified-Since': since}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 304) + self.assertEqual(resp.status_int, 304) def test_GET_if_unmodified_since(self): timestamp = 
normalize_timestamp(time()) @@ -1997,47 +2103,47 @@ class TestObjectController(unittest.TestCase): 'Content-Length': '4'}) req.body = 'test' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) since = strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) + 1)) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'If-Unmodified-Since': since}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) since = \ strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) - 9)) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'If-Unmodified-Since': since}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 412) - self.assertEquals(resp.headers['Content-Type'], - 'application/cat-picture') - self.assertEquals(resp.headers['X-Object-Meta-Burr'], 'ito') + self.assertEqual(resp.status_int, 412) + self.assertEqual(resp.headers['Content-Type'], + 'application/cat-picture') + self.assertEqual(resp.headers['X-Object-Meta-Burr'], 'ito') since = \ strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) + 9)) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'If-Unmodified-Since': since}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.object_controller) since = resp.headers['Last-Modified'] - self.assertEquals(since, strftime('%a, %d %b %Y %H:%M:%S GMT', - gmtime(math.ceil(float(timestamp))))) + 
self.assertEqual(since, strftime('%a, %d %b %Y %H:%M:%S GMT', + gmtime(math.ceil(float(timestamp))))) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'If-Unmodified-Since': since}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) def test_HEAD_if_unmodified_since(self): timestamp = normalize_timestamp(time()) @@ -2049,7 +2155,7 @@ class TestObjectController(unittest.TestCase): 'Content-Length': '4'}) req.body = 'test' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) since = strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(math.ceil(float(timestamp)) + 1)) @@ -2057,7 +2163,7 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'HEAD'}, headers={'If-Unmodified-Since': since}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) since = strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(math.ceil(float(timestamp)))) @@ -2065,7 +2171,7 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'HEAD'}, headers={'If-Unmodified-Since': since}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) since = strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(math.ceil(float(timestamp)) - 1)) @@ -2073,7 +2179,7 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'HEAD'}, headers={'If-Unmodified-Since': since}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 412) + self.assertEqual(resp.status_int, 412) def test_GET_quarantine(self): # Test swift.obj.server.ObjectController.GET @@ -2083,7 +2189,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/x-test'}) req.body = 'VERIFY' resp = 
req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o', policy=POLICIES.legacy) disk_file.open() @@ -2094,19 +2200,19 @@ class TestObjectController(unittest.TestCase): metadata = {'X-Timestamp': timestamp, 'name': '/a/c/o', 'Content-Length': 6, 'ETag': etag} diskfile.write_metadata(disk_file._fp, metadata) - self.assertEquals(os.listdir(disk_file._datadir)[0], file_name) + self.assertEqual(os.listdir(disk_file._datadir)[0], file_name) req = Request.blank('/sda1/p/a/c/o') resp = req.get_response(self.object_controller) quar_dir = os.path.join( self.testdir, 'sda1', 'quarantined', 'objects', os.path.basename(os.path.dirname(disk_file._data_file))) - self.assertEquals(os.listdir(disk_file._datadir)[0], file_name) + self.assertEqual(os.listdir(disk_file._datadir)[0], file_name) body = resp.body # actually does quarantining - self.assertEquals(body, 'VERIFY') - self.assertEquals(os.listdir(quar_dir)[0], file_name) + self.assertEqual(body, 'VERIFY') + self.assertEqual(os.listdir(quar_dir)[0], file_name) req = Request.blank('/sda1/p/a/c/o') resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_GET_quarantine_zbyte(self): # Test swift.obj.server.ObjectController.GET @@ -2116,7 +2222,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/x-test'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o', policy=POLICIES.legacy) disk_file.open() @@ -2127,15 +2233,15 @@ class TestObjectController(unittest.TestCase): with open(disk_file._data_file, 'w') as fp: diskfile.write_metadata(fp, metadata) - self.assertEquals(os.listdir(disk_file._datadir)[0], file_name) + 
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name) req = Request.blank('/sda1/p/a/c/o') resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) quar_dir = os.path.join( self.testdir, 'sda1', 'quarantined', 'objects', os.path.basename(os.path.dirname(disk_file._data_file))) - self.assertEquals(os.listdir(quar_dir)[0], file_name) + self.assertEqual(os.listdir(quar_dir)[0], file_name) def test_GET_quarantine_range(self): # Test swift.obj.server.ObjectController.GET @@ -2145,7 +2251,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/x-test'}) req.body = 'VERIFY' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o', policy=POLICIES.legacy) disk_file.open() @@ -2156,7 +2262,7 @@ class TestObjectController(unittest.TestCase): metadata = {'X-Timestamp': timestamp, 'name': '/a/c/o', 'Content-Length': 6, 'ETag': etag} diskfile.write_metadata(disk_file._fp, metadata) - self.assertEquals(os.listdir(disk_file._datadir)[0], file_name) + self.assertEqual(os.listdir(disk_file._datadir)[0], file_name) req = Request.blank('/sda1/p/a/c/o') req.range = 'bytes=0-4' # partial resp = req.get_response(self.object_controller) @@ -2164,11 +2270,11 @@ class TestObjectController(unittest.TestCase): self.testdir, 'sda1', 'quarantined', 'objects', os.path.basename(os.path.dirname(disk_file._data_file))) resp.body - self.assertEquals(os.listdir(disk_file._datadir)[0], file_name) + self.assertEqual(os.listdir(disk_file._datadir)[0], file_name) self.assertFalse(os.path.isdir(quar_dir)) req = Request.blank('/sda1/p/a/c/o') resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) req = Request.blank('/sda1/p/a/c/o') req.range = 'bytes=1-6' # partial @@ -2177,7 +2283,7 @@ 
class TestObjectController(unittest.TestCase): self.testdir, 'sda1', 'quarantined', 'objects', os.path.basename(os.path.dirname(disk_file._data_file))) resp.body - self.assertEquals(os.listdir(disk_file._datadir)[0], file_name) + self.assertEqual(os.listdir(disk_file._datadir)[0], file_name) self.assertFalse(os.path.isdir(quar_dir)) req = Request.blank('/sda1/p/a/c/o') @@ -2186,12 +2292,12 @@ class TestObjectController(unittest.TestCase): quar_dir = os.path.join( self.testdir, 'sda1', 'quarantined', 'objects', os.path.basename(os.path.dirname(disk_file._data_file))) - self.assertEquals(os.listdir(disk_file._datadir)[0], file_name) + self.assertEqual(os.listdir(disk_file._datadir)[0], file_name) resp.body self.assertTrue(os.path.isdir(quar_dir)) req = Request.blank('/sda1/p/a/c/o') resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) @mock.patch("time.time", mock_time) def test_DELETE(self): @@ -2199,12 +2305,12 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) # The following should have created a tombstone file timestamp = normalize_timestamp(1000) @@ -2212,7 +2318,7 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': timestamp}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) ts_1000_file = os.path.join( self.testdir, 'sda1', storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p', @@ -2220,7 +2326,7 @@ class 
TestObjectController(unittest.TestCase): utils.Timestamp(timestamp).internal + '.ts') self.assertTrue(os.path.isfile(ts_1000_file)) # There should now be a 1000 ts file. - self.assertEquals(len(os.listdir(os.path.dirname(ts_1000_file))), 1) + self.assertEqual(len(os.listdir(os.path.dirname(ts_1000_file))), 1) # The following should *not* have created a tombstone file. timestamp = normalize_timestamp(999) @@ -2228,7 +2334,7 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': timestamp}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) ts_999_file = os.path.join( self.testdir, 'sda1', storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p', @@ -2236,18 +2342,17 @@ class TestObjectController(unittest.TestCase): utils.Timestamp(timestamp).internal + '.ts') self.assertFalse(os.path.isfile(ts_999_file)) self.assertTrue(os.path.isfile(ts_1000_file)) - self.assertEquals(len(os.listdir(os.path.dirname(ts_1000_file))), 1) + self.assertEqual(len(os.listdir(os.path.dirname(ts_1000_file))), 1) orig_timestamp = utils.Timestamp(1002).internal + headers = {'X-Timestamp': orig_timestamp, + 'Content-Type': 'application/octet-stream', + 'Content-Length': '4'} req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={ - 'X-Timestamp': orig_timestamp, - 'Content-Type': 'application/octet-stream', - 'Content-Length': '4', - }) + headers=headers) req.body = 'test' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) # There should now be 1000 ts and a 1001 data file. 
data_1002_file = os.path.join( self.testdir, 'sda1', @@ -2255,7 +2360,7 @@ class TestObjectController(unittest.TestCase): hash_path('a', 'c', 'o')), orig_timestamp + '.data') self.assertTrue(os.path.isfile(data_1002_file)) - self.assertEquals(len(os.listdir(os.path.dirname(data_1002_file))), 1) + self.assertEqual(len(os.listdir(os.path.dirname(data_1002_file))), 1) # The following should *not* have created a tombstone file. timestamp = normalize_timestamp(1001) @@ -2263,7 +2368,7 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': timestamp}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 409) + self.assertEqual(resp.status_int, 409) self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp) ts_1001_file = os.path.join( self.testdir, 'sda1', @@ -2272,21 +2377,21 @@ class TestObjectController(unittest.TestCase): utils.Timestamp(timestamp).internal + '.ts') self.assertFalse(os.path.isfile(ts_1001_file)) self.assertTrue(os.path.isfile(data_1002_file)) - self.assertEquals(len(os.listdir(os.path.dirname(ts_1001_file))), 1) + self.assertEqual(len(os.listdir(os.path.dirname(ts_1001_file))), 1) timestamp = normalize_timestamp(1003) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': timestamp}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) ts_1003_file = os.path.join( self.testdir, 'sda1', storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p', hash_path('a', 'c', 'o')), utils.Timestamp(timestamp).internal + '.ts') self.assertTrue(os.path.isfile(ts_1003_file)) - self.assertEquals(len(os.listdir(os.path.dirname(ts_1003_file))), 1) + self.assertEqual(len(os.listdir(os.path.dirname(ts_1003_file))), 1) def test_DELETE_container_updates(self): # Test swift.obj.server.ObjectController.DELETE and container @@ -2294,15 +2399,14 @@ class 
TestObjectController(unittest.TestCase): # state. start = time() orig_timestamp = utils.Timestamp(start) + headers = {'X-Timestamp': orig_timestamp.internal, + 'Content-Type': 'application/octet-stream', + 'Content-Length': '4'} req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={ - 'X-Timestamp': orig_timestamp.internal, - 'Content-Type': 'application/octet-stream', - 'Content-Length': '4', - }) + headers=headers) req.body = 'test' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) calls_made = [0] @@ -2319,7 +2423,7 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': timestamp.internal}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 409) + self.assertEqual(resp.status_int, 409) self.assertEqual(resp.headers['x-backend-timestamp'], orig_timestamp.internal) objfile = os.path.join( @@ -2328,8 +2432,8 @@ class TestObjectController(unittest.TestCase): hash_path('a', 'c', 'o')), utils.Timestamp(timestamp).internal + '.ts') self.assertFalse(os.path.isfile(objfile)) - self.assertEquals(len(os.listdir(os.path.dirname(objfile))), 1) - self.assertEquals(0, calls_made[0]) + self.assertEqual(len(os.listdir(os.path.dirname(objfile))), 1) + self.assertEqual(0, calls_made[0]) # The following request should return 204, and the object should # be truly deleted (container update is performed) because this @@ -2340,15 +2444,15 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': timestamp.internal}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) objfile = os.path.join( self.testdir, 'sda1', storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p', hash_path('a', 'c', 'o')), utils.Timestamp(timestamp).internal + '.ts') - 
self.assert_(os.path.isfile(objfile)) - self.assertEquals(1, calls_made[0]) - self.assertEquals(len(os.listdir(os.path.dirname(objfile))), 1) + self.assertTrue(os.path.isfile(objfile)) + self.assertEqual(1, calls_made[0]) + self.assertEqual(len(os.listdir(os.path.dirname(objfile))), 1) # The following request should return a 404, as the object should # already have been deleted, but it should have also performed a @@ -2359,15 +2463,15 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': timestamp.internal}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) objfile = os.path.join( self.testdir, 'sda1', storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p', hash_path('a', 'c', 'o')), utils.Timestamp(timestamp).internal + '.ts') - self.assert_(os.path.isfile(objfile)) - self.assertEquals(2, calls_made[0]) - self.assertEquals(len(os.listdir(os.path.dirname(objfile))), 1) + self.assertTrue(os.path.isfile(objfile)) + self.assertEqual(2, calls_made[0]) + self.assertEqual(len(os.listdir(os.path.dirname(objfile))), 1) # The following request should return a 404, as the object should # already have been deleted, and it should not have performed a @@ -2378,15 +2482,15 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': timestamp.internal}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) objfile = os.path.join( self.testdir, 'sda1', storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p', hash_path('a', 'c', 'o')), utils.Timestamp(timestamp).internal + '.ts') self.assertFalse(os.path.isfile(objfile)) - self.assertEquals(2, calls_made[0]) - self.assertEquals(len(os.listdir(os.path.dirname(objfile))), 1) + self.assertEqual(2, calls_made[0]) + 
self.assertEqual(len(os.listdir(os.path.dirname(objfile))), 1) finally: self.object_controller.container_update = orig_cu @@ -2405,12 +2509,12 @@ class TestObjectController(unittest.TestCase): 'X-Container-Device': 'sda1', 'X-Container-Partition': 'p', 'Content-Type': 'text/plain'}) - with mocked_http_conn( + with fake_spawn(), mocked_http_conn( 200, give_connect=capture_updates) as fake_conn: resp = req.get_response(self.object_controller) self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 201) - self.assertEquals(1, len(container_updates)) + self.assertEqual(1, len(container_updates)) for update in container_updates: ip, port, method, path, headers = update self.assertEqual(ip, '10.0.0.1') @@ -2444,12 +2548,12 @@ class TestObjectController(unittest.TestCase): 'X-Container-Device': 'sda1', 'X-Container-Partition': 'p', 'Content-Type': 'text/html'}) - with mocked_http_conn( + with fake_spawn(), mocked_http_conn( 200, give_connect=capture_updates) as fake_conn: resp = req.get_response(self.object_controller) self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 201) - self.assertEquals(1, len(container_updates)) + self.assertEqual(1, len(container_updates)) for update in container_updates: ip, port, method, path, headers = update self.assertEqual(ip, '10.0.0.1') @@ -2482,12 +2586,12 @@ class TestObjectController(unittest.TestCase): 'X-Container-Device': 'sda1', 'X-Container-Partition': 'p', 'Content-Type': 'text/enriched'}) - with mocked_http_conn( + with fake_spawn(), mocked_http_conn( 200, give_connect=capture_updates) as fake_conn: resp = req.get_response(self.object_controller) self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 201) - self.assertEquals(1, len(container_updates)) + self.assertEqual(1, len(container_updates)) for update in container_updates: ip, port, method, path, headers = update self.assertEqual(ip, '10.0.0.1') @@ -2520,12 
+2624,12 @@ class TestObjectController(unittest.TestCase): 'X-Container-Host': '10.0.0.1:8080', 'X-Container-Device': 'sda1', 'X-Container-Partition': 'p'}) - with mocked_http_conn( + with fake_spawn(), mocked_http_conn( 200, give_connect=capture_updates) as fake_conn: resp = req.get_response(self.object_controller) self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 204) - self.assertEquals(1, len(container_updates)) + self.assertEqual(1, len(container_updates)) for update in container_updates: ip, port, method, path, headers = update self.assertEqual(ip, '10.0.0.1') @@ -2551,12 +2655,12 @@ class TestObjectController(unittest.TestCase): 'X-Container-Host': '10.0.0.1:8080', 'X-Container-Device': 'sda1', 'X-Container-Partition': 'p'}) - with mocked_http_conn( + with fake_spawn(), mocked_http_conn( 200, give_connect=capture_updates) as fake_conn: resp = req.get_response(self.object_controller) self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 404) - self.assertEquals(1, len(container_updates)) + self.assertEqual(1, len(container_updates)) for update in container_updates: ip, port, method, path, headers = update self.assertEqual(ip, '10.0.0.1') @@ -2578,7 +2682,7 @@ class TestObjectController(unittest.TestCase): def test_call_bad_request(self): # Test swift.obj.server.ObjectController.__call__ - inbuf = WsgiStringIO() + inbuf = WsgiBytesIO() errbuf = StringIO() outbuf = StringIO() @@ -2601,11 +2705,11 @@ class TestObjectController(unittest.TestCase): 'wsgi.multiprocess': False, 'wsgi.run_once': False}, start_response) - self.assertEquals(errbuf.getvalue(), '') - self.assertEquals(outbuf.getvalue()[:4], '400 ') + self.assertEqual(errbuf.getvalue(), '') + self.assertEqual(outbuf.getvalue()[:4], '400 ') def test_call_not_found(self): - inbuf = WsgiStringIO() + inbuf = WsgiBytesIO() errbuf = StringIO() outbuf = StringIO() @@ -2628,11 +2732,11 @@ class 
TestObjectController(unittest.TestCase): 'wsgi.multiprocess': False, 'wsgi.run_once': False}, start_response) - self.assertEquals(errbuf.getvalue(), '') - self.assertEquals(outbuf.getvalue()[:4], '404 ') + self.assertEqual(errbuf.getvalue(), '') + self.assertEqual(outbuf.getvalue()[:4], '404 ') def test_call_bad_method(self): - inbuf = WsgiStringIO() + inbuf = WsgiBytesIO() errbuf = StringIO() outbuf = StringIO() @@ -2655,8 +2759,8 @@ class TestObjectController(unittest.TestCase): 'wsgi.multiprocess': False, 'wsgi.run_once': False}, start_response) - self.assertEquals(errbuf.getvalue(), '') - self.assertEquals(outbuf.getvalue()[:4], '405 ') + self.assertEqual(errbuf.getvalue(), '') + self.assertEqual(outbuf.getvalue()[:4], '405 ') def test_call_name_collision(self): def my_check(*args): @@ -2668,7 +2772,7 @@ class TestObjectController(unittest.TestCase): with mock.patch("swift.obj.diskfile.hash_path", my_hash_path): with mock.patch("swift.obj.server.check_object_creation", my_check): - inbuf = WsgiStringIO() + inbuf = WsgiBytesIO() errbuf = StringIO() outbuf = StringIO() @@ -2694,10 +2798,10 @@ class TestObjectController(unittest.TestCase): 'wsgi.multiprocess': False, 'wsgi.run_once': False}, start_response) - self.assertEquals(errbuf.getvalue(), '') - self.assertEquals(outbuf.getvalue()[:4], '201 ') + self.assertEqual(errbuf.getvalue(), '') + self.assertEqual(outbuf.getvalue()[:4], '201 ') - inbuf = WsgiStringIO() + inbuf = WsgiBytesIO() errbuf = StringIO() outbuf = StringIO() @@ -2723,8 +2827,8 @@ class TestObjectController(unittest.TestCase): 'wsgi.multiprocess': False, 'wsgi.run_once': False}, start_response) - self.assertEquals(errbuf.getvalue(), '') - self.assertEquals(outbuf.getvalue()[:4], '403 ') + self.assertEqual(errbuf.getvalue(), '') + self.assertEqual(outbuf.getvalue()[:4], '403 ') def test_invalid_method_doesnt_exist(self): errbuf = StringIO() @@ -2737,8 +2841,8 @@ class TestObjectController(unittest.TestCase): 'REQUEST_METHOD': 
'method_doesnt_exist', 'PATH_INFO': '/sda1/p/a/c/o'}, start_response) - self.assertEquals(errbuf.getvalue(), '') - self.assertEquals(outbuf.getvalue()[:4], '405 ') + self.assertEqual(errbuf.getvalue(), '') + self.assertEqual(outbuf.getvalue()[:4], '405 ') def test_invalid_method_is_not_public(self): errbuf = StringIO() @@ -2750,8 +2854,8 @@ class TestObjectController(unittest.TestCase): self.object_controller.__call__({'REQUEST_METHOD': '__init__', 'PATH_INFO': '/sda1/p/a/c/o'}, start_response) - self.assertEquals(errbuf.getvalue(), '') - self.assertEquals(outbuf.getvalue()[:4], '405 ') + self.assertEqual(errbuf.getvalue(), '') + self.assertEqual(outbuf.getvalue()[:4], '405 ') def test_chunked_put(self): listener = listen(('localhost', 0)) @@ -2769,7 +2873,7 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) sock = connect_tcp(('localhost', port)) fd = sock.makefile() fd.write('GET /sda1/p/a/c/o HTTP/1.1\r\nHost: localhost\r\n' @@ -2777,9 +2881,9 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) response = fd.read() - self.assertEquals(response, 'oh hai') + self.assertEqual(response, 'oh hai') killer.kill() def test_chunked_content_length_mismatch_zero(self): @@ -2799,7 +2903,7 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) sock = connect_tcp(('localhost', port)) fd = sock.makefile() fd.write('GET /sda1/p/a/c/o HTTP/1.1\r\nHost: localhost\r\n' @@ -2807,9 +2911,9 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) + 
self.assertEqual(headers[:len(exp)], exp) response = fd.read() - self.assertEquals(response, 'oh hai') + self.assertEqual(response, 'oh hai') killer.kill() def test_max_object_name_length(self): @@ -2823,7 +2927,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'DATA' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c/' + ('2' * (max_name_len + 1)), environ={'REQUEST_METHOD': 'PUT'}, @@ -2832,7 +2936,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'DATA' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_max_upload_time(self): @@ -2857,7 +2961,7 @@ class TestObjectController(unittest.TestCase): headers={'X-Timestamp': normalize_timestamp(time()), 'Content-Length': '4', 'Content-Type': 'text/plain'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) self.object_controller.max_upload_time = 0.1 req = Request.blank( '/sda1/p/a/c/o', @@ -2865,7 +2969,7 @@ class TestObjectController(unittest.TestCase): headers={'X-Timestamp': normalize_timestamp(time()), 'Content-Length': '4', 'Content-Type': 'text/plain'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 408) + self.assertEqual(resp.status_int, 408) def test_short_body(self): @@ -2889,7 +2993,7 @@ class TestObjectController(unittest.TestCase): headers={'X-Timestamp': normalize_timestamp(time()), 'Content-Length': '4', 'Content-Type': 'text/plain'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 499) + self.assertEqual(resp.status_int, 499) def test_bad_sinces(self): req = Request.blank( @@ -2898,17 +3002,17 @@ class 
TestObjectController(unittest.TestCase): 'Content-Length': '4', 'Content-Type': 'text/plain'}, body=' ') resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'If-Unmodified-Since': 'Not a valid date'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'If-Modified-Since': 'Not a valid date'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) too_big_date_list = list(datetime.datetime.max.timetuple()) too_big_date_list[0] += 1 # bump up the year @@ -2918,7 +3022,7 @@ class TestObjectController(unittest.TestCase): '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'If-Unmodified-Since': too_big_date}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) def test_content_encoding(self): req = Request.blank( @@ -2928,16 +3032,16 @@ class TestObjectController(unittest.TestCase): 'Content-Encoding': 'gzip'}, body=' ') resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.headers['content-encoding'], 'gzip') + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.headers['content-encoding'], 'gzip') req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.headers['content-encoding'], 'gzip') 
+ self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.headers['content-encoding'], 'gzip') def test_async_update_http_connect(self): policy = random.choice(list(POLICIES)) @@ -2958,7 +3062,7 @@ class TestObjectController(unittest.TestCase): policy) finally: object_server.http_connect = orig_http_connect - self.assertEquals( + self.assertEqual( given_args, ['127.0.0.1', '1234', 'sdc1', 1, 'PUT', '/a/c/o', { 'x-timestamp': '1', 'x-out': 'set', @@ -3020,16 +3124,16 @@ class TestObjectController(unittest.TestCase): 'X-Delete-At-Partition': '6237', 'X-Delete-At-Device': 'sdp,sdq'}) - with mock.patch.object(object_server, 'http_connect', - fake_http_connect): + with fake_spawn(), mock.patch.object( + object_server, 'http_connect', fake_http_connect): resp = req.get_response(self.object_controller) self.assertEqual(resp.status_int, 201) http_connect_args.sort(key=operator.itemgetter('ipaddr')) - self.assertEquals(len(http_connect_args), 3) - self.assertEquals( + self.assertEqual(len(http_connect_args), 3) + self.assertEqual( http_connect_args[0], {'ipaddr': '1.2.3.4', 'port': '5', @@ -3043,12 +3147,11 @@ class TestObjectController(unittest.TestCase): 'x-etag': 'd41d8cd98f00b204e9800998ecf8427e', 'x-size': '0', 'x-timestamp': utils.Timestamp('12345').internal, - 'X-Backend-Storage-Policy-Index': '37', 'referer': 'PUT http://localhost/sda1/p/a/c/o', 'user-agent': 'object-server %d' % os.getpid(), 'X-Backend-Storage-Policy-Index': int(policy), 'x-trans-id': '-'})}) - self.assertEquals( + self.assertEqual( http_connect_args[1], {'ipaddr': '10.1.1.1', 'port': '6001', @@ -3067,7 +3170,7 @@ class TestObjectController(unittest.TestCase): # system account storage policy is 0 'X-Backend-Storage-Policy-Index': 0, 'x-trans-id': '-'})}) - self.assertEquals( + self.assertEqual( http_connect_args[2], {'ipaddr': '10.2.2.2', 'port': '6002', @@ -3133,14 +3236,14 @@ class TestObjectController(unittest.TestCase): 'X-Container-Host': '1.2.3.4:5, 6.7.8.9:10', 'X-Container-Device': 
'sdb1, sdf1'}) - with mock.patch.object(object_server, 'http_connect', - fake_http_connect): + with fake_spawn(), mock.patch.object( + object_server, 'http_connect', fake_http_connect): req.get_response(self.object_controller) http_connect_args.sort(key=operator.itemgetter('ipaddr')) - self.assertEquals(len(http_connect_args), 2) - self.assertEquals( + self.assertEqual(len(http_connect_args), 2) + self.assertEqual( http_connect_args[0], {'ipaddr': '1.2.3.4', 'port': '5', @@ -3158,7 +3261,7 @@ class TestObjectController(unittest.TestCase): 'referer': 'PUT http://localhost/sda1/p/a/c/o', 'user-agent': 'object-server %d' % os.getpid(), 'x-trans-id': '-'})}) - self.assertEquals( + self.assertEqual( http_connect_args[1], {'ipaddr': '6.7.8.9', 'port': '10', @@ -3210,12 +3313,12 @@ class TestObjectController(unittest.TestCase): headers['X-Object-Sysmeta-Ec-Frag-Index'] = '2' req = Request.blank( '/sda1/p/a/c/o', method='PUT', body='', headers=headers) - with mocked_http_conn( + with fake_spawn(), mocked_http_conn( 500, 500, give_connect=capture_updates) as fake_conn: resp = req.get_response(self.object_controller) self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 201) - self.assertEquals(2, len(container_updates)) + self.assertEqual(2, len(container_updates)) delete_at_update, container_update = container_updates # delete_at_update ip, port, method, path, headers = delete_at_update @@ -3253,11 +3356,11 @@ class TestObjectController(unittest.TestCase): found_files.append(async_file) data = pickle.load(open(async_file)) if data['account'] == 'a': - self.assertEquals( + self.assertEqual( int(data['headers'] ['X-Backend-Storage-Policy-Index']), int(policy)) elif data['account'] == '.expiring_objects': - self.assertEquals( + self.assertEqual( int(data['headers'] ['X-Backend-Storage-Policy-Index']), 0) else: @@ -3285,7 +3388,7 @@ class TestObjectController(unittest.TestCase): object_server.http_connect = orig_http_connect 
utils.HASH_PATH_PREFIX = _prefix async_dir = diskfile.get_async_dir(policy) - self.assertEquals( + self.assertEqual( pickle.load(open(os.path.join( self.testdir, 'sda1', async_dir, 'a83', '06fbf0b514e5199dfc4e00f42eb5ea83-%s' % @@ -3326,7 +3429,7 @@ class TestObjectController(unittest.TestCase): 'X-Backend-Storage-Policy-Index': int(policy)}, 'sda1', policy) async_dir = diskfile.get_async_dir(policy) - self.assertEquals( + self.assertEqual( pickle.load(open(os.path.join( self.testdir, 'sda1', async_dir, 'a83', '06fbf0b514e5199dfc4e00f42eb5ea83-%s' % @@ -3429,7 +3532,7 @@ class TestObjectController(unittest.TestCase): 'x-size': '0', 'x-etag': 'd41d8cd98f00b204e9800998ecf8427e', 'x-content-type': 'text/plain', 'x-timestamp': '1'}, 'sda1', policy) - self.assertEquals(given_args, []) + self.assertEqual(given_args, []) def test_container_update_success(self): container_updates = [] @@ -3446,7 +3549,7 @@ class TestObjectController(unittest.TestCase): 'X-Container-Partition': 'cpartition', 'X-Container-Device': 'cdevice', 'Content-Type': 'text/plain'}, body='') - with mocked_http_conn( + with fake_spawn(), mocked_http_conn( 200, give_connect=capture_updates) as fake_conn: resp = req.get_response(self.object_controller) self.assertRaises(StopIteration, fake_conn.code_iter.next) @@ -3487,7 +3590,7 @@ class TestObjectController(unittest.TestCase): } req = Request.blank('/sda1/0/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers=headers, body='') - with mocked_http_conn( + with fake_spawn(), mocked_http_conn( 200, give_connect=capture_updates) as fake_conn: resp = req.get_response(self.object_controller) self.assertRaises(StopIteration, fake_conn.code_iter.next) @@ -3528,7 +3631,7 @@ class TestObjectController(unittest.TestCase): given_args[:] = args diskfile_mgr = self.object_controller._diskfile_router[policy] diskfile_mgr.pickle_async_update = fake_pickle_async_update - with mocked_http_conn(500) as fake_conn: + with fake_spawn(), mocked_http_conn(500) as fake_conn: resp = 
req.get_response(self.object_controller) self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 201) @@ -3556,6 +3659,104 @@ class TestObjectController(unittest.TestCase): 'container': 'c', 'op': 'PUT'}) + def test_container_update_as_greenthread(self): + greenthreads = [] + saved_spawn_calls = [] + called_async_update_args = [] + + def local_fake_spawn(func, *a, **kw): + saved_spawn_calls.append((func, a, kw)) + return mock.MagicMock() + + def local_fake_async_update(*a, **kw): + # just capture the args to see that we would have called + called_async_update_args.append([a, kw]) + + req = Request.blank( + '/sda1/p/a/c/o', + environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': '12345', + 'Content-Type': 'application/burrito', + 'Content-Length': '0', + 'X-Backend-Storage-Policy-Index': 0, + 'X-Container-Partition': '20', + 'X-Container-Host': '1.2.3.4:5', + 'X-Container-Device': 'sdb1'}) + with mock.patch.object(object_server, 'spawn', + local_fake_spawn): + with mock.patch.object(self.object_controller, + 'async_update', + local_fake_async_update): + resp = req.get_response(self.object_controller) + # check the response is completed and successful + self.assertEqual(resp.status_int, 201) + # check that async_update hasn't been called + self.assertFalse(len(called_async_update_args)) + # now do the work in greenthreads + for func, a, kw in saved_spawn_calls: + gt = spawn(func, *a, **kw) + greenthreads.append(gt) + # wait for the greenthreads to finish + for gt in greenthreads: + gt.wait() + # check that the calls to async_update have happened + headers_out = {'X-Size': '0', + 'X-Content-Type': 'application/burrito', + 'X-Timestamp': '0000012345.00000', + 'X-Trans-Id': '-', + 'Referer': 'PUT http://localhost/sda1/p/a/c/o', + 'X-Backend-Storage-Policy-Index': '0', + 'X-Etag': 'd41d8cd98f00b204e9800998ecf8427e'} + expected = [('PUT', 'a', 'c', 'o', '1.2.3.4:5', '20', 'sdb1', + headers_out, 'sda1', POLICIES[0]), + 
{'logger_thread_locals': (None, None)}] + self.assertEqual(called_async_update_args, [expected]) + + def test_container_update_as_greenthread_with_timeout(self): + ''' + give it one container to update (for only one greenthred) + fake the greenthred so it will raise a timeout + test that the right message is logged and the method returns None + ''' + called_async_update_args = [] + + def local_fake_spawn(func, *a, **kw): + m = mock.MagicMock() + + def wait_with_error(): + raise Timeout() + m.wait = wait_with_error # because raise can't be in a lambda + return m + + def local_fake_async_update(*a, **kw): + # just capture the args to see that we would have called + called_async_update_args.append([a, kw]) + + req = Request.blank( + '/sda1/p/a/c/o', + environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': '12345', + 'Content-Type': 'application/burrito', + 'Content-Length': '0', + 'X-Backend-Storage-Policy-Index': 0, + 'X-Container-Partition': '20', + 'X-Container-Host': '1.2.3.4:5', + 'X-Container-Device': 'sdb1'}) + with mock.patch.object(object_server, 'spawn', + local_fake_spawn): + with mock.patch.object(self.object_controller, + 'container_update_timeout', + 1.414213562): + resp = req.get_response(self.object_controller) + # check the response is completed and successful + self.assertEqual(resp.status_int, 201) + # check that the timeout was logged + expected_logged_error = "Container update timeout (1.4142s) " \ + "waiting for [('1.2.3.4:5', 'sdb1')]" + self.assertTrue( + expected_logged_error in + self.object_controller.logger.get_lines_for_level('debug')) + def test_container_update_bad_args(self): policy = random.choice(list(POLICIES)) given_args = [] @@ -3608,7 +3809,7 @@ class TestObjectController(unittest.TestCase): fake_async_update): self.object_controller.delete_at_update( 'DELETE', 2, 'a', 'c', 'o', req, 'sda1', policy) - self.assertEquals( + self.assertEqual( given_args, [ 'DELETE', '.expiring_objects', '0000000000', '0000000002-a/c/o', None, 
None, None, @@ -3638,7 +3839,7 @@ class TestObjectController(unittest.TestCase): int(policy)}) self.object_controller.delete_at_update( 'DELETE', -2, 'a', 'c', 'o', req, 'sda1', policy) - self.assertEquals(given_args, [ + self.assertEqual(given_args, [ 'DELETE', '.expiring_objects', '0000000000', '0000000000-a/c/o', None, None, None, HeaderKeyDict({ @@ -3674,7 +3875,7 @@ class TestObjectController(unittest.TestCase): 86400, 'a', 'c', 'o') self.assertEqual(expiring_obj_container, expected_exp_cont) - self.assertEquals(given_args, [ + self.assertEqual(given_args, [ 'DELETE', '.expiring_objects', '9999999999-a/c/o', None, None, None, HeaderKeyDict({ @@ -3707,7 +3908,7 @@ class TestObjectController(unittest.TestCase): 'X-Backend-Storage-Policy-Index': int(policy)}) self.object_controller.delete_at_update('PUT', 2, 'a', 'c', 'o', req, 'sda1', policy) - self.assertEquals( + self.assertEqual( given_args, [ 'PUT', '.expiring_objects', '0000000000', '0000000002-a/c/o', '127.0.0.1:1234', @@ -3744,7 +3945,7 @@ class TestObjectController(unittest.TestCase): 'X-Backend-Storage-Policy-Index': int(policy)}) self.object_controller.delete_at_update('PUT', 2, 'a', 'c', 'o', req, 'sda1', policy) - self.assertEquals( + self.assertEqual( self.logger.get_lines_for_level('warning'), ['X-Delete-At-Container header must be specified for expiring ' 'objects background PUT to work properly. 
Making best guess as ' @@ -3766,7 +3967,7 @@ class TestObjectController(unittest.TestCase): 'X-Backend-Storage-Policy-Index': int(policy)}) self.object_controller.delete_at_update('DELETE', 2, 'a', 'c', 'o', req, 'sda1', policy) - self.assertEquals( + self.assertEqual( given_args, [ 'DELETE', '.expiring_objects', '0000000000', '0000000002-a/c/o', None, None, @@ -3796,7 +3997,7 @@ class TestObjectController(unittest.TestCase): 'X-Backend-Storage-Policy-Index': int(policy)}) self.object_controller.delete_at_update( 'DELETE', -2, 'a', 'c', 'o', req, 'sda1', policy) - self.assertEquals(given_args, []) + self.assertEqual(given_args, []) def test_POST_calls_delete_at(self): policy = random.choice(list(POLICIES)) @@ -3816,8 +4017,8 @@ class TestObjectController(unittest.TestCase): 'X-Object-Sysmeta-Ec-Frag-Index': 2}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) - self.assertEquals(given_args, []) + self.assertEqual(resp.status_int, 201) + self.assertEqual(given_args, []) sleep(.00001) req = Request.blank( @@ -3827,8 +4028,8 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/x-test', 'X-Backend-Storage-Policy-Index': int(policy)}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 202) - self.assertEquals(given_args, []) + self.assertEqual(resp.status_int, 202) + self.assertEqual(given_args, []) sleep(.00001) timestamp1 = normalize_timestamp(time()) @@ -3841,8 +4042,8 @@ class TestObjectController(unittest.TestCase): 'X-Delete-At': delete_at_timestamp1, 'X-Backend-Storage-Policy-Index': int(policy)}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 202) - self.assertEquals( + self.assertEqual(resp.status_int, 202) + self.assertEqual( given_args, [ 'PUT', int(delete_at_timestamp1), 'a', 'c', 'o', given_args[5], 'sda1', policy]) @@ -3861,8 +4062,8 @@ class TestObjectController(unittest.TestCase): 'X-Delete-At': 
delete_at_timestamp2, 'X-Backend-Storage-Policy-Index': int(policy)}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 202) - self.assertEquals( + self.assertEqual(resp.status_int, 202) + self.assertEqual( given_args, [ 'PUT', int(delete_at_timestamp2), 'a', 'c', 'o', given_args[5], 'sda1', policy, @@ -3887,8 +4088,8 @@ class TestObjectController(unittest.TestCase): 'X-Object-Sysmeta-Ec-Frag-Index': 4}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) - self.assertEquals(given_args, []) + self.assertEqual(resp.status_int, 201) + self.assertEqual(given_args, []) sleep(.00001) timestamp1 = normalize_timestamp(time()) @@ -3903,8 +4104,8 @@ class TestObjectController(unittest.TestCase): 'X-Object-Sysmeta-Ec-Frag-Index': 3}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) - self.assertEquals( + self.assertEqual(resp.status_int, 201) + self.assertEqual( given_args, [ 'PUT', int(delete_at_timestamp1), 'a', 'c', 'o', given_args[5], 'sda1', policy]) @@ -3926,8 +4127,8 @@ class TestObjectController(unittest.TestCase): 'X-Object-Sysmeta-Ec-Frag-Index': 3}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) - self.assertEquals( + self.assertEqual(resp.status_int, 201) + self.assertEqual( given_args, [ 'PUT', int(delete_at_timestamp2), 'a', 'c', 'o', given_args[5], 'sda1', policy, @@ -3950,13 +4151,13 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'X-Timestamp': normalize_timestamp(test_time)}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + 
self.assertEqual(resp.status_int, 200) orig_time = object_server.time.time try: @@ -3978,13 +4179,13 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'X-Timestamp': normalize_timestamp(test_time)}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) finally: object_server.time.time = orig_time @@ -3997,9 +4198,9 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'X-Timestamp': normalize_timestamp(t)}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) - self.assertEquals(resp.headers['X-Backend-Timestamp'], - utils.Timestamp(put_timestamp)) + self.assertEqual(resp.status_int, 404) + self.assertEqual(resp.headers['X-Backend-Timestamp'], + utils.Timestamp(put_timestamp)) finally: object_server.time.time = orig_time @@ -4019,14 +4220,14 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}, headers={'X-Timestamp': normalize_timestamp(test_time)}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) orig_time = object_server.time.time try: @@ -4048,13 +4249,13 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = 
Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}, headers={'X-Timestamp': normalize_timestamp(test_time)}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) finally: object_server.time.time = orig_time @@ -4067,9 +4268,9 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'HEAD'}, headers={'X-Timestamp': normalize_timestamp(time())}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) - self.assertEquals(resp.headers['X-Backend-Timestamp'], - utils.Timestamp(put_timestamp)) + self.assertEqual(resp.status_int, 404) + self.assertEqual(resp.headers['X-Backend-Timestamp'], + utils.Timestamp(put_timestamp)) finally: object_server.time.time = orig_time @@ -4089,14 +4290,14 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': normalize_timestamp(test_time - 1500)}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 202) + self.assertEqual(resp.status_int, 202) delete_at_timestamp = int(time() + 1) delete_at_container = str( @@ -4112,7 +4313,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) orig_time = object_server.time.time try: @@ -4123,7 +4324,7 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': normalize_timestamp(time())}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + 
self.assertEqual(resp.status_int, 404) finally: object_server.time.time = orig_time @@ -4143,7 +4344,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) orig_time = object_server.time.time try: @@ -4154,7 +4355,7 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': normalize_timestamp(time())}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) finally: object_server.time.time = orig_time @@ -4176,21 +4377,21 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) # sanity req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'}, headers={'X-Timestamp': test_timestamp}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.body, 'TEST') + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.body, 'TEST') objfile = os.path.join( self.testdir, 'sda1', storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p', hash_path('a', 'c', 'o')), utils.Timestamp(test_timestamp).internal + '.data') - self.assert_(os.path.isfile(objfile)) + self.assertTrue(os.path.isfile(objfile)) # move time past expirery with mock.patch('swift.obj.diskfile.time') as mock_time: @@ -4200,9 +4401,9 @@ class TestObjectController(unittest.TestCase): headers={'X-Timestamp': test_timestamp}) resp = req.get_response(self.object_controller) # request will 404 - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) # but file still exists - self.assert_(os.path.isfile(objfile)) + 
self.assertTrue(os.path.isfile(objfile)) # make the x-if-delete-at with some wrong bits req = Request.blank( @@ -4211,7 +4412,7 @@ class TestObjectController(unittest.TestCase): headers={'X-Timestamp': delete_at_timestamp, 'X-If-Delete-At': int(time() + 1)}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 412) + self.assertEqual(resp.status_int, 412) self.assertTrue(os.path.isfile(objfile)) # make the x-if-delete-at with all the right bits @@ -4221,7 +4422,7 @@ class TestObjectController(unittest.TestCase): headers={'X-Timestamp': delete_at_timestamp, 'X-If-Delete-At': delete_at_timestamp}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) self.assertFalse(os.path.isfile(objfile)) # make the x-if-delete-at with all the right bits (again) @@ -4231,7 +4432,7 @@ class TestObjectController(unittest.TestCase): headers={'X-Timestamp': delete_at_timestamp, 'X-If-Delete-At': delete_at_timestamp}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 412) + self.assertEqual(resp.status_int, 412) self.assertFalse(os.path.isfile(objfile)) # make the x-if-delete-at for some not found @@ -4241,7 +4442,7 @@ class TestObjectController(unittest.TestCase): headers={'X-Timestamp': delete_at_timestamp, 'X-If-Delete-At': delete_at_timestamp}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_DELETE_if_delete_at(self): test_time = time() + 10000 @@ -4252,14 +4453,14 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': normalize_timestamp(test_time - 98)}) resp = 
req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) delete_at_timestamp = int(test_time - 1) delete_at_container = str( @@ -4275,7 +4476,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c/o', @@ -4283,14 +4484,14 @@ class TestObjectController(unittest.TestCase): headers={'X-Timestamp': normalize_timestamp(test_time - 95), 'X-If-Delete-At': str(int(test_time))}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 412) + self.assertEqual(resp.status_int, 412) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': normalize_timestamp(test_time - 95)}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) delete_at_timestamp = int(test_time - 1) delete_at_container = str( @@ -4306,28 +4507,28 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': normalize_timestamp(test_time - 92), 'X-If-Delete-At': str(int(test_time))}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 412) + self.assertEqual(resp.status_int, 412) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': normalize_timestamp(test_time - 92), 'X-If-Delete-At': delete_at_timestamp}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 204) + 
self.assertEqual(resp.status_int, 204) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': normalize_timestamp(test_time - 92), 'X-If-Delete-At': 'abc'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_DELETE_calls_delete_at(self): given_args = [] @@ -4351,8 +4552,8 @@ class TestObjectController(unittest.TestCase): 'X-Delete-At-Container': delete_at_container1}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) - self.assertEquals(given_args, [ + self.assertEqual(resp.status_int, 201) + self.assertEqual(given_args, [ 'PUT', int(delete_at_timestamp1), 'a', 'c', 'o', given_args[5], 'sda1', POLICIES[0]]) @@ -4367,8 +4568,8 @@ class TestObjectController(unittest.TestCase): headers={'X-Timestamp': timestamp2, 'Content-Type': 'application/octet-stream'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 204) - self.assertEquals(given_args, [ + self.assertEqual(resp.status_int, 204) + self.assertEqual(given_args, [ 'DELETE', int(delete_at_timestamp1), 'a', 'c', 'o', given_args[5], 'sda1', POLICIES[0]]) @@ -4381,7 +4582,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) self.assertTrue('X-Delete-At in past' in resp.body) def test_POST_delete_at_in_past(self): @@ -4393,7 +4594,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'TEST' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) req = Request.blank( '/sda1/p/a/c/o', @@ -4401,7 +4602,7 @@ class TestObjectController(unittest.TestCase): headers={'X-Timestamp': 
normalize_timestamp(time() + 1), 'X-Delete-At': str(int(time() - 1))}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) self.assertTrue('X-Delete-At in past' in resp.body) def test_REPLICATE_works(self): @@ -4421,9 +4622,9 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'REPLICATE'}, headers={}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) p_data = pickle.loads(resp.body) - self.assertEquals(p_data, {1: 2}) + self.assertEqual(p_data, {1: 2}) finally: tpool.execute = was_tpool_exe diskfile.DiskFileManager._get_hashes = was_get_hashes @@ -4502,7 +4703,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream', 'Expect': '100-continue'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 507) + self.assertEqual(resp.status_int, 507) self.assertFalse(body_reader.read_called) finally: diskfile.fallocate = orig_fallocate @@ -4544,7 +4745,7 @@ class TestObjectController(unittest.TestCase): def test_serv_reserv(self): # Test replication_server flag was set from configuration file. 
conf = {'devices': self.testdir, 'mount_check': 'false'} - self.assertEquals( + self.assertEqual( object_server.ObjectController(conf).replication_server, None) for val in [True, '1', 'True', 'true']: conf['replication_server'] = val @@ -4564,12 +4765,12 @@ class TestObjectController(unittest.TestCase): self.assertFalse(hasattr(method, 'replication')) for method_name in repl_methods: method = getattr(self.object_controller, method_name) - self.assertEquals(method.replication, True) + self.assertEqual(method.replication, True) def test_correct_allowed_method(self): # Test correct work for allowed method using # swift.obj.server.ObjectController.__call__ - inbuf = WsgiStringIO() + inbuf = WsgiBytesIO() errbuf = StringIO() outbuf = StringIO() self.object_controller = object_server.app_factory( @@ -4607,7 +4808,7 @@ class TestObjectController(unittest.TestCase): def test_not_allowed_method(self): # Test correct work for NOT allowed method using # swift.obj.server.ObjectController.__call__ - inbuf = WsgiStringIO() + inbuf = WsgiBytesIO() errbuf = StringIO() outbuf = StringIO() self.object_controller = object_server.ObjectController( @@ -4686,11 +4887,11 @@ class TestObjectController(unittest.TestCase): 'wsgi.multiprocess': False, 'wsgi.run_once': False} self.object_controller(env, start_response) - self.assertEquals(errbuf.getvalue(), '') - self.assertEquals(outbuf.getvalue()[:4], '405 ') + self.assertEqual(errbuf.getvalue(), '') + self.assertEqual(outbuf.getvalue()[:4], '405 ') def test_not_utf8_and_not_logging_requests(self): - inbuf = WsgiStringIO() + inbuf = WsgiBytesIO() errbuf = StringIO() outbuf = StringIO() self.object_controller = object_server.ObjectController( @@ -4728,7 +4929,7 @@ class TestObjectController(unittest.TestCase): self.assertEqual(self.logger.get_lines_for_level('info'), []) def test__call__returns_500(self): - inbuf = WsgiStringIO() + inbuf = WsgiBytesIO() errbuf = StringIO() outbuf = StringIO() self.logger = debug_logger('test') @@ -4774,7 
+4975,7 @@ class TestObjectController(unittest.TestCase): self.assertEqual(self.logger.get_lines_for_level('info'), []) def test_PUT_slow(self): - inbuf = WsgiStringIO() + inbuf = WsgiBytesIO() errbuf = StringIO() outbuf = StringIO() self.object_controller = object_server.ObjectController( @@ -4854,7 +5055,7 @@ class TestObjectController(unittest.TestCase): object_dir = self.testdir + "/sda1/objects-1" self.assertFalse(os.path.isdir(object_dir)) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) self.assertTrue(os.path.isdir(object_dir)) # make sure no idx in header uses policy 0 data_dir @@ -4871,7 +5072,7 @@ class TestObjectController(unittest.TestCase): with mock.patch.object(POLICIES, 'get_by_index', lambda _: True): resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) self.assertTrue(os.path.isdir(object_dir)) def test_storage_policy_index_is_validated(self): @@ -4913,7 +5114,7 @@ class TestObjectController(unittest.TestCase): req.body = 'VERIFY' object_dir = self.testdir + "/sda1/objects-%s" % index resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 503) + self.assertEqual(resp.status_int, 503) self.assertFalse(os.path.isdir(object_dir)) def test_race_doesnt_quarantine(self): @@ -4942,7 +5143,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'application/octet-stream'}) req.body = 'some data' resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) return listing with mock.patch('os.listdir', mock_listdir): @@ -4950,7 +5151,7 @@ class TestObjectController(unittest.TestCase): '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': delete_timestamp}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 404) + 
self.assertEqual(resp.status_int, 404) qdir = os.path.join(self.testdir, 'sda1', 'quarantined') self.assertFalse(os.path.exists(qdir)) @@ -4958,8 +5159,8 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.object_controller) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.headers['X-Timestamp'], put_timestamp) + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.headers['X-Timestamp'], put_timestamp) @patch_policies(test_policies) @@ -5286,16 +5487,16 @@ class TestObjectServer(unittest.TestCase): def __exit__(self, typ, value, tb): in_a_timeout[0] = False - class PickyWsgiStringIO(WsgiStringIO): + class PickyWsgiBytesIO(WsgiBytesIO): def read(self, *a, **kw): if not in_a_timeout[0]: raise NotInATimeout() - return WsgiStringIO.read(self, *a, **kw) + return WsgiBytesIO.read(self, *a, **kw) def readline(self, *a, **kw): if not in_a_timeout[0]: raise NotInATimeout() - return WsgiStringIO.readline(self, *a, **kw) + return WsgiBytesIO.readline(self, *a, **kw) test_data = 'obj data' footer_meta = { @@ -5326,6 +5527,8 @@ class TestObjectServer(unittest.TestCase): "potato potato potato potato potato potato potato", "--boundary123--" )) + if six.PY3: + test_doc = test_doc.encode('utf-8') # phase1 - PUT request with object metadata in footer and # multiphase commit conversation @@ -5340,7 +5543,7 @@ class TestObjectServer(unittest.TestCase): 'X-Backend-Obj-Metadata-Footer': 'yes', 'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123', } - wsgi_input = PickyWsgiStringIO(test_doc) + wsgi_input = PickyWsgiBytesIO(test_doc) req = Request.blank( "/sda1/0/a/c/o", environ={'REQUEST_METHOD': 'PUT', 'wsgi.input': wsgi_input}, diff --git a/test/unit/obj/test_ssync_receiver.py b/test/unit/obj/test_ssync_receiver.py index a6eddf0bf3..38654ffd61 100644 --- a/test/unit/obj/test_ssync_receiver.py +++ b/test/unit/obj/test_ssync_receiver.py @@ -16,12 +16,12 @@ 
import contextlib import os import shutil -import StringIO import tempfile import unittest import eventlet import mock +import six from swift.common import bufferedhttp from swift.common import exceptions @@ -31,7 +31,7 @@ from swift.common import utils from swift.common.swob import HTTPException from swift.obj import diskfile from swift.obj import server -from swift.obj import ssync_receiver +from swift.obj import ssync_receiver, ssync_sender from swift.obj.reconstructor import ObjectReconstructor from test import unit @@ -408,10 +408,10 @@ class TestReceiver(unittest.TestCase): def test_SSYNC_Exception(self): - class _Wrapper(StringIO.StringIO): + class _Wrapper(six.StringIO): def __init__(self, value): - StringIO.StringIO.__init__(self, value) + six.StringIO.__init__(self, value) self.mock_socket = mock.MagicMock() def get_socket(self): @@ -443,10 +443,10 @@ class TestReceiver(unittest.TestCase): def test_SSYNC_Exception_Exception(self): - class _Wrapper(StringIO.StringIO): + class _Wrapper(six.StringIO): def __init__(self, value): - StringIO.StringIO.__init__(self, value) + six.StringIO.__init__(self, value) self.mock_socket = mock.MagicMock() def get_socket(self): @@ -479,14 +479,14 @@ class TestReceiver(unittest.TestCase): def test_MISSING_CHECK_timeout(self): - class _Wrapper(StringIO.StringIO): + class _Wrapper(six.StringIO): def __init__(self, value): - StringIO.StringIO.__init__(self, value) + six.StringIO.__init__(self, value) self.mock_socket = mock.MagicMock() def readline(self, sizehint=-1): - line = StringIO.StringIO.readline(self) + line = six.StringIO.readline(self) if line.startswith('hash'): eventlet.sleep(0.1) return line @@ -521,14 +521,14 @@ class TestReceiver(unittest.TestCase): def test_MISSING_CHECK_other_exception(self): - class _Wrapper(StringIO.StringIO): + class _Wrapper(six.StringIO): def __init__(self, value): - StringIO.StringIO.__init__(self, value) + six.StringIO.__init__(self, value) self.mock_socket = mock.MagicMock() def 
readline(self, sizehint=-1): - line = StringIO.StringIO.readline(self) + line = six.StringIO.readline(self) if line.startswith('hash'): raise Exception('test exception') return line @@ -766,14 +766,14 @@ class TestReceiver(unittest.TestCase): def test_UPDATES_timeout(self): - class _Wrapper(StringIO.StringIO): + class _Wrapper(six.StringIO): def __init__(self, value): - StringIO.StringIO.__init__(self, value) + six.StringIO.__init__(self, value) self.mock_socket = mock.MagicMock() def readline(self, sizehint=-1): - line = StringIO.StringIO.readline(self) + line = six.StringIO.readline(self) if line.startswith('DELETE'): eventlet.sleep(0.1) return line @@ -813,14 +813,14 @@ class TestReceiver(unittest.TestCase): def test_UPDATES_other_exception(self): - class _Wrapper(StringIO.StringIO): + class _Wrapper(six.StringIO): def __init__(self, value): - StringIO.StringIO.__init__(self, value) + six.StringIO.__init__(self, value) self.mock_socket = mock.MagicMock() def readline(self, sizehint=-1): - line = StringIO.StringIO.readline(self) + line = six.StringIO.readline(self) if line.startswith('DELETE'): raise Exception('test exception') return line @@ -859,10 +859,10 @@ class TestReceiver(unittest.TestCase): def test_UPDATES_no_problems_no_hard_disconnect(self): - class _Wrapper(StringIO.StringIO): + class _Wrapper(six.StringIO): def __init__(self, value): - StringIO.StringIO.__init__(self, value) + six.StringIO.__init__(self, value) self.mock_socket = mock.MagicMock() def get_socket(self): @@ -1190,6 +1190,7 @@ class TestReceiver(unittest.TestCase): ':UPDATES: START\r\n' 'PUT /a/c/o\r\n' 'Content-Length: 1\r\n' + 'Etag: c4ca4238a0b923820dcc509a6f75849b\r\n' 'X-Timestamp: 1364456113.12344\r\n' 'X-Object-Meta-Test1: one\r\n' 'Content-Encoding: gzip\r\n' @@ -1204,11 +1205,12 @@ class TestReceiver(unittest.TestCase): self.assertEqual(resp.status_int, 200) self.assertFalse(self.controller.logger.exception.called) self.assertFalse(self.controller.logger.error.called) - 
self.assertEquals(len(_PUT_request), 1) # sanity + self.assertEqual(len(_PUT_request), 1) # sanity req = _PUT_request[0] self.assertEqual(req.path, '/device/partition/a/c/o') self.assertEqual(req.content_length, 1) self.assertEqual(req.headers, { + 'Etag': 'c4ca4238a0b923820dcc509a6f75849b', 'Content-Length': '1', 'X-Timestamp': '1364456113.12344', 'X-Object-Meta-Test1': 'one', @@ -1220,7 +1222,68 @@ class TestReceiver(unittest.TestCase): 'X-Backend-Replication-Headers': ( 'content-length x-timestamp x-object-meta-test1 ' 'content-encoding specialty-header')}) - self.assertEqual(req.read_body, '1') + + def test_UPDATES_PUT_replication_headers(self): + self.controller.logger = mock.MagicMock() + + # sanity check - regular PUT will not persist Specialty-Header + req = swob.Request.blank( + '/sda1/0/a/c/o1', body='1', + environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '1', + 'Content-Type': 'text/plain', + 'Etag': 'c4ca4238a0b923820dcc509a6f75849b', + 'X-Timestamp': '1364456113.12344', + 'X-Object-Meta-Test1': 'one', + 'Content-Encoding': 'gzip', + 'Specialty-Header': 'value'}) + resp = req.get_response(self.controller) + self.assertEqual(resp.status_int, 201) + df = self.controller.get_diskfile( + 'sda1', '0', 'a', 'c', 'o1', POLICIES.default) + df.open() + self.assertFalse('Specialty-Header' in df.get_metadata()) + + # an SSYNC request can override PUT header filtering... 
+ req = swob.Request.blank( + '/sda1/0', + environ={'REQUEST_METHOD': 'SSYNC'}, + body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n' + ':UPDATES: START\r\n' + 'PUT /a/c/o2\r\n' + 'Content-Length: 1\r\n' + 'Content-Type: text/plain\r\n' + 'Etag: c4ca4238a0b923820dcc509a6f75849b\r\n' + 'X-Timestamp: 1364456113.12344\r\n' + 'X-Object-Meta-Test1: one\r\n' + 'Content-Encoding: gzip\r\n' + 'Specialty-Header: value\r\n' + '\r\n' + '1') + resp = req.get_response(self.controller) + self.assertEqual( + self.body_lines(resp.body), + [':MISSING_CHECK: START', ':MISSING_CHECK: END', + ':UPDATES: START', ':UPDATES: END']) + self.assertEqual(resp.status_int, 200) + + # verify diskfile has metadata permitted by replication headers + # including Specialty-Header + df = self.controller.get_diskfile( + 'sda1', '0', 'a', 'c', 'o2', POLICIES.default) + df.open() + for chunk in df.reader(): + self.assertEqual('1', chunk) + expected = {'ETag': 'c4ca4238a0b923820dcc509a6f75849b', + 'Content-Length': '1', + 'Content-Type': 'text/plain', + 'X-Timestamp': '1364456113.12344', + 'X-Object-Meta-Test1': 'one', + 'Content-Encoding': 'gzip', + 'Specialty-Header': 'value', + 'name': '/a/c/o2'} + actual = df.get_metadata() + self.assertEqual(expected, actual) def test_UPDATES_with_storage_policy(self): # update router post policy patch @@ -1258,7 +1321,7 @@ class TestReceiver(unittest.TestCase): self.assertEqual(resp.status_int, 200) self.assertFalse(self.controller.logger.exception.called) self.assertFalse(self.controller.logger.error.called) - self.assertEquals(len(_PUT_request), 1) # sanity + self.assertEqual(len(_PUT_request), 1) # sanity req = _PUT_request[0] self.assertEqual(req.path, '/device/partition/a/c/o') self.assertEqual(req.content_length, 1) @@ -1315,7 +1378,7 @@ class TestReceiver(unittest.TestCase): self.assertEqual(resp.status_int, 200) self.assertFalse(self.controller.logger.exception.called) self.assertFalse(self.controller.logger.error.called) - 
self.assertEquals(len(_PUT_request), 1) # sanity + self.assertEqual(len(_PUT_request), 1) # sanity req = _PUT_request[0] self.assertEqual(req.path, '/device/partition/a/c/o') self.assertEqual(req.content_length, 1) @@ -1360,7 +1423,7 @@ class TestReceiver(unittest.TestCase): self.assertEqual(resp.status_int, 200) self.assertFalse(self.controller.logger.exception.called) self.assertFalse(self.controller.logger.error.called) - self.assertEquals(len(_DELETE_request), 1) # sanity + self.assertEqual(len(_DELETE_request), 1) # sanity req = _DELETE_request[0] self.assertEqual(req.path, '/device/partition/a/c/o') self.assertEqual(req.headers, { @@ -1396,7 +1459,7 @@ class TestReceiver(unittest.TestCase): self.assertEqual(resp.status_int, 200) self.controller.logger.exception.assert_called_once_with( 'None/device/partition EXCEPTION in replication.Receiver') - self.assertEquals(len(_BONK_request), 1) # sanity + self.assertEqual(len(_BONK_request), 1) # sanity self.assertEqual(_BONK_request[0], None) def test_UPDATES_multiple(self): @@ -1457,7 +1520,7 @@ class TestReceiver(unittest.TestCase): self.assertEqual(resp.status_int, 200) self.assertFalse(self.controller.logger.exception.called) self.assertFalse(self.controller.logger.error.called) - self.assertEquals(len(_requests), 6) # sanity + self.assertEqual(len(_requests), 6) # sanity req = _requests.pop(0) self.assertEqual(req.method, 'PUT') self.assertEqual(req.path, '/device/partition/a/c/o1') @@ -1547,13 +1610,13 @@ class TestReceiver(unittest.TestCase): request.read_body = request.environ['wsgi.input'].read(2) return swob.HTTPInternalServerError() - class _IgnoreReadlineHint(StringIO.StringIO): + class _IgnoreReadlineHint(six.StringIO): def __init__(self, value): - StringIO.StringIO.__init__(self, value) + six.StringIO.__init__(self, value) def readline(self, hint=-1): - return StringIO.StringIO.readline(self) + return six.StringIO.readline(self) self.controller.PUT = _PUT self.controller.network_chunk_size = 2 @@ 
-1582,7 +1645,7 @@ class TestReceiver(unittest.TestCase): self.assertEqual(resp.status_int, 200) self.assertFalse(self.controller.logger.exception.called) self.assertFalse(self.controller.logger.error.called) - self.assertEquals(len(_requests), 2) # sanity + self.assertEqual(len(_requests), 2) # sanity req = _requests.pop(0) self.assertEqual(req.path, '/device/partition/a/c/o1') self.assertEqual(req.content_length, 3) @@ -1642,6 +1705,35 @@ class TestSsyncRxServer(unittest.TestCase): def tearDown(self): shutil.rmtree(self.tmpdir) + def test_SSYNC_disconnect(self): + node = { + 'replication_ip': '127.0.0.1', + 'replication_port': self.rx_port, + 'device': 'sdb1', + } + job = { + 'partition': 0, + 'policy': POLICIES[0], + 'device': 'sdb1', + } + sender = ssync_sender.Sender(self.daemon, node, job, ['abc']) + + # kick off the sender and let the error trigger failure + with mock.patch('swift.obj.ssync_receiver.Receiver.initialize_request')\ + as mock_initialize_request: + mock_initialize_request.side_effect = \ + swob.HTTPInternalServerError() + success, _ = sender() + self.assertFalse(success) + stderr = six.StringIO() + with mock.patch('sys.stderr', stderr): + # let gc and eventlet spin a bit + del sender + for i in range(3): + eventlet.sleep(0) + self.assertNotIn('ValueError: invalid literal for int() with base 16', + stderr.getvalue()) + def test_SSYNC_device_not_available(self): with mock.patch('swift.obj.ssync_receiver.Receiver.missing_check')\ as mock_missing_check: diff --git a/test/unit/obj/test_ssync_sender.py b/test/unit/obj/test_ssync_sender.py index 72817b0aa2..211ab39c46 100644 --- a/test/unit/obj/test_ssync_sender.py +++ b/test/unit/obj/test_ssync_sender.py @@ -16,7 +16,6 @@ import hashlib import os import shutil -import StringIO import tempfile import time import unittest @@ -24,6 +23,7 @@ import unittest import eventlet import itertools import mock +import six from swift.common import exceptions, utils from swift.common.storage_policy import POLICIES 
@@ -70,6 +70,9 @@ class NullBufferedHTTPConnection(object): def getresponse(*args, **kwargs): pass + def close(*args, **kwargs): + pass + class FakeResponse(object): @@ -77,7 +80,7 @@ class FakeResponse(object): self.status = 200 self.close_called = False if chunk_body: - self.fp = StringIO.StringIO( + self.fp = six.StringIO( '%x\r\n%s\r\n0\r\n\r\n' % (len(chunk_body), chunk_body)) def read(self, *args, **kwargs): @@ -159,7 +162,7 @@ class TestSender(BaseTestSender): self.sender.suffixes = ['abc'] success, candidates = self.sender() self.assertFalse(success) - self.assertEquals(candidates, {}) + self.assertEqual(candidates, {}) error_lines = self.daemon.logger.get_lines_for_level('error') self.assertEqual(1, len(error_lines)) self.assertEqual('1.2.3.4:5678/sda1/9 1 second: test connect', @@ -178,7 +181,7 @@ class TestSender(BaseTestSender): self.sender.suffixes = ['abc'] success, candidates = self.sender() self.assertFalse(success) - self.assertEquals(candidates, {}) + self.assertEqual(candidates, {}) error_lines = self.daemon.logger.get_lines_for_level('error') self.assertEqual(1, len(error_lines)) self.assertEqual('1.2.3.4:5678/sda1/9 test connect', @@ -193,7 +196,7 @@ class TestSender(BaseTestSender): self.sender.connect = 'cause exception' success, candidates = self.sender() self.assertFalse(success) - self.assertEquals(candidates, {}) + self.assertEqual(candidates, {}) error_lines = self.daemon.logger.get_lines_for_level('error') for line in error_lines: self.assertTrue(line.startswith( @@ -206,7 +209,7 @@ class TestSender(BaseTestSender): self.sender.connect = 'cause exception' success, candidates = self.sender() self.assertFalse(success) - self.assertEquals(candidates, {}) + self.assertEqual(candidates, {}) error_lines = self.daemon.logger.get_lines_for_level('error') for line in error_lines: self.assertTrue(line.startswith( @@ -220,7 +223,7 @@ class TestSender(BaseTestSender): self.sender.disconnect = mock.MagicMock() success, candidates = self.sender() 
self.assertTrue(success) - self.assertEquals(candidates, {}) + self.assertEqual(candidates, {}) self.sender.connect.assert_called_once_with() self.sender.missing_check.assert_called_once_with() self.sender.updates.assert_called_once_with() @@ -235,7 +238,7 @@ class TestSender(BaseTestSender): self.sender.failures = 1 success, candidates = self.sender() self.assertFalse(success) - self.assertEquals(candidates, {}) + self.assertEqual(candidates, {}) self.sender.connect.assert_called_once_with() self.sender.missing_check.assert_called_once_with() self.sender.updates.assert_called_once_with() @@ -270,10 +273,10 @@ class TestSender(BaseTestSender): } for method_name, expected_calls in expectations.items(): mock_method = getattr(mock_conn, method_name) - self.assertEquals(expected_calls, mock_method.mock_calls, - 'connection method "%s" got %r not %r' % ( - method_name, mock_method.mock_calls, - expected_calls)) + self.assertEqual(expected_calls, mock_method.mock_calls, + 'connection method "%s" got %r not %r' % ( + method_name, mock_method.mock_calls, + expected_calls)) def test_connect_handoff(self): node = dict(replication_ip='1.2.3.4', replication_port=5678, @@ -304,10 +307,10 @@ class TestSender(BaseTestSender): } for method_name, expected_calls in expectations.items(): mock_method = getattr(mock_conn, method_name) - self.assertEquals(expected_calls, mock_method.mock_calls, - 'connection method "%s" got %r not %r' % ( - method_name, mock_method.mock_calls, - expected_calls)) + self.assertEqual(expected_calls, mock_method.mock_calls, + 'connection method "%s" got %r not %r' % ( + method_name, mock_method.mock_calls, + expected_calls)) def test_connect_handoff_replicated(self): node = dict(replication_ip='1.2.3.4', replication_port=5678, @@ -339,10 +342,10 @@ class TestSender(BaseTestSender): } for method_name, expected_calls in expectations.items(): mock_method = getattr(mock_conn, method_name) - self.assertEquals(expected_calls, mock_method.mock_calls, - 'connection 
method "%s" got %r not %r' % ( - method_name, mock_method.mock_calls, - expected_calls)) + self.assertEqual(expected_calls, mock_method.mock_calls, + 'connection method "%s" got %r not %r' % ( + method_name, mock_method.mock_calls, + expected_calls)) def test_call(self): def patch_sender(sender): @@ -535,7 +538,7 @@ class TestSender(BaseTestSender): 'putrequest', putrequest): success, candidates = self.sender() self.assertFalse(success) - self.assertEquals(candidates, {}) + self.assertEqual(candidates, {}) error_lines = self.daemon.logger.get_lines_for_level('error') for line in error_lines: self.assertTrue(line.startswith( @@ -559,7 +562,7 @@ class TestSender(BaseTestSender): FakeBufferedHTTPConnection): success, candidates = self.sender() self.assertFalse(success) - self.assertEquals(candidates, {}) + self.assertEqual(candidates, {}) error_lines = self.daemon.logger.get_lines_for_level('error') for line in error_lines: self.assertTrue(line.startswith( @@ -586,7 +589,7 @@ class TestSender(BaseTestSender): self.daemon, node, job, ['abc']) success, candidates = self.sender() self.assertFalse(success) - self.assertEquals(candidates, {}) + self.assertEqual(candidates, {}) error_lines = self.daemon.logger.get_lines_for_level('error') for line in error_lines: self.assertTrue(line.startswith( @@ -607,39 +610,39 @@ class TestSender(BaseTestSender): def test_readline_at_start_of_chunk(self): self.sender.response = FakeResponse() - self.sender.response.fp = StringIO.StringIO('2\r\nx\n\r\n') + self.sender.response.fp = six.StringIO('2\r\nx\n\r\n') self.assertEqual(self.sender.readline(), 'x\n') def test_readline_chunk_with_extension(self): self.sender.response = FakeResponse() - self.sender.response.fp = StringIO.StringIO( + self.sender.response.fp = six.StringIO( '2 ; chunk=extension\r\nx\n\r\n') self.assertEqual(self.sender.readline(), 'x\n') def test_readline_broken_chunk(self): self.sender.response = FakeResponse() - self.sender.response.fp = 
StringIO.StringIO('q\r\nx\n\r\n') + self.sender.response.fp = six.StringIO('q\r\nx\n\r\n') self.assertRaises( exceptions.ReplicationException, self.sender.readline) self.assertTrue(self.sender.response.close_called) def test_readline_terminated_chunk(self): self.sender.response = FakeResponse() - self.sender.response.fp = StringIO.StringIO('b\r\nnot enough') + self.sender.response.fp = six.StringIO('b\r\nnot enough') self.assertRaises( exceptions.ReplicationException, self.sender.readline) self.assertTrue(self.sender.response.close_called) def test_readline_all(self): self.sender.response = FakeResponse() - self.sender.response.fp = StringIO.StringIO('2\r\nx\n\r\n0\r\n\r\n') + self.sender.response.fp = six.StringIO('2\r\nx\n\r\n0\r\n\r\n') self.assertEqual(self.sender.readline(), 'x\n') self.assertEqual(self.sender.readline(), '') self.assertEqual(self.sender.readline(), '') def test_readline_all_trailing_not_newline_termed(self): self.sender.response = FakeResponse() - self.sender.response.fp = StringIO.StringIO( + self.sender.response.fp = six.StringIO( '2\r\nx\n\r\n3\r\n123\r\n0\r\n\r\n') self.assertEqual(self.sender.readline(), 'x\n') self.assertEqual(self.sender.readline(), '123') @@ -1077,7 +1080,7 @@ class TestSender(BaseTestSender): args, _kwargs = self.sender.send_put.call_args path, df = args self.assertEqual(path, '/a/c/o') - self.assert_(isinstance(df, diskfile.DiskFile)) + self.assertTrue(isinstance(df, diskfile.DiskFile)) self.assertEqual(expected, df.get_metadata()) # note that the put line isn't actually sent since we mock send_put; # send_put is tested separately. 
@@ -1112,7 +1115,7 @@ class TestSender(BaseTestSender): args, _kwargs = self.sender.send_put.call_args path, df = args self.assertEqual(path, '/a/c/o') - self.assert_(isinstance(df, diskfile.DiskFile)) + self.assertTrue(isinstance(df, diskfile.DiskFile)) self.assertEqual(expected, df.get_metadata()) self.assertEqual(os.path.join(self.testdir, 'dev/objects/9/', object_hash[-3:], object_hash), @@ -1453,9 +1456,6 @@ class TestBaseSsync(BaseTestSender): continue else: self.assertEqual(v, rx_metadata.pop(k), k) - # ugh, ssync duplicates ETag with Etag so have to clear it out here - if 'Etag' in rx_metadata: - rx_metadata.pop('Etag') self.assertFalse(rx_metadata) expected_body = '%s___%s' % (tx_df._name, frag_index) actual_body = ''.join([chunk for chunk in rx_df.reader()]) diff --git a/test/unit/obj/test_updater.py b/test/unit/obj/test_updater.py index 901dd5bdc0..c863d2bae8 100644 --- a/test/unit/obj/test_updater.py +++ b/test/unit/obj/test_updater.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import cPickle as pickle +import six.moves.cPickle as pickle import mock import os import unittest @@ -85,13 +85,13 @@ class TestObjectUpdater(unittest.TestCase): 'interval': '1', 'concurrency': '2', 'node_timeout': '5'}) - self.assert_(hasattr(cu, 'logger')) - self.assert_(cu.logger is not None) - self.assertEquals(cu.devices, self.devices_dir) - self.assertEquals(cu.interval, 1) - self.assertEquals(cu.concurrency, 2) - self.assertEquals(cu.node_timeout, 5) - self.assert_(cu.get_container_ring() is not None) + self.assertTrue(hasattr(cu, 'logger')) + self.assertTrue(cu.logger is not None) + self.assertEqual(cu.devices, self.devices_dir) + self.assertEqual(cu.interval, 1) + self.assertEqual(cu.concurrency, 2) + self.assertEqual(cu.node_timeout, 5) + self.assertTrue(cu.get_container_ring() is not None) @mock.patch('os.listdir') def test_listdir_with_exception(self, mock_listdir): @@ -183,15 +183,16 @@ class TestObjectUpdater(unittest.TestCase): 'node_timeout': '5'}) cu.logger = mock_logger = mock.MagicMock() cu.object_sweep(self.sda1) - self.assertEquals(mock_logger.warn.call_count, warn) - self.assert_(os.path.exists(os.path.join(self.sda1, 'not_a_dir'))) + self.assertEqual(mock_logger.warn.call_count, warn) + self.assertTrue( + os.path.exists(os.path.join(self.sda1, 'not_a_dir'))) if should_skip: # if we were supposed to skip over the dir, we didn't process # anything at all self.assertTrue(os.path.exists(prefix_dir)) self.assertEqual(set(), seen) else: - self.assert_(not os.path.exists(prefix_dir)) + self.assertTrue(not os.path.exists(prefix_dir)) self.assertEqual(expected, seen) # test cleanup: the tempdir gets cleaned up between runs, but this @@ -220,7 +221,7 @@ class TestObjectUpdater(unittest.TestCase): async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0])) os.mkdir(async_dir) cu.run_once() - self.assert_(os.path.exists(async_dir)) + self.assertTrue(os.path.exists(async_dir)) # mount_check == False means no call to ismount self.assertEqual([], 
mock_ismount.mock_calls) @@ -235,8 +236,8 @@ class TestObjectUpdater(unittest.TestCase): 'to be here') os.mkdir(odd_dir) cu.run_once() - self.assert_(os.path.exists(async_dir)) - self.assert_(os.path.exists(odd_dir)) # skipped - not mounted! + self.assertTrue(os.path.exists(async_dir)) + self.assertTrue(os.path.exists(odd_dir)) # skipped - not mounted! # mount_check == True means ismount was checked self.assertEqual([ mock.call(self.sda1), @@ -257,7 +258,7 @@ class TestObjectUpdater(unittest.TestCase): async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0])) os.mkdir(async_dir) cu.run_once() - self.assert_(os.path.exists(async_dir)) + self.assertTrue(os.path.exists(async_dir)) # mount_check == False means no call to ismount self.assertEqual([], mock_ismount.mock_calls) @@ -272,8 +273,8 @@ class TestObjectUpdater(unittest.TestCase): 'to be here') os.mkdir(odd_dir) cu.run_once() - self.assert_(os.path.exists(async_dir)) - self.assert_(not os.path.exists(odd_dir)) + self.assertTrue(os.path.exists(async_dir)) + self.assertTrue(not os.path.exists(odd_dir)) # mount_check == True means ismount was checked self.assertEqual([ mock.call(self.sda1), @@ -297,8 +298,8 @@ class TestObjectUpdater(unittest.TestCase): normalize_timestamp(0)}}, async_pending) cu.run_once() - self.assert_(not os.path.exists(older_op_path)) - self.assert_(os.path.exists(op_path)) + self.assertTrue(not os.path.exists(older_op_path)) + self.assertTrue(os.path.exists(op_path)) self.assertEqual(cu.logger.get_increment_counts(), {'failures': 1, 'unlinks': 1}) self.assertEqual(None, @@ -314,8 +315,8 @@ class TestObjectUpdater(unittest.TestCase): out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' % return_code) out.flush() - self.assertEquals(inc.readline(), - 'PUT /sda1/0/a/c/o HTTP/1.1\r\n') + self.assertEqual(inc.readline(), + 'PUT /sda1/0/a/c/o HTTP/1.1\r\n') headers = swob.HeaderKeyDict() line = inc.readline() while line and line != '\r\n': @@ -356,7 +357,7 @@ class 
TestObjectUpdater(unittest.TestCase): err = event.wait() if err: raise err - self.assert_(os.path.exists(op_path)) + self.assertTrue(os.path.exists(op_path)) self.assertEqual(cu.logger.get_increment_counts(), {'failures': 1}) self.assertEqual([0], @@ -368,7 +369,7 @@ class TestObjectUpdater(unittest.TestCase): err = event.wait() if err: raise err - self.assert_(os.path.exists(op_path)) + self.assertTrue(os.path.exists(op_path)) self.assertEqual(cu.logger.get_increment_counts(), {'failures': 1}) self.assertEqual([0, 1], @@ -380,7 +381,7 @@ class TestObjectUpdater(unittest.TestCase): err = event.wait() if err: raise err - self.assert_(not os.path.exists(op_path)) + self.assertTrue(not os.path.exists(op_path)) self.assertEqual(cu.logger.get_increment_counts(), {'unlinks': 1, 'successes': 1}) diff --git a/test/unit/proxy/controllers/test_account.py b/test/unit/proxy/controllers/test_account.py index 23ad0a1c4e..a73c3ca713 100644 --- a/test/unit/proxy/controllers/test_account.py +++ b/test/unit/proxy/controllers/test_account.py @@ -57,7 +57,7 @@ class TestAccountController(unittest.TestCase): with mock.patch('swift.proxy.controllers.base.http_connect', fake_http_connect(200, headers=owner_headers)): resp = controller.HEAD(req) - self.assertEquals(2, resp.status_int // 100) + self.assertEqual(2, resp.status_int // 100) for key in owner_headers: self.assertTrue(key not in resp.headers) @@ -65,7 +65,7 @@ class TestAccountController(unittest.TestCase): with mock.patch('swift.proxy.controllers.base.http_connect', fake_http_connect(200, headers=owner_headers)): resp = controller.HEAD(req) - self.assertEquals(2, resp.status_int // 100) + self.assertEqual(2, resp.status_int // 100) for key in owner_headers: self.assertTrue(key in resp.headers) @@ -79,7 +79,7 @@ class TestAccountController(unittest.TestCase): with mock.patch('swift.proxy.controllers.base.http_connect', fake_http_connect(404, headers=resp_headers)): resp = controller.HEAD(req) - self.assertEquals(410, 
resp.status_int) + self.assertEqual(410, resp.status_int) def test_long_acct_names(self): long_acct_name = '%sLongAccountName' % ( @@ -90,17 +90,17 @@ class TestAccountController(unittest.TestCase): with mock.patch('swift.proxy.controllers.base.http_connect', fake_http_connect(200)): resp = controller.HEAD(req) - self.assertEquals(400, resp.status_int) + self.assertEqual(400, resp.status_int) with mock.patch('swift.proxy.controllers.base.http_connect', fake_http_connect(200)): resp = controller.GET(req) - self.assertEquals(400, resp.status_int) + self.assertEqual(400, resp.status_int) with mock.patch('swift.proxy.controllers.base.http_connect', fake_http_connect(200)): resp = controller.POST(req) - self.assertEquals(400, resp.status_int) + self.assertEqual(400, resp.status_int) def _make_callback_func(self, context): def callback(ipaddr, port, device, partition, method, path, @@ -193,7 +193,7 @@ class TestAccountController(unittest.TestCase): self.assertEqual(resp.headers.get(header), value) else: # blank ACLs should result in no header - self.assert_(header not in resp.headers) + self.assertTrue(header not in resp.headers) def test_add_acls_impossible_cases(self): # For test coverage: verify that defensive coding does defend, in cases diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index 3938f03b20..48300340c8 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -199,34 +199,34 @@ class TestFuncs(unittest.TestCase): env = {} info_a = get_info(app, env, 'a') # Check that you got proper info - self.assertEquals(info_a['status'], 200) - self.assertEquals(info_a['bytes'], 6666) - self.assertEquals(info_a['total_object_count'], 1000) + self.assertEqual(info_a['status'], 200) + self.assertEqual(info_a['bytes'], 6666) + self.assertEqual(info_a['total_object_count'], 1000) # Make sure the env cache is set - self.assertEquals(env.get('swift.account/a'), info_a) + 
self.assertEqual(env.get('swift.account/a'), info_a) # Make sure the app was called self.assertEqual(app.responses.stats['account'], 1) # Do an env cached call to account info_a = get_info(app, env, 'a') # Check that you got proper info - self.assertEquals(info_a['status'], 200) - self.assertEquals(info_a['bytes'], 6666) - self.assertEquals(info_a['total_object_count'], 1000) + self.assertEqual(info_a['status'], 200) + self.assertEqual(info_a['bytes'], 6666) + self.assertEqual(info_a['total_object_count'], 1000) # Make sure the env cache is set - self.assertEquals(env.get('swift.account/a'), info_a) + self.assertEqual(env.get('swift.account/a'), info_a) # Make sure the app was NOT called AGAIN self.assertEqual(app.responses.stats['account'], 1) # This time do env cached call to account and non cached to container info_c = get_info(app, env, 'a', 'c') # Check that you got proper info - self.assertEquals(info_c['status'], 200) - self.assertEquals(info_c['bytes'], 6666) - self.assertEquals(info_c['object_count'], 1000) + self.assertEqual(info_c['status'], 200) + self.assertEqual(info_c['bytes'], 6666) + self.assertEqual(info_c['object_count'], 1000) # Make sure the env cache is set - self.assertEquals(env.get('swift.account/a'), info_a) - self.assertEquals(env.get('swift.container/a/c'), info_c) + self.assertEqual(env.get('swift.account/a'), info_a) + self.assertEqual(env.get('swift.container/a/c'), info_c) # Make sure the app was called for container self.assertEqual(app.responses.stats['container'], 1) @@ -236,12 +236,12 @@ class TestFuncs(unittest.TestCase): env = {} # abandon previous call to env info_c = get_info(app, env, 'a', 'c') # Check that you got proper info - self.assertEquals(info_c['status'], 200) - self.assertEquals(info_c['bytes'], 6666) - self.assertEquals(info_c['object_count'], 1000) + self.assertEqual(info_c['status'], 200) + self.assertEqual(info_c['bytes'], 6666) + self.assertEqual(info_c['object_count'], 1000) # Make sure the env cache is set - 
self.assertEquals(env.get('swift.account/a'), info_a) - self.assertEquals(env.get('swift.container/a/c'), info_c) + self.assertEqual(env.get('swift.account/a'), info_a) + self.assertEqual(env.get('swift.container/a/c'), info_c) # check app calls both account and container self.assertEqual(app.responses.stats['account'], 1) self.assertEqual(app.responses.stats['container'], 1) @@ -251,11 +251,11 @@ class TestFuncs(unittest.TestCase): del(env['swift.account/a']) info_c = get_info(app, env, 'a', 'c') # Check that you got proper info - self.assertEquals(info_a['status'], 200) - self.assertEquals(info_c['bytes'], 6666) - self.assertEquals(info_c['object_count'], 1000) + self.assertEqual(info_a['status'], 200) + self.assertEqual(info_c['bytes'], 6666) + self.assertEqual(info_c['object_count'], 1000) # Make sure the env cache is set and account still not cached - self.assertEquals(env.get('swift.container/a/c'), info_c) + self.assertEqual(env.get('swift.container/a/c'), info_c) # no additional calls were made self.assertEqual(app.responses.stats['account'], 1) self.assertEqual(app.responses.stats['container'], 1) @@ -265,22 +265,22 @@ class TestFuncs(unittest.TestCase): env = {} info_a = get_info(app, env, 'a', ret_not_found=True) # Check that you got proper info - self.assertEquals(info_a['status'], 404) - self.assertEquals(info_a['bytes'], None) - self.assertEquals(info_a['total_object_count'], None) + self.assertEqual(info_a['status'], 404) + self.assertEqual(info_a['bytes'], None) + self.assertEqual(info_a['total_object_count'], None) # Make sure the env cache is set - self.assertEquals(env.get('swift.account/a'), info_a) + self.assertEqual(env.get('swift.account/a'), info_a) # and account was called self.assertEqual(app.responses.stats['account'], 1) # Do a cached call to account not found with ret_not_found info_a = get_info(app, env, 'a', ret_not_found=True) # Check that you got proper info - self.assertEquals(info_a['status'], 404) - 
self.assertEquals(info_a['bytes'], None) - self.assertEquals(info_a['total_object_count'], None) + self.assertEqual(info_a['status'], 404) + self.assertEqual(info_a['bytes'], None) + self.assertEqual(info_a['total_object_count'], None) # Make sure the env cache is set - self.assertEquals(env.get('swift.account/a'), info_a) + self.assertEqual(env.get('swift.account/a'), info_a) # add account was NOT called AGAIN self.assertEqual(app.responses.stats['account'], 1) @@ -289,16 +289,16 @@ class TestFuncs(unittest.TestCase): env = {} info_a = get_info(app, env, 'a') # Check that you got proper info - self.assertEquals(info_a, None) - self.assertEquals(env['swift.account/a']['status'], 404) + self.assertEqual(info_a, None) + self.assertEqual(env['swift.account/a']['status'], 404) # and account was called self.assertEqual(app.responses.stats['account'], 1) # Do a cached call to account not found without ret_not_found info_a = get_info(None, env, 'a') # Check that you got proper info - self.assertEquals(info_a, None) - self.assertEquals(env['swift.account/a']['status'], 404) + self.assertEqual(info_a, None) + self.assertEqual(env['swift.account/a']['status'], 404) # add account was NOT called AGAIN self.assertEqual(app.responses.stats['account'], 1) @@ -319,9 +319,9 @@ class TestFuncs(unittest.TestCase): req = Request.blank("/v1/AUTH_account/cont", environ={'swift.cache': FakeCache({})}) resp = get_container_info(req.environ, FakeApp()) - self.assertEquals(resp['storage_policy'], '0') - self.assertEquals(resp['bytes'], 6666) - self.assertEquals(resp['object_count'], 1000) + self.assertEqual(resp['storage_policy'], '0') + self.assertEqual(resp['bytes'], 6666) + self.assertEqual(resp['object_count'], 1000) def test_get_container_info_no_account(self): responses = DynamicResponseFactory(404, 200) @@ -336,8 +336,8 @@ class TestFuncs(unittest.TestCase): req = Request.blank("/v1/.system_account/cont") info = get_container_info(req.environ, app) self.assertEqual(info['status'], 
200) - self.assertEquals(info['bytes'], 6666) - self.assertEquals(info['object_count'], 1000) + self.assertEqual(info['bytes'], 6666) + self.assertEqual(info['object_count'], 1000) def test_get_container_info_cache(self): cache_stub = { @@ -347,11 +347,11 @@ class TestFuncs(unittest.TestCase): req = Request.blank("/v1/account/cont", environ={'swift.cache': FakeCache(cache_stub)}) resp = get_container_info(req.environ, FakeApp()) - self.assertEquals(resp['storage_policy'], '0') - self.assertEquals(resp['bytes'], 3333) - self.assertEquals(resp['object_count'], 10) - self.assertEquals(resp['status'], 404) - self.assertEquals(resp['versions'], "\xe1\xbd\x8a\x39") + self.assertEqual(resp['storage_policy'], '0') + self.assertEqual(resp['bytes'], 3333) + self.assertEqual(resp['object_count'], 10) + self.assertEqual(resp['status'], 404) + self.assertEqual(resp['versions'], "\xe1\xbd\x8a\x39") def test_get_container_info_env(self): cache_key = get_container_memcache_key("account", "cont") @@ -360,7 +360,7 @@ class TestFuncs(unittest.TestCase): environ={env_key: {'bytes': 3867}, 'swift.cache': FakeCache({})}) resp = get_container_info(req.environ, 'xxx') - self.assertEquals(resp['bytes'], 3867) + self.assertEqual(resp['bytes'], 3867) def test_get_account_info_swift_source(self): app = FakeApp() @@ -373,8 +373,8 @@ class TestFuncs(unittest.TestCase): req = Request.blank("/v1/AUTH_account", environ={'swift.cache': FakeCache({})}) resp = get_account_info(req.environ, app) - self.assertEquals(resp['bytes'], 6666) - self.assertEquals(resp['total_object_count'], 1000) + self.assertEqual(resp['bytes'], 6666) + self.assertEqual(resp['total_object_count'], 1000) def test_get_account_info_cache(self): # The original test that we prefer to preserve @@ -384,9 +384,9 @@ class TestFuncs(unittest.TestCase): req = Request.blank("/v1/account/cont", environ={'swift.cache': FakeCache(cached)}) resp = get_account_info(req.environ, FakeApp()) - self.assertEquals(resp['bytes'], 3333) - 
self.assertEquals(resp['total_object_count'], 10) - self.assertEquals(resp['status'], 404) + self.assertEqual(resp['bytes'], 3333) + self.assertEqual(resp['total_object_count'], 10) + self.assertEqual(resp['status'], 404) # Here is a more realistic test cached = {'status': 404, @@ -397,11 +397,11 @@ class TestFuncs(unittest.TestCase): req = Request.blank("/v1/account/cont", environ={'swift.cache': FakeCache(cached)}) resp = get_account_info(req.environ, FakeApp()) - self.assertEquals(resp['status'], 404) - self.assertEquals(resp['bytes'], '3333') - self.assertEquals(resp['container_count'], 234) - self.assertEquals(resp['meta'], {}) - self.assertEquals(resp['total_object_count'], '10') + self.assertEqual(resp['status'], 404) + self.assertEqual(resp['bytes'], '3333') + self.assertEqual(resp['container_count'], 234) + self.assertEqual(resp['meta'], {}) + self.assertEqual(resp['total_object_count'], '10') def test_get_account_info_env(self): cache_key = get_account_memcache_key("account") @@ -410,7 +410,7 @@ class TestFuncs(unittest.TestCase): environ={env_key: {'bytes': 3867}, 'swift.cache': FakeCache({})}) resp = get_account_info(req.environ, 'xxx') - self.assertEquals(resp['bytes'], 3867) + self.assertEqual(resp['bytes'], 3867) def test_get_object_info_env(self): cached = {'status': 200, @@ -422,8 +422,8 @@ class TestFuncs(unittest.TestCase): environ={env_key: cached, 'swift.cache': FakeCache({})}) resp = get_object_info(req.environ, 'xxx') - self.assertEquals(resp['length'], 3333) - self.assertEquals(resp['type'], 'application/json') + self.assertEqual(resp['length'], 3333) + self.assertEqual(resp['type'], 'application/json') def test_get_object_info_no_env(self): app = FakeApp() @@ -433,8 +433,8 @@ class TestFuncs(unittest.TestCase): self.assertEqual(app.responses.stats['account'], 0) self.assertEqual(app.responses.stats['container'], 0) self.assertEqual(app.responses.stats['obj'], 1) - self.assertEquals(resp['length'], 5555) - self.assertEquals(resp['type'], 
'text/plain') + self.assertEqual(resp['length'], 5555) + self.assertEqual(resp['type'], 'text/plain') def test_options(self): base = Controller(self.app) @@ -469,26 +469,26 @@ class TestFuncs(unittest.TestCase): def test_headers_to_container_info_missing(self): resp = headers_to_container_info({}, 404) - self.assertEquals(resp['status'], 404) - self.assertEquals(resp['read_acl'], None) - self.assertEquals(resp['write_acl'], None) + self.assertEqual(resp['status'], 404) + self.assertEqual(resp['read_acl'], None) + self.assertEqual(resp['write_acl'], None) def test_headers_to_container_info_meta(self): headers = {'X-Container-Meta-Whatevs': 14, 'x-container-meta-somethingelse': 0} resp = headers_to_container_info(headers.items(), 200) - self.assertEquals(len(resp['meta']), 2) - self.assertEquals(resp['meta']['whatevs'], 14) - self.assertEquals(resp['meta']['somethingelse'], 0) + self.assertEqual(len(resp['meta']), 2) + self.assertEqual(resp['meta']['whatevs'], 14) + self.assertEqual(resp['meta']['somethingelse'], 0) def test_headers_to_container_info_sys_meta(self): prefix = get_sys_meta_prefix('container') headers = {'%sWhatevs' % prefix: 14, '%ssomethingelse' % prefix: 0} resp = headers_to_container_info(headers.items(), 200) - self.assertEquals(len(resp['sysmeta']), 2) - self.assertEquals(resp['sysmeta']['whatevs'], 14) - self.assertEquals(resp['sysmeta']['somethingelse'], 0) + self.assertEqual(len(resp['sysmeta']), 2) + self.assertEqual(resp['sysmeta']['whatevs'], 14) + self.assertEqual(resp['sysmeta']['somethingelse'], 0) def test_headers_to_container_info_values(self): headers = { @@ -498,37 +498,37 @@ class TestFuncs(unittest.TestCase): 'x-container-meta-access-control-allow-origin': 'here', } resp = headers_to_container_info(headers.items(), 200) - self.assertEquals(resp['read_acl'], 'readvalue') - self.assertEquals(resp['write_acl'], 'writevalue') - self.assertEquals(resp['cors']['allow_origin'], 'here') + self.assertEqual(resp['read_acl'], 'readvalue') + 
self.assertEqual(resp['write_acl'], 'writevalue') + self.assertEqual(resp['cors']['allow_origin'], 'here') headers['x-unused-header'] = 'blahblahblah' - self.assertEquals( + self.assertEqual( resp, headers_to_container_info(headers.items(), 200)) def test_headers_to_account_info_missing(self): resp = headers_to_account_info({}, 404) - self.assertEquals(resp['status'], 404) - self.assertEquals(resp['bytes'], None) - self.assertEquals(resp['container_count'], None) + self.assertEqual(resp['status'], 404) + self.assertEqual(resp['bytes'], None) + self.assertEqual(resp['container_count'], None) def test_headers_to_account_info_meta(self): headers = {'X-Account-Meta-Whatevs': 14, 'x-account-meta-somethingelse': 0} resp = headers_to_account_info(headers.items(), 200) - self.assertEquals(len(resp['meta']), 2) - self.assertEquals(resp['meta']['whatevs'], 14) - self.assertEquals(resp['meta']['somethingelse'], 0) + self.assertEqual(len(resp['meta']), 2) + self.assertEqual(resp['meta']['whatevs'], 14) + self.assertEqual(resp['meta']['somethingelse'], 0) def test_headers_to_account_info_sys_meta(self): prefix = get_sys_meta_prefix('account') headers = {'%sWhatevs' % prefix: 14, '%ssomethingelse' % prefix: 0} resp = headers_to_account_info(headers.items(), 200) - self.assertEquals(len(resp['sysmeta']), 2) - self.assertEquals(resp['sysmeta']['whatevs'], 14) - self.assertEquals(resp['sysmeta']['somethingelse'], 0) + self.assertEqual(len(resp['sysmeta']), 2) + self.assertEqual(resp['sysmeta']['whatevs'], 14) + self.assertEqual(resp['sysmeta']['somethingelse'], 0) def test_headers_to_account_info_values(self): headers = { @@ -536,36 +536,36 @@ class TestFuncs(unittest.TestCase): 'x-account-container-count': '20', } resp = headers_to_account_info(headers.items(), 200) - self.assertEquals(resp['total_object_count'], '10') - self.assertEquals(resp['container_count'], '20') + self.assertEqual(resp['total_object_count'], '10') + self.assertEqual(resp['container_count'], '20') 
headers['x-unused-header'] = 'blahblahblah' - self.assertEquals( + self.assertEqual( resp, headers_to_account_info(headers.items(), 200)) def test_headers_to_object_info_missing(self): resp = headers_to_object_info({}, 404) - self.assertEquals(resp['status'], 404) - self.assertEquals(resp['length'], None) - self.assertEquals(resp['etag'], None) + self.assertEqual(resp['status'], 404) + self.assertEqual(resp['length'], None) + self.assertEqual(resp['etag'], None) def test_headers_to_object_info_meta(self): headers = {'X-Object-Meta-Whatevs': 14, 'x-object-meta-somethingelse': 0} resp = headers_to_object_info(headers.items(), 200) - self.assertEquals(len(resp['meta']), 2) - self.assertEquals(resp['meta']['whatevs'], 14) - self.assertEquals(resp['meta']['somethingelse'], 0) + self.assertEqual(len(resp['meta']), 2) + self.assertEqual(resp['meta']['whatevs'], 14) + self.assertEqual(resp['meta']['somethingelse'], 0) def test_headers_to_object_info_sys_meta(self): prefix = get_sys_meta_prefix('object') headers = {'%sWhatevs' % prefix: 14, '%ssomethingelse' % prefix: 0} resp = headers_to_object_info(headers.items(), 200) - self.assertEquals(len(resp['sysmeta']), 2) - self.assertEquals(resp['sysmeta']['whatevs'], 14) - self.assertEquals(resp['sysmeta']['somethingelse'], 0) + self.assertEqual(len(resp['sysmeta']), 2) + self.assertEqual(resp['sysmeta']['whatevs'], 14) + self.assertEqual(resp['sysmeta']['somethingelse'], 0) def test_headers_to_object_info_values(self): headers = { @@ -573,11 +573,11 @@ class TestFuncs(unittest.TestCase): 'content-type': 'application/json', } resp = headers_to_object_info(headers.items(), 200) - self.assertEquals(resp['length'], '1024') - self.assertEquals(resp['type'], 'application/json') + self.assertEqual(resp['length'], '1024') + self.assertEqual(resp['type'], 'application/json') headers['x-unused-header'] = 'blahblahblah' - self.assertEquals( + self.assertEqual( resp, headers_to_object_info(headers.items(), 200)) @@ -624,24 +624,24 @@ 
class TestFuncs(unittest.TestCase): req = Request.blank('/') handler = GetOrHeadHandler(None, req, None, None, None, None, {}) handler.fast_forward(50) - self.assertEquals(handler.backend_headers['Range'], 'bytes=50-') + self.assertEqual(handler.backend_headers['Range'], 'bytes=50-') handler = GetOrHeadHandler(None, req, None, None, None, None, {'Range': 'bytes=23-50'}) handler.fast_forward(20) - self.assertEquals(handler.backend_headers['Range'], 'bytes=43-50') + self.assertEqual(handler.backend_headers['Range'], 'bytes=43-50') self.assertRaises(HTTPException, handler.fast_forward, 80) handler = GetOrHeadHandler(None, req, None, None, None, None, {'Range': 'bytes=23-'}) handler.fast_forward(20) - self.assertEquals(handler.backend_headers['Range'], 'bytes=43-') + self.assertEqual(handler.backend_headers['Range'], 'bytes=43-') handler = GetOrHeadHandler(None, req, None, None, None, None, {'Range': 'bytes=-100'}) handler.fast_forward(20) - self.assertEquals(handler.backend_headers['Range'], 'bytes=-80') + self.assertEqual(handler.backend_headers['Range'], 'bytes=-80') def test_transfer_headers_with_sysmeta(self): base = Controller(self.app) diff --git a/test/unit/proxy/controllers/test_container.py b/test/unit/proxy/controllers/test_container.py index 59bbd05783..0fc251fe92 100644 --- a/test/unit/proxy/controllers/test_container.py +++ b/test/unit/proxy/controllers/test_container.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import print_function import mock import unittest @@ -89,7 +90,7 @@ class TestContainerController(TestRingBase): with mock.patch('swift.proxy.controllers.base.http_connect', fake_http_connect(200, 200, headers=owner_headers)): resp = controller.HEAD(req) - self.assertEquals(2, resp.status_int // 100) + self.assertEqual(2, resp.status_int // 100) for key in owner_headers: self.assertTrue(key not in resp.headers) @@ -97,7 +98,7 @@ class TestContainerController(TestRingBase): with mock.patch('swift.proxy.controllers.base.http_connect', fake_http_connect(200, 200, headers=owner_headers)): resp = controller.HEAD(req) - self.assertEquals(2, resp.status_int // 100) + self.assertEqual(2, resp.status_int // 100) for key in owner_headers: self.assertTrue(key in resp.headers) @@ -164,12 +165,12 @@ class TestContainerController(TestRingBase): self.app._error_limiting = {} req = Request.blank('/v1/a/c', method=method) with mocked_http_conn(*statuses) as fake_conn: - print 'a' * 50 + print('a' * 50) resp = req.get_response(self.app) self.assertEqual(resp.status_int, expected) for req in fake_conn.requests: self.assertEqual(req['method'], method) - self.assert_(req['path'].endswith('/a/c')) + self.assertTrue(req['path'].endswith('/a/c')) base_status = [201] * 3 # test happy path diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index eeeae1218b..c27f1e9e61 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -26,6 +26,7 @@ from hashlib import md5 import mock from eventlet import Timeout +from six import BytesIO from six.moves import range import swift @@ -236,7 +237,7 @@ class BaseObjectControllerMixin(object): codes = [204] * self.replicas() with set_http_connect(*codes): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) def test_DELETE_missing_one(self): req = 
swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE') @@ -244,14 +245,14 @@ class BaseObjectControllerMixin(object): random.shuffle(codes) with set_http_connect(*codes): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) def test_DELETE_not_found(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE') codes = [404] * (self.replicas() - 1) + [204] with set_http_connect(*codes): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_DELETE_mostly_found(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE') @@ -260,7 +261,7 @@ class BaseObjectControllerMixin(object): self.assertEqual(len(codes), self.replicas()) with set_http_connect(*codes): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) def test_DELETE_mostly_not_found(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE') @@ -269,7 +270,7 @@ class BaseObjectControllerMixin(object): self.assertEqual(len(codes), self.replicas()) with set_http_connect(*codes): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_DELETE_half_not_found_statuses(self): self.obj_ring.set_replicas(4) @@ -277,7 +278,7 @@ class BaseObjectControllerMixin(object): req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE') with set_http_connect(404, 204, 404, 204): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) def test_DELETE_half_not_found_headers_and_body(self): # Transformed responses have bogus bodies and headers, so make sure we @@ -292,16 +293,16 @@ class BaseObjectControllerMixin(object): with set_http_connect(*status_codes, body_iter=bodies, headers=headers): resp = req.get_response(self.app) - 
self.assertEquals(resp.status_int, 204) - self.assertEquals(resp.headers.get('Pick-Me'), 'yes') - self.assertEquals(resp.body, '') + self.assertEqual(resp.status_int, 204) + self.assertEqual(resp.headers.get('Pick-Me'), 'yes') + self.assertEqual(resp.body, '') def test_DELETE_handoff(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE') codes = [204] * self.replicas() with set_http_connect(507, *codes): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) def test_POST_non_int_delete_after(self): t = str(int(time.time() + 100)) + '.1' @@ -381,14 +382,14 @@ class BaseObjectControllerMixin(object): req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD') with set_http_connect(200): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) def test_HEAD_x_newest(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD', headers={'X-Newest': 'true'}) with set_http_connect(200, 200, 200): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) def test_HEAD_x_newest_different_timestamps(self): req = swob.Request.blank('/v1/a/c/o', method='HEAD', @@ -475,7 +476,7 @@ class BaseObjectControllerMixin(object): def test_PUT_requires_length(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT') resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 411) + self.assertEqual(resp.status_int, 411) # end of BaseObjectControllerMixin @@ -491,7 +492,7 @@ class TestReplicatedObjController(BaseObjectControllerMixin, req.headers['content-length'] = '0' with set_http_connect(201, 201, 201): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_PUT_if_none_match(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT') @@ -499,7 +500,7 @@ class 
TestReplicatedObjController(BaseObjectControllerMixin, req.headers['content-length'] = '0' with set_http_connect(201, 201, 201): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_PUT_if_none_match_denied(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT') @@ -507,7 +508,7 @@ class TestReplicatedObjController(BaseObjectControllerMixin, req.headers['content-length'] = '0' with set_http_connect(201, 412, 201): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 412) + self.assertEqual(resp.status_int, 412) def test_PUT_if_none_match_not_star(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT') @@ -515,7 +516,7 @@ class TestReplicatedObjController(BaseObjectControllerMixin, req.headers['content-length'] = '0' with set_http_connect(): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_PUT_connect_exceptions(self): object_ring = self.app.get_object_ring(None) @@ -574,20 +575,20 @@ class TestReplicatedObjController(BaseObjectControllerMixin, req = swift.common.swob.Request.blank('/v1/a/c/o') with set_http_connect(200): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) def test_GET_error(self): req = swift.common.swob.Request.blank('/v1/a/c/o') with set_http_connect(503, 200): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) def test_GET_handoff(self): req = swift.common.swob.Request.blank('/v1/a/c/o') codes = [503] * self.obj_ring.replicas + [200] with set_http_connect(*codes): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) def test_GET_not_found(self): req = swift.common.swob.Request.blank('/v1/a/c/o') @@ -595,7 +596,7 @@ class 
TestReplicatedObjController(BaseObjectControllerMixin, self.obj_ring.max_more_nodes) with set_http_connect(*codes): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_POST_as_COPY_simple(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='POST') @@ -605,8 +606,8 @@ class TestReplicatedObjController(BaseObjectControllerMixin, codes = get_resp + put_resp with set_http_connect(*codes): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 202) - self.assertEquals(req.environ['QUERY_STRING'], '') + self.assertEqual(resp.status_int, 202) + self.assertEqual(req.environ['QUERY_STRING'], '') self.assertTrue('swift.post_as_copy' in req.environ) def test_POST_as_COPY_static_large_object(self): @@ -621,8 +622,8 @@ class TestReplicatedObjController(BaseObjectControllerMixin, headers = {'headers': get_headers} with set_http_connect(*codes, **headers): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 202) - self.assertEquals(req.environ['QUERY_STRING'], '') + self.assertEqual(resp.status_int, 202) + self.assertEqual(req.environ['QUERY_STRING'], '') self.assertTrue('swift.post_as_copy' in req.environ) def test_POST_delete_at(self): @@ -642,12 +643,12 @@ class TestReplicatedObjController(BaseObjectControllerMixin, codes = x_newest_responses + post_resp with set_http_connect(*codes, give_connect=capture_headers): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 200) - self.assertEquals(req.environ['QUERY_STRING'], '') # sanity + self.assertEqual(resp.status_int, 200) + self.assertEqual(req.environ['QUERY_STRING'], '') # sanity self.assertTrue('swift.post_as_copy' in req.environ) for given_headers in post_headers: - self.assertEquals(given_headers.get('X-Delete-At'), t) + self.assertEqual(given_headers.get('X-Delete-At'), t) self.assertTrue('X-Delete-At-Host' in given_headers) self.assertTrue('X-Delete-At-Device' in given_headers) 
self.assertTrue('X-Delete-At-Partition' in given_headers) @@ -667,9 +668,9 @@ class TestReplicatedObjController(BaseObjectControllerMixin, codes = [201] * self.obj_ring.replicas with set_http_connect(*codes, give_connect=capture_headers): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) for given_headers in put_headers: - self.assertEquals(given_headers.get('X-Delete-At'), t) + self.assertEqual(given_headers.get('X-Delete-At'), t) self.assertTrue('X-Delete-At-Host' in given_headers) self.assertTrue('X-Delete-At-Device' in given_headers) self.assertTrue('X-Delete-At-Partition' in given_headers) @@ -690,11 +691,11 @@ class TestReplicatedObjController(BaseObjectControllerMixin, with set_http_connect(*codes, give_connect=capture_headers): with mock.patch('time.time', lambda: t): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) expected_delete_at = str(int(t) + 60) for given_headers in put_headers: - self.assertEquals(given_headers.get('X-Delete-At'), - expected_delete_at) + self.assertEqual(given_headers.get('X-Delete-At'), + expected_delete_at) self.assertTrue('X-Delete-At-Host' in given_headers) self.assertTrue('X-Delete-At-Device' in given_headers) self.assertTrue('X-Delete-At-Partition' in given_headers) @@ -771,6 +772,43 @@ class TestReplicatedObjController(BaseObjectControllerMixin, resp = req.get_response(self.app) self.assertEqual(resp.status_int, 202) + def test_put_x_timestamp_conflict_with_missing_backend_timestamp(self): + ts = (utils.Timestamp(t) for t in itertools.count(int(time.time()))) + req = swob.Request.blank( + '/v1/a/c/o', method='PUT', headers={ + 'Content-Length': 0, + 'X-Timestamp': ts.next().internal}) + ts_iter = iter([None, None, None]) + codes = [409] * self.obj_ring.replicas + with set_http_connect(*codes, timestamps=ts_iter): + resp = req.get_response(self.app) + self.assertEqual(resp.status_int, 202) + + 
def test_put_x_timestamp_conflict_with_other_weird_success_response(self): + ts = (utils.Timestamp(t) for t in itertools.count(int(time.time()))) + req = swob.Request.blank( + '/v1/a/c/o', method='PUT', headers={ + 'Content-Length': 0, + 'X-Timestamp': ts.next().internal}) + ts_iter = iter([ts.next().internal, None, None]) + codes = [409] + [(201, 'notused')] * (self.obj_ring.replicas - 1) + with set_http_connect(*codes, timestamps=ts_iter): + resp = req.get_response(self.app) + self.assertEqual(resp.status_int, 202) + + def test_put_x_timestamp_conflict_with_if_none_match(self): + ts = (utils.Timestamp(t) for t in itertools.count(int(time.time()))) + req = swob.Request.blank( + '/v1/a/c/o', method='PUT', headers={ + 'Content-Length': 0, + 'If-None-Match': '*', + 'X-Timestamp': ts.next().internal}) + ts_iter = iter([ts.next().internal, None, None]) + codes = [409] + [(412, 'notused')] * (self.obj_ring.replicas - 1) + with set_http_connect(*codes, timestamps=ts_iter): + resp = req.get_response(self.app) + self.assertEqual(resp.status_int, 412) + def test_container_sync_put_x_timestamp_race(self): ts = (utils.Timestamp(t) for t in itertools.count(int(time.time()))) test_indexes = [None] + [int(p) for p in POLICIES] @@ -824,7 +862,7 @@ class TestReplicatedObjController(BaseObjectControllerMixin, codes = head_resp + put_resp with set_http_connect(*codes): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_PUT_log_info(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT') @@ -839,7 +877,7 @@ class TestReplicatedObjController(BaseObjectControllerMixin, with set_http_connect(*codes, headers=resp_headers): resp = req.get_response(self.app) self.assertEqual(resp.status_int, 201) - self.assertEquals( + self.assertEqual( req.environ.get('swift.log_info'), ['x-copy-from:some/where']) # and then check that we don't do that for originating POSTs req = 
swift.common.swob.Request.blank('/v1/a/c/o') @@ -848,7 +886,7 @@ class TestReplicatedObjController(BaseObjectControllerMixin, with set_http_connect(*codes, headers=resp_headers): resp = req.get_response(self.app) self.assertEqual(resp.status_int, 202) - self.assertEquals(req.environ.get('swift.log_info'), None) + self.assertEqual(req.environ.get('swift.log_info'), None) @patch_policies(legacy_only=True) @@ -876,6 +914,76 @@ class TestObjControllerLegacyCache(TestReplicatedObjController): self.assertEqual(resp.status_int, 503) +class StubResponse(object): + + def __init__(self, status, body='', headers=None): + self.status = status + self.body = body + self.readable = BytesIO(body) + self.headers = swob.HeaderKeyDict(headers) + fake_reason = ('Fake', 'This response is a lie.') + self.reason = swob.RESPONSE_REASONS.get(status, fake_reason)[0] + + def getheader(self, header_name, default=None): + return self.headers.get(header_name, default) + + def getheaders(self): + if 'Content-Length' not in self.headers: + self.headers['Content-Length'] = len(self.body) + return self.headers.items() + + def read(self, amt=0): + return self.readable.read(amt) + + +@contextmanager +def capture_http_requests(get_response): + + class FakeConn(object): + + def __init__(self, req): + self.req = req + self.resp = None + + def getresponse(self): + self.resp = get_response(self.req) + return self.resp + + class ConnectionLog(object): + + def __init__(self): + self.connections = [] + + def __len__(self): + return len(self.connections) + + def __getitem__(self, i): + return self.connections[i] + + def __iter__(self): + return iter(self.connections) + + def __call__(self, ip, port, method, path, headers, qs, ssl): + req = { + 'ip': ip, + 'port': port, + 'method': method, + 'path': path, + 'headers': headers, + 'qs': qs, + 'ssl': ssl, + } + conn = FakeConn(req) + self.connections.append(conn) + return conn + + fake_conn = ConnectionLog() + + with 
mock.patch('swift.common.bufferedhttp.http_connect_raw', + new=fake_conn): + yield fake_conn + + @patch_policies(with_ec_default=True) class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): container_info = { @@ -904,38 +1012,38 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): expected = {} for i, p in enumerate(putters): expected[p] = i - self.assertEquals(got, expected) + self.assertEqual(got, expected) # now lets make a handoff at the end putters[3].node_index = None got = controller._determine_chunk_destinations(putters) - self.assertEquals(got, expected) + self.assertEqual(got, expected) putters[3].node_index = 3 # now lets make a handoff at the start putters[0].node_index = None got = controller._determine_chunk_destinations(putters) - self.assertEquals(got, expected) + self.assertEqual(got, expected) putters[0].node_index = 0 # now lets make a handoff in the middle putters[2].node_index = None got = controller._determine_chunk_destinations(putters) - self.assertEquals(got, expected) + self.assertEqual(got, expected) putters[2].node_index = 0 # now lets make all of them handoffs for index in range(0, 4): putters[index].node_index = None got = controller._determine_chunk_destinations(putters) - self.assertEquals(got, expected) + self.assertEqual(got, expected) def test_GET_simple(self): req = swift.common.swob.Request.blank('/v1/a/c/o') get_resp = [200] * self.policy.ec_ndata with set_http_connect(*get_resp): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) def test_GET_simple_x_newest(self): req = swift.common.swob.Request.blank('/v1/a/c/o', @@ -943,14 +1051,14 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): codes = [200] * self.policy.ec_ndata with set_http_connect(*codes): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) def test_GET_error(self): req = 
swift.common.swob.Request.blank('/v1/a/c/o') get_resp = [503] + [200] * self.policy.ec_ndata with set_http_connect(*get_resp): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) def test_GET_with_body(self): req = swift.common.swob.Request.blank('/v1/a/c/o') @@ -974,7 +1082,8 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): self.assertEqual(len(real_body), len(sanity_body)) self.assertEqual(real_body, sanity_body) - node_fragments = zip(*fragment_payloads) + # list(zip(...)) for py3 compatibility (zip is lazy there) + node_fragments = list(zip(*fragment_payloads)) self.assertEqual(len(node_fragments), self.replicas()) # sanity headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body))} responses = [(200, ''.join(node_fragments[i]), headers) @@ -983,7 +1092,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): with set_http_connect(*status_codes, body_iter=body_iter, headers=headers): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) self.assertEqual(len(real_body), len(resp.body)) self.assertEqual(real_body, resp.body) @@ -997,7 +1106,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): } with set_http_connect(*codes, expect_headers=expect_headers): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_PUT_with_explicit_commit_status(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT', @@ -1009,7 +1118,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): } with set_http_connect(*codes, expect_headers=expect_headers): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_PUT_error(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT', @@ -1021,7 
+1130,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): } with set_http_connect(*codes, expect_headers=expect_headers): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 503) + self.assertEqual(resp.status_int, 503) def test_PUT_mostly_success(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT', @@ -1035,7 +1144,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): } with set_http_connect(*codes, expect_headers=expect_headers): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_PUT_error_commit(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT', @@ -1047,7 +1156,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): } with set_http_connect(*codes, expect_headers=expect_headers): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 503) + self.assertEqual(resp.status_int, 503) def test_PUT_mostly_success_commit(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT', @@ -1062,7 +1171,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): } with set_http_connect(*codes, expect_headers=expect_headers): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_PUT_mostly_error_commit(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT', @@ -1076,7 +1185,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): } with set_http_connect(*codes, expect_headers=expect_headers): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 503) + self.assertEqual(resp.status_int, 503) def test_PUT_commit_timeout(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT', @@ -1089,7 +1198,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): } with 
set_http_connect(*codes, expect_headers=expect_headers): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_PUT_commit_exception(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT', @@ -1102,7 +1211,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): } with set_http_connect(*codes, expect_headers=expect_headers): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_PUT_with_body(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT') @@ -1133,7 +1242,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): give_connect=capture_headers): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) frag_archives = [] for connection_id, info in put_requests.items(): body = unchunk_body(''.join(info['chunks'])) @@ -1219,7 +1328,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): codes, expect_headers = zip(*responses) with set_http_connect(*codes, expect_headers=expect_headers): resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_COPY_cross_policy_type_from_replicated(self): self.app.per_container_info = { @@ -1306,6 +1415,483 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): for fragments in zip(*fragment_payloads)] return ec_archive_bodies + def _make_ec_object_stub(self, test_body=None, policy=None): + policy = policy or self.policy + segment_size = policy.ec_segment_size + test_body = test_body or ( + 'test' * segment_size)[:-random.randint(0, 1000)] + etag = md5(test_body).hexdigest() + ec_archive_bodies = self._make_ec_archive_bodies(test_body, + policy=policy) + return { + 'body': test_body, + 'etag': etag, + 'frags': ec_archive_bodies, + } + + def 
_fake_ec_node_response(self, node_frags): + """ + Given a list of entries for each node in ring order, where the + entries are a dict (or list of dicts) which describe all of the + fragment(s); create a function suitable for use with + capture_http_requests that will accept a req object and return a + response that will suitably fake the behavior of an object + server who had the given fragments on disk at the time. + """ + node_map = {} + all_nodes = [] + + def _build_node_map(req): + node_key = lambda n: (n['ip'], n['port']) + part = utils.split_path(req['path'], 5, 5, True)[1] + policy = POLICIES[int( + req['headers']['X-Backend-Storage-Policy-Index'])] + all_nodes.extend(policy.object_ring.get_part_nodes(part)) + all_nodes.extend(policy.object_ring.get_more_nodes(part)) + for i, node in enumerate(all_nodes): + node_map[node_key(node)] = i + + # normalize node_frags to a list of fragments for each node even + # if there's only one fragment in the dataset provided. + for i, frags in enumerate(node_frags): + if isinstance(frags, dict): + node_frags[i] = [frags] + + def get_response(req): + if not node_map: + _build_node_map(req) + + try: + node_index = node_map[(req['ip'], req['port'])] + except KeyError: + raise Exception("Couldn't find node %s:%s in %r" % ( + req['ip'], req['port'], all_nodes)) + + try: + frags = node_frags[node_index] + except KeyError: + raise Exception('Found node %r:%r at index %s - ' + 'but only got %s stub response nodes' % ( + req['ip'], req['port'], node_index, + len(node_frags))) + + try: + stub = random.choice(frags) + except IndexError: + stub = None + if stub: + body = stub['obj']['frags'][stub['frag']] + headers = { + 'X-Object-Sysmeta-Ec-Content-Length': len( + stub['obj']['body']), + 'X-Object-Sysmeta-Ec-Etag': stub['obj']['etag'], + 'X-Object-Sysmeta-Ec-Frag-Index': stub['frag'], + } + resp = StubResponse(200, body, headers) + else: + resp = StubResponse(404) + return resp + + return get_response + + def 
test_GET_with_frags_swapped_around(self): + segment_size = self.policy.ec_segment_size + test_data = ('test' * segment_size)[:-657] + etag = md5(test_data).hexdigest() + ec_archive_bodies = self._make_ec_archive_bodies(test_data) + + _part, primary_nodes = self.obj_ring.get_nodes('a', 'c', 'o') + + node_key = lambda n: (n['ip'], n['port']) + response_map = { + node_key(n): StubResponse(200, ec_archive_bodies[i], { + 'X-Object-Sysmeta-Ec-Content-Length': len(test_data), + 'X-Object-Sysmeta-Ec-Etag': etag, + 'X-Object-Sysmeta-Ec-Frag-Index': i, + }) for i, n in enumerate(primary_nodes) + } + + # swap a parity response into a data node + data_node = random.choice(primary_nodes[:self.policy.ec_ndata]) + parity_node = random.choice(primary_nodes[self.policy.ec_ndata:]) + (response_map[node_key(data_node)], + response_map[node_key(parity_node)]) = \ + (response_map[node_key(parity_node)], + response_map[node_key(data_node)]) + + def get_response(req): + req_key = (req['ip'], req['port']) + return response_map.pop(req_key) + + req = swob.Request.blank('/v1/a/c/o') + with capture_http_requests(get_response) as log: + resp = req.get_response(self.app) + + self.assertEqual(resp.status_int, 200) + self.assertEqual(len(log), self.policy.ec_ndata) + self.assertEqual(len(response_map), + len(primary_nodes) - self.policy.ec_ndata) + + def test_GET_with_single_missed_overwrite_does_not_need_handoff(self): + obj1 = self._make_ec_object_stub() + obj2 = self._make_ec_object_stub() + + node_frags = [ + {'obj': obj2, 'frag': 0}, + {'obj': obj2, 'frag': 1}, + {'obj': obj1, 'frag': 2}, # missed over write + {'obj': obj2, 'frag': 3}, + {'obj': obj2, 'frag': 4}, + {'obj': obj2, 'frag': 5}, + {'obj': obj2, 'frag': 6}, + {'obj': obj2, 'frag': 7}, + {'obj': obj2, 'frag': 8}, + {'obj': obj2, 'frag': 9}, + {'obj': obj2, 'frag': 10}, # parity + {'obj': obj2, 'frag': 11}, # parity + {'obj': obj2, 'frag': 12}, # parity + {'obj': obj2, 'frag': 13}, # parity + # {'obj': obj2, 'frag': 2}, # handoff 
(not used in this test) + ] + + fake_response = self._fake_ec_node_response(node_frags) + + req = swob.Request.blank('/v1/a/c/o') + with capture_http_requests(fake_response) as log: + resp = req.get_response(self.app) + + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.headers['etag'], obj2['etag']) + self.assertEqual(md5(resp.body).hexdigest(), obj2['etag']) + + collected_responses = defaultdict(set) + for conn in log: + etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag'] + index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index'] + collected_responses[etag].add(index) + + # because the primary nodes are shuffled, it's possible the proxy + # didn't even notice the missed overwrite frag - but it might have + self.assertLessEqual(len(log), self.policy.ec_ndata + 1) + self.assertLessEqual(len(collected_responses), 2) + + # ... regardless we should never need to fetch more than ec_ndata + # frags for any given etag + for etag, frags in collected_responses.items(): + self.assertTrue(len(frags) <= self.policy.ec_ndata, + 'collected %s frags for etag %s' % ( + len(frags), etag)) + + def test_GET_with_many_missed_overwrite_will_need_handoff(self): + obj1 = self._make_ec_object_stub() + obj2 = self._make_ec_object_stub() + + node_frags = [ + {'obj': obj2, 'frag': 0}, + {'obj': obj2, 'frag': 1}, + {'obj': obj1, 'frag': 2}, # missed + {'obj': obj2, 'frag': 3}, + {'obj': obj2, 'frag': 4}, + {'obj': obj2, 'frag': 5}, + {'obj': obj1, 'frag': 6}, # missed + {'obj': obj2, 'frag': 7}, + {'obj': obj2, 'frag': 8}, + {'obj': obj1, 'frag': 9}, # missed + {'obj': obj1, 'frag': 10}, # missed + {'obj': obj1, 'frag': 11}, # missed + {'obj': obj2, 'frag': 12}, + {'obj': obj2, 'frag': 13}, + {'obj': obj2, 'frag': 6}, # handoff + ] + + fake_response = self._fake_ec_node_response(node_frags) + + req = swob.Request.blank('/v1/a/c/o') + with capture_http_requests(fake_response) as log: + resp = req.get_response(self.app) + + self.assertEqual(resp.status_int, 200) + 
self.assertEqual(resp.headers['etag'], obj2['etag']) + self.assertEqual(md5(resp.body).hexdigest(), obj2['etag']) + + collected_responses = defaultdict(set) + for conn in log: + etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag'] + index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index'] + collected_responses[etag].add(index) + + # there's not enough of the obj2 etag on the primaries, we would + # have collected responses for both etags, and would have made + # one more request to the handoff node + self.assertEqual(len(log), self.replicas() + 1) + self.assertEqual(len(collected_responses), 2) + + # ... regardless we should never need to fetch more than ec_ndata + # frags for any given etag + for etag, frags in collected_responses.items(): + self.assertTrue(len(frags) <= self.policy.ec_ndata, + 'collected %s frags for etag %s' % ( + len(frags), etag)) + + def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_succeed(self): + obj1 = self._make_ec_object_stub() + obj2 = self._make_ec_object_stub() + + node_frags = [ + {'obj': obj1, 'frag': 0}, + {'obj': obj2, 'frag': 0}, + {}, + {'obj': obj1, 'frag': 1}, + {'obj': obj2, 'frag': 1}, + {}, + {'obj': obj1, 'frag': 2}, + {'obj': obj2, 'frag': 2}, + {}, + {'obj': obj1, 'frag': 3}, + {'obj': obj2, 'frag': 3}, + {}, + {'obj': obj1, 'frag': 4}, + {'obj': obj2, 'frag': 4}, + {}, + {'obj': obj1, 'frag': 5}, + {'obj': obj2, 'frag': 5}, + {}, + {'obj': obj1, 'frag': 6}, + {'obj': obj2, 'frag': 6}, + {}, + {'obj': obj1, 'frag': 7}, + {'obj': obj2, 'frag': 7}, + {}, + {'obj': obj1, 'frag': 8}, + {'obj': obj2, 'frag': 8}, + {}, + {'obj': obj2, 'frag': 9}, + ] + + fake_response = self._fake_ec_node_response(node_frags) + + req = swob.Request.blank('/v1/a/c/o') + with capture_http_requests(fake_response) as log: + resp = req.get_response(self.app) + + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.headers['etag'], obj2['etag']) + self.assertEqual(md5(resp.body).hexdigest(), obj2['etag']) + + 
collected_responses = defaultdict(set) + for conn in log: + etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag'] + index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index'] + collected_responses[etag].add(index) + + # we go exactly as long as we have to, finding two different + # etags and some 404's (i.e. collected_responses[None]) + self.assertEqual(len(log), len(node_frags)) + self.assertEqual(len(collected_responses), 3) + + # ... regardless we should never need to fetch more than ec_ndata + # frags for any given etag + for etag, frags in collected_responses.items(): + self.assertTrue(len(frags) <= self.policy.ec_ndata, + 'collected %s frags for etag %s' % ( + len(frags), etag)) + + def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_stop(self): + obj1 = self._make_ec_object_stub() + obj2 = self._make_ec_object_stub() + + node_frags = [ + {'obj': obj1, 'frag': 0}, + {'obj': obj2, 'frag': 0}, + {}, + {'obj': obj1, 'frag': 1}, + {'obj': obj2, 'frag': 1}, + {}, + {'obj': obj1, 'frag': 2}, + {'obj': obj2, 'frag': 2}, + {}, + {'obj': obj1, 'frag': 3}, + {'obj': obj2, 'frag': 3}, + {}, + {'obj': obj1, 'frag': 4}, + {'obj': obj2, 'frag': 4}, + {}, + {'obj': obj1, 'frag': 5}, + {'obj': obj2, 'frag': 5}, + {}, + {'obj': obj1, 'frag': 6}, + {'obj': obj2, 'frag': 6}, + {}, + {'obj': obj1, 'frag': 7}, + {'obj': obj2, 'frag': 7}, + {}, + {'obj': obj1, 'frag': 8}, + {'obj': obj2, 'frag': 8}, + {}, + {}, + ] + + fake_response = self._fake_ec_node_response(node_frags) + + req = swob.Request.blank('/v1/a/c/o') + with capture_http_requests(fake_response) as log: + resp = req.get_response(self.app) + + self.assertEqual(resp.status_int, 404) + + collected_responses = defaultdict(set) + for conn in log: + etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag'] + index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index'] + collected_responses[etag].add(index) + + # default node_iter will exhaust at 2 * replicas + self.assertEqual(len(log), 2 * self.replicas()) + 
self.assertEqual(len(collected_responses), 3) + + # ... regardless we should never need to fetch more than ec_ndata + # frags for any given etag + for etag, frags in collected_responses.items(): + self.assertTrue(len(frags) <= self.policy.ec_ndata, + 'collected %s frags for etag %s' % ( + len(frags), etag)) + + def test_GET_mixed_success_with_range(self): + fragment_size = self.policy.fragment_size + + ec_stub = self._make_ec_object_stub() + frag_archives = ec_stub['frags'] + frag_archive_size = len(ec_stub['frags'][0]) + + headers = { + 'Content-Type': 'text/plain', + 'Content-Length': fragment_size, + 'Content-Range': 'bytes 0-%s/%s' % (fragment_size - 1, + frag_archive_size), + 'X-Object-Sysmeta-Ec-Content-Length': len(ec_stub['body']), + 'X-Object-Sysmeta-Ec-Etag': ec_stub['etag'], + } + responses = [ + StubResponse(206, frag_archives[0][:fragment_size], headers), + StubResponse(206, frag_archives[1][:fragment_size], headers), + StubResponse(206, frag_archives[2][:fragment_size], headers), + StubResponse(206, frag_archives[3][:fragment_size], headers), + StubResponse(206, frag_archives[4][:fragment_size], headers), + # data nodes with old frag + StubResponse(416), + StubResponse(416), + StubResponse(206, frag_archives[7][:fragment_size], headers), + StubResponse(206, frag_archives[8][:fragment_size], headers), + StubResponse(206, frag_archives[9][:fragment_size], headers), + # hopefully we ask for two more + StubResponse(206, frag_archives[10][:fragment_size], headers), + StubResponse(206, frag_archives[11][:fragment_size], headers), + ] + + def get_response(req): + return responses.pop(0) if responses else StubResponse(404) + + req = swob.Request.blank('/v1/a/c/o', headers={'Range': 'bytes=0-3'}) + with capture_http_requests(get_response) as log: + resp = req.get_response(self.app) + + self.assertEqual(resp.status_int, 206) + self.assertEqual(resp.body, 'test') + self.assertEqual(len(log), self.policy.ec_ndata + 2) + + def 
test_GET_with_range_unsatisfiable_mixed_success(self): + responses = [ + StubResponse(416), + StubResponse(416), + StubResponse(416), + StubResponse(416), + StubResponse(416), + StubResponse(416), + StubResponse(416), + # sneak in bogus extra responses + StubResponse(404), + StubResponse(206), + # and then just "enough" more 416's + StubResponse(416), + StubResponse(416), + StubResponse(416), + ] + + def get_response(req): + return responses.pop(0) if responses else StubResponse(404) + + req = swob.Request.blank('/v1/a/c/o', headers={ + 'Range': 'bytes=%s-' % 100000000000000}) + with capture_http_requests(get_response) as log: + resp = req.get_response(self.app) + + self.assertEqual(resp.status_int, 416) + # ec_ndata responses that must agree, plus the bogus extras + self.assertEqual(len(log), self.policy.ec_ndata + 2) + + def test_GET_mixed_ranged_responses_success(self): + segment_size = self.policy.ec_segment_size + fragment_size = self.policy.fragment_size + new_data = ('test' * segment_size)[:-492] + new_etag = md5(new_data).hexdigest() + new_archives = self._make_ec_archive_bodies(new_data) + old_data = ('junk' * segment_size)[:-492] + old_etag = md5(old_data).hexdigest() + old_archives = self._make_ec_archive_bodies(old_data) + frag_archive_size = len(new_archives[0]) + + new_headers = { + 'Content-Type': 'text/plain', + 'Content-Length': fragment_size, + 'Content-Range': 'bytes 0-%s/%s' % (fragment_size - 1, + frag_archive_size), + 'X-Object-Sysmeta-Ec-Content-Length': len(new_data), + 'X-Object-Sysmeta-Ec-Etag': new_etag, + } + old_headers = { + 'Content-Type': 'text/plain', + 'Content-Length': fragment_size, + 'Content-Range': 'bytes 0-%s/%s' % (fragment_size - 1, + frag_archive_size), + 'X-Object-Sysmeta-Ec-Content-Length': len(old_data), + 'X-Object-Sysmeta-Ec-Etag': old_etag, + } + # 7 primaries with stale frags, 3 handoffs failed to get new frags + responses = [ + StubResponse(206, old_archives[0][:fragment_size], old_headers), + StubResponse(206, 
new_archives[1][:fragment_size], new_headers), + StubResponse(206, old_archives[2][:fragment_size], old_headers), + StubResponse(206, new_archives[3][:fragment_size], new_headers), + StubResponse(206, old_archives[4][:fragment_size], old_headers), + StubResponse(206, new_archives[5][:fragment_size], new_headers), + StubResponse(206, old_archives[6][:fragment_size], old_headers), + StubResponse(206, new_archives[7][:fragment_size], new_headers), + StubResponse(206, old_archives[8][:fragment_size], old_headers), + StubResponse(206, new_archives[9][:fragment_size], new_headers), + StubResponse(206, old_archives[10][:fragment_size], old_headers), + StubResponse(206, new_archives[11][:fragment_size], new_headers), + StubResponse(206, old_archives[12][:fragment_size], old_headers), + StubResponse(206, new_archives[13][:fragment_size], new_headers), + StubResponse(206, new_archives[0][:fragment_size], new_headers), + StubResponse(404), + StubResponse(404), + StubResponse(206, new_archives[6][:fragment_size], new_headers), + StubResponse(404), + StubResponse(206, new_archives[10][:fragment_size], new_headers), + StubResponse(206, new_archives[12][:fragment_size], new_headers), + ] + + def get_response(req): + return responses.pop(0) if responses else StubResponse(404) + + req = swob.Request.blank('/v1/a/c/o') + with capture_http_requests(get_response) as log: + resp = req.get_response(self.app) + + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.body, new_data[:segment_size]) + self.assertEqual(len(log), self.policy.ec_ndata + 10) + def test_GET_mismatched_fragment_archives(self): segment_size = self.policy.ec_segment_size test_data1 = ('test' * segment_size)[:-333] @@ -1429,6 +2015,34 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): self.assertEqual(1, len(error_lines)) self.assertTrue('retrying' in error_lines[0]) + def test_fix_response_HEAD(self): + headers = {'X-Object-Sysmeta-Ec-Content-Length': '10', + 
'X-Object-Sysmeta-Ec-Etag': 'foo'} + + # sucsessful HEAD + responses = [(200, '', headers)] + status_codes, body_iter, headers = zip(*responses) + req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD') + with set_http_connect(*status_codes, body_iter=body_iter, + headers=headers): + resp = req.get_response(self.app) + self.assertEquals(resp.status_int, 200) + self.assertEquals(resp.body, '') + # 200OK shows original object content length + self.assertEquals(resp.headers['Content-Length'], '10') + self.assertEquals(resp.headers['Etag'], 'foo') + + # not found HEAD + responses = [(404, '', {})] * self.replicas() * 2 + status_codes, body_iter, headers = zip(*responses) + req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD') + with set_http_connect(*status_codes, body_iter=body_iter, + headers=headers): + resp = req.get_response(self.app) + self.assertEquals(resp.status_int, 404) + # 404 shows actual response body size (i.e. 0 for HEAD) + self.assertEquals(resp.headers['Content-Length'], '0') + def test_PUT_with_slow_commits(self): # It's important that this timeout be much less than the delay in # the slow commit responses so that the slow commits are not waited @@ -1441,7 +2055,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): codes = [FakeStatus(201, response_sleep=response_sleep) for i in range(self.replicas())] # swap out some with regular fast responses - number_of_fast_responses_needed_to_be_quick_enough = 2 + number_of_fast_responses_needed_to_be_quick_enough = 5 fast_indexes = random.sample( range(self.replicas()), number_of_fast_responses_needed_to_be_quick_enough) @@ -1455,9 +2069,118 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): start = time.time() resp = req.get_response(self.app) response_time = time.time() - start - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) self.assertTrue(response_time < response_sleep) + def 
test_PUT_with_less_durable_responses(self): + req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT', + body='') + + codes = [201] * self.policy.ec_nparity + codes += [503] * (self.policy.ec_ndata - 1) + random.shuffle(codes) + expect_headers = { + 'X-Obj-Metadata-Footer': 'yes', + 'X-Obj-Multiphase-Commit': 'yes' + } + with set_http_connect(*codes, expect_headers=expect_headers): + resp = req.get_response(self.app) + self.assertEqual(resp.status_int, 503) + + def test_COPY_with_ranges(self): + req = swift.common.swob.Request.blank( + '/v1/a/c/o', method='COPY', + headers={'Destination': 'c1/o', + 'Range': 'bytes=5-10'}) + # turn a real body into fragments + segment_size = self.policy.ec_segment_size + real_body = ('asdf' * segment_size)[:-10] + + # split it up into chunks + chunks = [real_body[x:x + segment_size] + for x in range(0, len(real_body), segment_size)] + + # we need only first chunk to rebuild 5-10 range + fragments = self.policy.pyeclib_driver.encode(chunks[0]) + fragment_payloads = [] + fragment_payloads.append(fragments) + + node_fragments = zip(*fragment_payloads) + self.assertEqual(len(node_fragments), self.replicas()) # sanity + headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body))} + responses = [(200, ''.join(node_fragments[i]), headers) + for i in range(POLICIES.default.ec_ndata)] + responses += [(201, '', {})] * self.obj_ring.replicas + status_codes, body_iter, headers = zip(*responses) + expect_headers = { + 'X-Obj-Metadata-Footer': 'yes', + 'X-Obj-Multiphase-Commit': 'yes' + } + with set_http_connect(*status_codes, body_iter=body_iter, + headers=headers, expect_headers=expect_headers): + resp = req.get_response(self.app) + self.assertEqual(resp.status_int, 201) + + def test_GET_with_invalid_ranges(self): + # reall body size is segment_size - 10 (just 1 segment) + segment_size = self.policy.ec_segment_size + real_body = ('a' * segment_size)[:-10] + + # range is out of real body but in segment size + 
self._test_invalid_ranges('GET', real_body, + segment_size, '%s-' % (segment_size - 10)) + # range is out of both real body and segment size + self._test_invalid_ranges('GET', real_body, + segment_size, '%s-' % (segment_size + 10)) + + def test_COPY_with_invalid_ranges(self): + # reall body size is segment_size - 10 (just 1 segment) + segment_size = self.policy.ec_segment_size + real_body = ('a' * segment_size)[:-10] + + # range is out of real body but in segment size + self._test_invalid_ranges('COPY', real_body, + segment_size, '%s-' % (segment_size - 10)) + # range is out of both real body and segment size + self._test_invalid_ranges('COPY', real_body, + segment_size, '%s-' % (segment_size + 10)) + + def _test_invalid_ranges(self, method, real_body, segment_size, req_range): + # make a request with range starts from more than real size. + req = swift.common.swob.Request.blank( + '/v1/a/c/o', method=method, + headers={'Destination': 'c1/o', + 'Range': 'bytes=%s' % (req_range)}) + + fragments = self.policy.pyeclib_driver.encode(real_body) + fragment_payloads = [fragments] + + node_fragments = zip(*fragment_payloads) + self.assertEqual(len(node_fragments), self.replicas()) # sanity + headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body))} + start = int(req_range.split('-')[0]) + self.assertTrue(start >= 0) # sanity + title, exp = swob.RESPONSE_REASONS[416] + range_not_satisfiable_body = \ + '

%s

%s

' % (title, exp) + if start >= segment_size: + responses = [(416, range_not_satisfiable_body, headers) + for i in range(POLICIES.default.ec_ndata)] + else: + responses = [(200, ''.join(node_fragments[i]), headers) + for i in range(POLICIES.default.ec_ndata)] + status_codes, body_iter, headers = zip(*responses) + expect_headers = { + 'X-Obj-Metadata-Footer': 'yes', + 'X-Obj-Multiphase-Commit': 'yes' + } + with set_http_connect(*status_codes, body_iter=body_iter, + headers=headers, expect_headers=expect_headers): + resp = req.get_response(self.app) + self.assertEquals(resp.status_int, 416) + self.assertEquals(resp.content_length, len(range_not_satisfiable_body)) + self.assertEquals(resp.body, range_not_satisfiable_body) + if __name__ == '__main__': unittest.main() diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index d03a35d3a4..4081f3a025 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import print_function import email.parser import logging import math @@ -25,7 +26,6 @@ import unittest from contextlib import closing, contextmanager, nested from gzip import GzipFile from shutil import rmtree -from StringIO import StringIO import gc import time from textwrap import dedent @@ -42,6 +42,8 @@ import random import mock from eventlet import sleep, spawn, wsgi, listen, Timeout +from six import BytesIO +from six import StringIO from six.moves import range from swift.common.utils import hash_path, json, storage_directory, \ parse_content_type, iter_multipart_mime_documents, public @@ -55,7 +57,7 @@ from swift.proxy.controllers.obj import ReplicatedObjectController from swift.account import server as account_server from swift.container import server as container_server from swift.obj import server as object_server -from swift.common.middleware import proxy_logging +from swift.common.middleware import proxy_logging, versioned_writes from swift.common.middleware.acl import parse_acl, format_acl from swift.common.exceptions import ChunkReadTimeout, DiskFileNotExist, \ APIVersionError @@ -69,7 +71,7 @@ from swift.proxy.controllers.base import get_container_memcache_key, \ import swift.proxy.controllers import swift.proxy.controllers.obj from swift.common.swob import Request, Response, HTTPUnauthorized, \ - HTTPException, HTTPForbidden, HeaderKeyDict + HTTPException, HeaderKeyDict from swift.common import storage_policy from swift.common.storage_policy import StoragePolicy, ECStoragePolicy, \ StoragePolicyCollection, POLICIES @@ -106,7 +108,7 @@ def do_setup(the_object_server): conf = {'devices': _testdir, 'swift_dir': _testdir, 'mount_check': 'false', 'allowed_headers': 'content-encoding, x-object-manifest, content-disposition, foo', - 'allow_versions': 'True'} + 'allow_versions': 't'} prolis = listen(('localhost', 0)) acc1lis = listen(('localhost', 0)) acc2lis = listen(('localhost', 0)) @@ -448,7 +450,7 @@ class 
TestController(unittest.TestCase): self.controller.transfer_headers(src_headers, dst_headers) expected_headers = {'x-base-meta-owner': '', 'x-base-meta-size': '151M'} - self.assertEquals(dst_headers, expected_headers) + self.assertEqual(dst_headers, expected_headers) def check_account_info_return(self, partition, nodes, is_none=False): if is_none: @@ -463,26 +465,26 @@ class TestController(unittest.TestCase): set_http_connect(200, count=123) partition, nodes, count = \ self.controller.account_info(self.account) - self.assertEquals(count, 123) + self.assertEqual(count, 123) with save_globals(): set_http_connect(200, count='123') partition, nodes, count = \ self.controller.account_info(self.account) - self.assertEquals(count, 123) + self.assertEqual(count, 123) with save_globals(): cache_key = get_account_memcache_key(self.account) account_info = {'status': 200, 'container_count': 1234} self.memcache.set(cache_key, account_info) partition, nodes, count = \ self.controller.account_info(self.account) - self.assertEquals(count, 1234) + self.assertEqual(count, 1234) with save_globals(): cache_key = get_account_memcache_key(self.account) account_info = {'status': 200, 'container_count': '1234'} self.memcache.set(cache_key, account_info) partition, nodes, count = \ self.controller.account_info(self.account) - self.assertEquals(count, 1234) + self.assertEqual(count, 1234) def test_make_requests(self): with save_globals(): @@ -501,7 +503,7 @@ class TestController(unittest.TestCase): partition, nodes, count = \ self.controller.account_info(self.account, self.request) self.check_account_info_return(partition, nodes) - self.assertEquals(count, 12345) + self.assertEqual(count, 12345) # Test the internal representation in memcache # 'container_count' changed from int to str @@ -512,14 +514,14 @@ class TestController(unittest.TestCase): 'bytes': None, 'meta': {}, 'sysmeta': {}} - self.assertEquals(container_info, - self.memcache.get(cache_key)) + self.assertEqual(container_info, + 
self.memcache.get(cache_key)) set_http_connect() partition, nodes, count = \ self.controller.account_info(self.account, self.request) self.check_account_info_return(partition, nodes) - self.assertEquals(count, 12345) + self.assertEqual(count, 12345) # tests if 404 is cached and used def test_account_info_404(self): @@ -528,7 +530,7 @@ class TestController(unittest.TestCase): partition, nodes, count = \ self.controller.account_info(self.account, self.request) self.check_account_info_return(partition, nodes, True) - self.assertEquals(count, None) + self.assertEqual(count, None) # Test the internal representation in memcache # 'container_count' changed from 0 to None @@ -539,14 +541,14 @@ class TestController(unittest.TestCase): 'bytes': None, 'meta': {}, 'sysmeta': {}} - self.assertEquals(account_info, - self.memcache.get(cache_key)) + self.assertEqual(account_info, + self.memcache.get(cache_key)) set_http_connect() partition, nodes, count = \ self.controller.account_info(self.account, self.request) self.check_account_info_return(partition, nodes, True) - self.assertEquals(count, None) + self.assertEqual(count, None) # tests if some http status codes are not cached def test_account_info_no_cache(self): @@ -556,7 +558,7 @@ class TestController(unittest.TestCase): self.controller.account_info(self.account, self.request) self.assertEqual(len(self.memcache.keys()), 0) self.check_account_info_return(partition, nodes, True) - self.assertEquals(count, None) + self.assertEqual(count, None) with save_globals(): # We cache if we have two 404 responses - fail if only one @@ -572,7 +574,7 @@ class TestController(unittest.TestCase): partition, nodes, count = \ self.controller.account_info(self.account, self.request) self.check_account_info_return(partition, nodes, is_none=True) - self.assertEquals(count, None) + self.assertEqual(count, None) def check_container_info_return(self, ret, is_none=False): if is_none: @@ -613,7 +615,7 @@ class TestController(unittest.TestCase): 
self.container) cache_value = self.memcache.get(cache_key) self.assertTrue(isinstance(cache_value, dict)) - self.assertEquals(200, cache_value.get('status')) + self.assertEqual(200, cache_value.get('status')) set_http_connect() ret = self.controller.container_info( @@ -636,7 +638,7 @@ class TestController(unittest.TestCase): self.container) cache_value = self.memcache.get(cache_key) self.assertTrue(isinstance(cache_value, dict)) - self.assertEquals(404, cache_value.get('status')) + self.assertEqual(404, cache_value.get('status')) set_http_connect() ret = self.controller.container_info( @@ -652,7 +654,7 @@ class TestController(unittest.TestCase): self.container) cache_value = self.memcache.get(cache_key) self.assertTrue(isinstance(cache_value, dict)) - self.assertEquals(404, cache_value.get('status')) + self.assertEqual(404, cache_value.get('status')) set_http_connect() ret = self.controller.container_info( @@ -716,7 +718,7 @@ class TestProxyServer(unittest.TestCase): req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'}) app.update_request(req) resp = app.handle_request(req) - self.assertEquals(resp.status_int, 500) + self.assertEqual(resp.status_int, 500) def test_internal_method_request(self): baseapp = proxy_server.Application({}, @@ -725,7 +727,7 @@ class TestProxyServer(unittest.TestCase): account_ring=FakeRing()) resp = baseapp.handle_request( Request.blank('/v1/a', environ={'REQUEST_METHOD': '__init__'})) - self.assertEquals(resp.status, '405 Method Not Allowed') + self.assertEqual(resp.status, '405 Method Not Allowed') def test_inexistent_method_request(self): baseapp = proxy_server.Application({}, @@ -734,7 +736,7 @@ class TestProxyServer(unittest.TestCase): account_ring=FakeRing()) resp = baseapp.handle_request( Request.blank('/v1/a', environ={'REQUEST_METHOD': '!invalid'})) - self.assertEquals(resp.status, '405 Method Not Allowed') + self.assertEqual(resp.status, '405 Method Not Allowed') def test_calls_authorize_allow(self): called = 
[False] @@ -750,7 +752,7 @@ class TestProxyServer(unittest.TestCase): req.environ['swift.authorize'] = authorize app.update_request(req) app.handle_request(req) - self.assert_(called[0]) + self.assertTrue(called[0]) def test_calls_authorize_deny(self): called = [False] @@ -765,7 +767,7 @@ class TestProxyServer(unittest.TestCase): req.environ['swift.authorize'] = authorize app.update_request(req) app.handle_request(req) - self.assert_(called[0]) + self.assertTrue(called[0]) def test_negative_content_length(self): swift_dir = mkdtemp() @@ -775,12 +777,12 @@ class TestProxyServer(unittest.TestCase): FakeRing(), FakeRing()) resp = baseapp.handle_request( Request.blank('/', environ={'CONTENT_LENGTH': '-1'})) - self.assertEquals(resp.status, '400 Bad Request') - self.assertEquals(resp.body, 'Invalid Content-Length') + self.assertEqual(resp.status, '400 Bad Request') + self.assertEqual(resp.body, 'Invalid Content-Length') resp = baseapp.handle_request( Request.blank('/', environ={'CONTENT_LENGTH': '-123'})) - self.assertEquals(resp.status, '400 Bad Request') - self.assertEquals(resp.body, 'Invalid Content-Length') + self.assertEqual(resp.status, '400 Bad Request') + self.assertEqual(resp.body, 'Invalid Content-Length') finally: rmtree(swift_dir, ignore_errors=True) @@ -833,7 +835,7 @@ class TestProxyServer(unittest.TestCase): resp = baseapp.handle_request( Request.blank('/v1/a/c/o', environ={'HTTP_HOST': 'invalid_host.com'})) - self.assertEquals(resp.status, '403 Forbidden') + self.assertEqual(resp.status, '403 Forbidden') finally: rmtree(swift_dir, ignore_errors=True) @@ -842,27 +844,27 @@ class TestProxyServer(unittest.TestCase): FakeMemcache(), container_ring=FakeRing(), account_ring=FakeRing()) - self.assertEquals(baseapp.node_timings, {}) + self.assertEqual(baseapp.node_timings, {}) req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'}) baseapp.update_request(req) resp = baseapp.handle_request(req) - self.assertEquals(resp.status_int, 503) # couldn't 
connect to anything + self.assertEqual(resp.status_int, 503) # couldn't connect to anything exp_timings = {} - self.assertEquals(baseapp.node_timings, exp_timings) + self.assertEqual(baseapp.node_timings, exp_timings) times = [time.time()] exp_timings = {'127.0.0.1': (0.1, times[0] + baseapp.timing_expiry)} with mock.patch('swift.proxy.server.time', lambda: times.pop(0)): baseapp.set_node_timing({'ip': '127.0.0.1'}, 0.1) - self.assertEquals(baseapp.node_timings, exp_timings) + self.assertEqual(baseapp.node_timings, exp_timings) nodes = [{'ip': '127.0.0.1'}, {'ip': '127.0.0.2'}, {'ip': '127.0.0.3'}] with mock.patch('swift.proxy.server.shuffle', lambda l: l): res = baseapp.sort_nodes(nodes) exp_sorting = [{'ip': '127.0.0.2'}, {'ip': '127.0.0.3'}, {'ip': '127.0.0.1'}] - self.assertEquals(res, exp_sorting) + self.assertEqual(res, exp_sorting) def test_node_affinity(self): baseapp = proxy_server.Application({'sorting_method': 'affinity', @@ -877,7 +879,7 @@ class TestProxyServer(unittest.TestCase): app_sorted = baseapp.sort_nodes(nodes) exp_sorted = [{'region': 1, 'zone': 2, 'ip': '127.0.0.2'}, {'region': 2, 'zone': 1, 'ip': '127.0.0.1'}] - self.assertEquals(exp_sorted, app_sorted) + self.assertEqual(exp_sorted, app_sorted) def test_info_defaults(self): app = proxy_server.Application({}, FakeMemcache(), @@ -1118,7 +1120,7 @@ class TestProxyServerLoading(unittest.TestCase): # all rings exist, app should load loadapp(conf_path) for policy in POLICIES: - self.assert_(policy.object_ring) + self.assertTrue(policy.object_ring) @patch_policies([StoragePolicy(0, 'zero', True, @@ -1131,6 +1133,8 @@ class TestObjectController(unittest.TestCase): logger=debug_logger('proxy-ut'), account_ring=FakeRing(), container_ring=FakeRing()) + # clear proxy logger result for each test + _test_servers[0].logger._clear() def tearDown(self): self.app.account_ring.set_replicas(3) @@ -1171,7 +1175,7 @@ class TestObjectController(unittest.TestCase): res = method(req) except HTTPException as res: 
pass - self.assertEquals(res.status_int, expected) + self.assertEqual(res.status_int, expected) # repeat test set_http_connect(*statuses, **kwargs) @@ -1184,7 +1188,7 @@ class TestObjectController(unittest.TestCase): res = method(req) except HTTPException as res: pass - self.assertEquals(res.status_int, expected) + self.assertEqual(res.status_int, expected) @unpatch_policies def test_policy_IO(self): @@ -1300,7 +1304,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank( '/v1/a/c1/wrong-o', environ={'REQUEST_METHOD': 'PUT', - 'wsgi.input': StringIO("hello")}, + 'wsgi.input': BytesIO(b"hello")}, headers={'Content-Type': 'text/plain', 'Content-Length': '5', 'X-Backend-Storage-Policy-Index': '2'}) @@ -1350,7 +1354,7 @@ class TestObjectController(unittest.TestCase): try: df.open() except DiskFileNotExist as e: - self.assert_(float(e.timestamp) > 0) + self.assertTrue(float(e.timestamp) > 0) else: self.fail('did not raise DiskFileNotExist') @@ -1455,6 +1459,46 @@ class TestObjectController(unittest.TestCase): 'bytes 4123-4523/5800') self.assertEqual(second_range_body, obj[4123:4524]) + @unpatch_policies + def test_GET_bad_range_zero_byte(self): + prolis = _test_sockets[0] + prosrv = _test_servers[0] + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + + path = '/v1/a/c/o.zerobyte' + fd.write('PUT %s HTTP/1.1\r\n' + 'Host: localhost\r\n' + 'Connection: close\r\n' + 'X-Storage-Token: t\r\n' + 'Content-Length: 0\r\n' + 'Content-Type: application/octet-stream\r\n' + '\r\n' % (path,)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEqual(headers[:len(exp)], exp) + + # bad byte-range + req = Request.blank( + path, + environ={'REQUEST_METHOD': 'GET'}, + headers={'Content-Type': 'application/octet-stream', + 'Range': 'bytes=spaghetti-carbonara'}) + res = req.get_response(prosrv) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.body, '') + + # not a byte-range + req = Request.blank( 
+ path, + environ={'REQUEST_METHOD': 'GET'}, + headers={'Content-Type': 'application/octet-stream', + 'Range': 'Kotta'}) + res = req.get_response(prosrv) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.body, '') + @unpatch_policies def test_GET_ranges_resuming(self): prolis = _test_sockets[0] @@ -1735,7 +1779,7 @@ class TestObjectController(unittest.TestCase): # go to disk to make sure it's there and all erasure-coded partition, nodes = policy.object_ring.get_nodes('a', 'ec-con', 'o1') conf = {'devices': _testdir, 'mount_check': 'false'} - df_mgr = diskfile.DiskFileManager(conf, FakeLogger()) + df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[policy] got_pieces = set() got_indices = set() @@ -1823,7 +1867,7 @@ class TestObjectController(unittest.TestCase): 'a', 'ec-con', 'o2') conf = {'devices': _testdir, 'mount_check': 'false'} - df_mgr = diskfile.DiskFileManager(conf, FakeLogger()) + df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[ec_policy] got_durable = [] fragment_archives = [] @@ -1877,6 +1921,7 @@ class TestObjectController(unittest.TestCase): @unpatch_policies def test_PUT_ec_object_etag_mismatch(self): + ec_policy = POLICIES[3] self.put_container("ec", "ec-con") obj = '90:6A:02:60:B1:08-96da3e706025537fc42464916427727e' @@ -1902,10 +1947,7 @@ class TestObjectController(unittest.TestCase): 'a', 'ec-con', 'o3') conf = {'devices': _testdir, 'mount_check': 'false'} - partition, nodes = prosrv.get_object_ring(3).get_nodes( - 'a', 'ec-con', 'o3') - conf = {'devices': _testdir, 'mount_check': 'false'} - df_mgr = diskfile.DiskFileManager(conf, FakeLogger()) + df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[ec_policy] for node in nodes: df = df_mgr.get_diskfile(node['device'], partition, @@ -1914,6 +1956,7 @@ class TestObjectController(unittest.TestCase): @unpatch_policies def test_PUT_ec_fragment_archive_etag_mismatch(self): + ec_policy = POLICIES[3] self.put_container("ec", "ec-con") # Cause a hash mismatch by feeding one particular 
MD5 hasher some @@ -1952,11 +1995,7 @@ class TestObjectController(unittest.TestCase): 'a', 'ec-con', 'pimento') conf = {'devices': _testdir, 'mount_check': 'false'} - partition, nodes = prosrv.get_object_ring(3).get_nodes( - 'a', 'ec-con', 'pimento') - conf = {'devices': _testdir, 'mount_check': 'false'} - - df_mgr = diskfile.DiskFileManager(conf, FakeLogger()) + df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[ec_policy] found = 0 for node in nodes: @@ -1964,9 +2003,13 @@ class TestObjectController(unittest.TestCase): 'a', 'ec-con', 'pimento', policy=POLICIES[3]) try: - df.open() + # diskfile open won't succeed because no durable was written, + # so look under the hood for data files. + files = os.listdir(df._datadir) + num_data_files = len([f for f in files if f.endswith('.data')]) + self.assertEqual(1, num_data_files) found += 1 - except DiskFileNotExist: + except OSError: pass self.assertEqual(found, 2) @@ -2014,6 +2057,7 @@ class TestObjectController(unittest.TestCase): obj = '0123456' * 11 * 17 prolis = _test_sockets[0] + prosrv = _test_servers[0] sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() fd.write('PUT /v1/a/ec-con/go-get-it HTTP/1.1\r\n' @@ -2053,6 +2097,10 @@ class TestObjectController(unittest.TestCase): break gotten_obj += buf self.assertEqual(gotten_obj, obj) + error_lines = prosrv.logger.get_lines_for_level('error') + warn_lines = prosrv.logger.get_lines_for_level('warning') + self.assertEquals(len(error_lines), 0) # sanity + self.assertEquals(len(warn_lines), 0) # sanity @unpatch_policies def test_conditional_GET_ec(self): @@ -2078,7 +2126,7 @@ class TestObjectController(unittest.TestCase): exp = 'HTTP/1.1 201' self.assertEqual(headers[:len(exp)], exp) - for verb in ('GET', 'HEAD'): + for verb, body in (('GET', obj), ('HEAD', '')): # If-Match req = Request.blank( '/v1/a/ec-con/conditionals', @@ -2086,6 +2134,7 @@ class TestObjectController(unittest.TestCase): headers={'If-Match': etag}) resp = 
req.get_response(prosrv) self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.body, body) req = Request.blank( '/v1/a/ec-con/conditionals', @@ -2100,6 +2149,7 @@ class TestObjectController(unittest.TestCase): headers={'If-Match': "*"}) resp = req.get_response(prosrv) self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.body, body) # If-None-Match req = Request.blank( @@ -2115,6 +2165,7 @@ class TestObjectController(unittest.TestCase): headers={'If-None-Match': not_etag}) resp = req.get_response(prosrv) self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.body, body) req = Request.blank( '/v1/a/ec-con/conditionals', @@ -2122,6 +2173,10 @@ class TestObjectController(unittest.TestCase): headers={'If-None-Match': "*"}) resp = req.get_response(prosrv) self.assertEqual(resp.status_int, 304) + error_lines = prosrv.logger.get_lines_for_level('error') + warn_lines = prosrv.logger.get_lines_for_level('warning') + self.assertEquals(len(error_lines), 0) # sanity + self.assertEquals(len(warn_lines), 0) # sanity @unpatch_policies def test_GET_ec_big(self): @@ -2135,6 +2190,7 @@ class TestObjectController(unittest.TestCase): "object is too small for proper testing") prolis = _test_sockets[0] + prosrv = _test_servers[0] sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() fd.write('PUT /v1/a/ec-con/big-obj-get HTTP/1.1\r\n' @@ -2176,6 +2232,10 @@ class TestObjectController(unittest.TestCase): # of garbage and demolishes your terminal's scrollback buffer. 
self.assertEqual(len(gotten_obj), len(obj)) self.assertEqual(gotten_obj, obj) + error_lines = prosrv.logger.get_lines_for_level('error') + warn_lines = prosrv.logger.get_lines_for_level('warning') + self.assertEquals(len(error_lines), 0) # sanity + self.assertEquals(len(warn_lines), 0) # sanity @unpatch_policies def test_GET_ec_failure_handling(self): @@ -2260,6 +2320,7 @@ class TestObjectController(unittest.TestCase): obj = '0123456' * 11 * 17 prolis = _test_sockets[0] + prosrv = _test_servers[0] sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() fd.write('PUT /v1/a/ec-con/go-head-it HTTP/1.1\r\n' @@ -2291,12 +2352,17 @@ class TestObjectController(unittest.TestCase): self.assertEqual(str(len(obj)), headers['Content-Length']) self.assertEqual(md5(obj).hexdigest(), headers['Etag']) self.assertEqual('chartreuse', headers['X-Object-Meta-Color']) + error_lines = prosrv.logger.get_lines_for_level('error') + warn_lines = prosrv.logger.get_lines_for_level('warning') + self.assertEquals(len(error_lines), 0) # sanity + self.assertEquals(len(warn_lines), 0) # sanity @unpatch_policies def test_GET_ec_404(self): self.put_container("ec", "ec-con") prolis = _test_sockets[0] + prosrv = _test_servers[0] sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() fd.write('GET /v1/a/ec-con/yes-we-have-no-bananas HTTP/1.1\r\n' @@ -2308,12 +2374,17 @@ class TestObjectController(unittest.TestCase): headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 404' self.assertEqual(headers[:len(exp)], exp) + error_lines = prosrv.logger.get_lines_for_level('error') + warn_lines = prosrv.logger.get_lines_for_level('warning') + self.assertEquals(len(error_lines), 0) # sanity + self.assertEquals(len(warn_lines), 0) # sanity @unpatch_policies def test_HEAD_ec_404(self): self.put_container("ec", "ec-con") prolis = _test_sockets[0] + prosrv = _test_servers[0] sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() fd.write('HEAD 
/v1/a/ec-con/yes-we-have-no-bananas HTTP/1.1\r\n' @@ -2325,6 +2396,10 @@ class TestObjectController(unittest.TestCase): headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 404' self.assertEqual(headers[:len(exp)], exp) + error_lines = prosrv.logger.get_lines_for_level('error') + warn_lines = prosrv.logger.get_lines_for_level('warning') + self.assertEquals(len(error_lines), 0) # sanity + self.assertEquals(len(warn_lines), 0) # sanity def test_PUT_expect_header_zero_content_length(self): test_errors = [] @@ -2710,163 +2785,6 @@ class TestObjectController(unittest.TestCase): exp = 'HTTP/1.1 200' self.assertEqual(headers[:len(exp)], exp) - def test_expirer_DELETE_on_versioned_object(self): - test_errors = [] - - def test_connect(ipaddr, port, device, partition, method, path, - headers=None, query_string=None): - if method == 'DELETE': - if 'x-if-delete-at' in headers or 'X-If-Delete-At' in headers: - test_errors.append('X-If-Delete-At in headers') - - body = json.dumps( - [{"name": "001o/1", - "hash": "x", - "bytes": 0, - "content_type": "text/plain", - "last_modified": "1970-01-01T00:00:01.000000"}]) - body_iter = ('', '', body, '', '', '', '', '', '', '', '', '', '', '') - with save_globals(): - controller = ReplicatedObjectController( - self.app, 'a', 'c', 'o') - # HEAD HEAD GET GET HEAD GET GET GET PUT PUT - # PUT DEL DEL DEL - set_http_connect(200, 200, 200, 200, 200, 200, 200, 200, 201, 201, - 201, 204, 204, 204, - give_connect=test_connect, - body_iter=body_iter, - headers={'x-versions-location': 'foo'}) - self.app.memcache.store = {} - req = Request.blank('/v1/a/c/o', - headers={'X-If-Delete-At': 1}, - environ={'REQUEST_METHOD': 'DELETE'}) - self.app.update_request(req) - controller.DELETE(req) - self.assertEquals(test_errors, []) - - @patch_policies([ - StoragePolicy(0, 'zero', False, object_ring=FakeRing()), - StoragePolicy(1, 'one', True, object_ring=FakeRing()) - ]) - def test_DELETE_on_expired_versioned_object(self): - # reset the router post patch_policies - 
self.app.obj_controller_router = proxy_server.ObjectControllerRouter() - methods = set() - authorize_call_count = [0] - - def test_connect(ipaddr, port, device, partition, method, path, - headers=None, query_string=None): - methods.add((method, path)) - - def fake_container_info(account, container, req): - return {'status': 200, 'sync_key': None, - 'meta': {}, 'cors': {'allow_origin': None, - 'expose_headers': None, - 'max_age': None}, - 'sysmeta': {}, 'read_acl': None, 'object_count': None, - 'write_acl': None, 'versions': 'foo', - 'partition': 1, 'bytes': None, 'storage_policy': '1', - 'nodes': [{'zone': 0, 'ip': '10.0.0.0', 'region': 0, - 'id': 0, 'device': 'sda', 'port': 1000}, - {'zone': 1, 'ip': '10.0.0.1', 'region': 1, - 'id': 1, 'device': 'sdb', 'port': 1001}, - {'zone': 2, 'ip': '10.0.0.2', 'region': 0, - 'id': 2, 'device': 'sdc', 'port': 1002}]} - - def fake_list_iter(container, prefix, env): - object_list = [{'name': '1'}, {'name': '2'}, {'name': '3'}] - for obj in object_list: - yield obj - - def fake_authorize(req): - authorize_call_count[0] += 1 - return None # allow the request - - with save_globals(): - controller = ReplicatedObjectController( - self.app, 'a', 'c', 'o') - controller.container_info = fake_container_info - controller._listing_iter = fake_list_iter - set_http_connect(404, 404, 404, # get for the previous version - 200, 200, 200, # get for the pre-previous - 201, 201, 201, # put move the pre-previous - 204, 204, 204, # delete for the pre-previous - give_connect=test_connect) - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'DELETE', - 'swift.authorize': fake_authorize}) - - self.app.memcache.store = {} - self.app.update_request(req) - controller.DELETE(req) - exp_methods = [('GET', '/a/foo/3'), - ('GET', '/a/foo/2'), - ('PUT', '/a/c/o'), - ('DELETE', '/a/foo/2')] - self.assertEquals(set(exp_methods), (methods)) - self.assertEquals(authorize_call_count[0], 2) - - @patch_policies([ - StoragePolicy(0, 'zero', False, 
object_ring=FakeRing()), - StoragePolicy(1, 'one', True, object_ring=FakeRing()) - ]) - def test_denied_DELETE_of_versioned_object(self): - """ - Verify that a request with read access to a versions container - is unable to cause any write operations on the versioned container. - """ - # reset the router post patch_policies - self.app.obj_controller_router = proxy_server.ObjectControllerRouter() - methods = set() - authorize_call_count = [0] - - def test_connect(ipaddr, port, device, partition, method, path, - headers=None, query_string=None): - methods.add((method, path)) - - def fake_container_info(account, container, req): - return {'status': 200, 'sync_key': None, - 'meta': {}, 'cors': {'allow_origin': None, - 'expose_headers': None, - 'max_age': None}, - 'sysmeta': {}, 'read_acl': None, 'object_count': None, - 'write_acl': None, 'versions': 'foo', - 'partition': 1, 'bytes': None, 'storage_policy': '1', - 'nodes': [{'zone': 0, 'ip': '10.0.0.0', 'region': 0, - 'id': 0, 'device': 'sda', 'port': 1000}, - {'zone': 1, 'ip': '10.0.0.1', 'region': 1, - 'id': 1, 'device': 'sdb', 'port': 1001}, - {'zone': 2, 'ip': '10.0.0.2', 'region': 0, - 'id': 2, 'device': 'sdc', 'port': 1002}]} - - def fake_list_iter(container, prefix, env): - object_list = [{'name': '1'}, {'name': '2'}, {'name': '3'}] - for obj in object_list: - yield obj - - def fake_authorize(req): - # deny write access - authorize_call_count[0] += 1 - return HTTPForbidden(req) # allow the request - - with save_globals(): - controller = ReplicatedObjectController(self.app, 'a', 'c', 'o') - controller.container_info = fake_container_info - # patching _listing_iter simulates request being authorized - # to list versions container - controller._listing_iter = fake_list_iter - set_http_connect(give_connect=test_connect) - req = Request.blank('/v1/a/c/o', - environ={'REQUEST_METHOD': 'DELETE', - 'swift.authorize': fake_authorize}) - - self.app.memcache.store = {} - self.app.update_request(req) - resp = 
controller.DELETE(req) - self.assertEqual(403, resp.status_int) - self.assertFalse(methods, methods) - self.assertEquals(authorize_call_count[0], 1) - def test_PUT_auto_content_type(self): with save_globals(): controller = ReplicatedObjectController( @@ -2879,8 +2797,8 @@ class TestObjectController(unittest.TestCase): # servers) set_http_connect(201, 201, 201, 201, 201, give_content_type=lambda content_type: - self.assertEquals(content_type, - next(expected))) + self.assertEqual(content_type, + next(expected))) # We need into include a transfer-encoding to get past # constraints.check_object_creation() req = Request.blank('/v1/a/c/%s' % filename, {}, @@ -2890,7 +2808,7 @@ class TestObjectController(unittest.TestCase): res = controller.PUT(req) # If we don't check the response here we could miss problems # in PUT() - self.assertEquals(res.status_int, 201) + self.assertEqual(res.status_int, 201) test_content_type('test.jpg', iter(['', '', 'image/jpeg', 'image/jpeg', 'image/jpeg'])) @@ -2907,10 +2825,10 @@ class TestObjectController(unittest.TestCase): proxy_server.Application({'swift_dir': swift_dir}, FakeMemcache(), FakeLogger(), FakeRing(), FakeRing()) - self.assertEquals(proxy_server.mimetypes.guess_type('blah.foo')[0], - 'foo/bar') - self.assertEquals(proxy_server.mimetypes.guess_type('blah.jpg')[0], - 'image/jpeg') + self.assertEqual(proxy_server.mimetypes.guess_type('blah.foo')[0], + 'foo/bar') + self.assertEqual(proxy_server.mimetypes.guess_type('blah.jpg')[0], + 'image/jpeg') finally: rmtree(swift_dir, ignore_errors=True) @@ -2927,7 +2845,7 @@ class TestObjectController(unittest.TestCase): self.app.memcache.store = {} res = controller.PUT(req) expected = str(expected) - self.assertEquals(res.status[:len(expected)], expected) + self.assertEqual(res.status[:len(expected)], expected) test_status_map((200, 200, 201, 201, 201), 201) test_status_map((200, 200, 201, 201, 500), 201) test_status_map((200, 200, 204, 404, 404), 404) @@ -2950,7 +2868,7 @@ class 
TestObjectController(unittest.TestCase): except HTTPException as res: pass expected = str(expected) - self.assertEquals(res.status[:len(expected)], expected) + self.assertEqual(res.status[:len(expected)], expected) test_status_map((200, 200, 201, 201, -1), 201) # connect exc # connect errors test_status_map((200, 200, Timeout(), 201, 201, ), 201) @@ -2982,7 +2900,7 @@ class TestObjectController(unittest.TestCase): except HTTPException as res: pass expected = str(expected) - self.assertEquals(res.status[:len(expected)], expected) + self.assertEqual(res.status[:len(expected)], expected) test_status_map((200, 200, 201, -1, 201), 201) test_status_map((200, 200, 201, -1, -1), 503) test_status_map((200, 200, 503, 503, -1), 503) @@ -2997,7 +2915,7 @@ class TestObjectController(unittest.TestCase): 'Content-Type': 'foo/bar'}) self.app.update_request(req) res = controller.PUT(req) - self.assertEquals(res.status_int, 413) + self.assertEqual(res.status_int, 413) def test_PUT_bad_content_type(self): with save_globals(): @@ -3008,7 +2926,7 @@ class TestObjectController(unittest.TestCase): 'Content-Length': 0, 'Content-Type': 'foo/bar;swift_hey=45'}) self.app.update_request(req) res = controller.PUT(req) - self.assertEquals(res.status_int, 400) + self.assertEqual(res.status_int, 400) def test_PUT_getresponse_exceptions(self): @@ -3027,8 +2945,8 @@ class TestObjectController(unittest.TestCase): except HTTPException as res: pass expected = str(expected) - self.assertEquals(res.status[:len(str(expected))], - str(expected)) + self.assertEqual(res.status[:len(str(expected))], + str(expected)) test_status_map((200, 200, 201, 201, -1), 201) test_status_map((200, 200, 201, -1, -1), 503) test_status_map((200, 200, 503, 503, -1), 503) @@ -3045,7 +2963,7 @@ class TestObjectController(unittest.TestCase): self.app.update_request(req) res = req.get_response(self.app) expected = str(expected) - self.assertEquals(res.status[:len(expected)], expected) + 
self.assertEqual(res.status[:len(expected)], expected) test_status_map((200, 200, 202, 202, 202), 202) test_status_map((200, 200, 202, 202, 500), 202) test_status_map((200, 200, 202, 500, 500), 503) @@ -3195,7 +3113,7 @@ class TestObjectController(unittest.TestCase): self.app.update_request(req) res = req.get_response(self.app) expected = str(expected) - self.assertEquals(res.status[:len(expected)], expected) + self.assertEqual(res.status[:len(expected)], expected) test_status_map((200, 200, 200, 200, 200, 202, 202, 202), 202) test_status_map((200, 200, 200, 200, 200, 202, 202, 500), 202) test_status_map((200, 200, 200, 200, 200, 202, 500, 500), 503) @@ -3212,8 +3130,8 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'DELETE'}) self.app.update_request(req) res = req.get_response(self.app) - self.assertEquals(res.status[:len(str(expected))], - str(expected)) + self.assertEqual(res.status[:len(str(expected))], + str(expected)) test_status_map((200, 200, 204, 204, 204), 204) test_status_map((200, 200, 204, 204, 500), 204) test_status_map((200, 200, 204, 404, 404), 404) @@ -3229,13 +3147,13 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'}) self.app.update_request(req) res = req.get_response(self.app) - self.assertEquals(res.status[:len(str(expected))], - str(expected)) + self.assertEqual(res.status[:len(str(expected))], + str(expected)) if expected < 400: - self.assert_('x-works' in res.headers) - self.assertEquals(res.headers['x-works'], 'yes') - self.assert_('accept-ranges' in res.headers) - self.assertEquals(res.headers['accept-ranges'], 'bytes') + self.assertTrue('x-works' in res.headers) + self.assertEqual(res.headers['x-works'], 'yes') + self.assertTrue('accept-ranges' in res.headers) + self.assertEqual(res.headers['accept-ranges'], 'bytes') test_status_map((200, 200, 200, 404, 404), 200) test_status_map((200, 200, 200, 500, 404), 200) @@ -3254,10 
+3172,10 @@ class TestObjectController(unittest.TestCase): headers={'x-newest': 'true'}) self.app.update_request(req) res = req.get_response(self.app) - self.assertEquals(res.status[:len(str(expected))], - str(expected)) - self.assertEquals(res.headers.get('last-modified'), - expected_timestamp) + self.assertEqual(res.status[:len(str(expected))], + str(expected)) + self.assertEqual(res.headers.get('last-modified'), + expected_timestamp) # acct cont obj obj obj test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1', @@ -3285,10 +3203,10 @@ class TestObjectController(unittest.TestCase): headers={'x-newest': 'true'}) self.app.update_request(req) res = req.get_response(self.app) - self.assertEquals(res.status[:len(str(expected))], - str(expected)) - self.assertEquals(res.headers.get('last-modified'), - expected_timestamp) + self.assertEqual(res.status[:len(str(expected))], + str(expected)) + self.assertEqual(res.headers.get('last-modified'), + expected_timestamp) test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1', '2', '3'), '3') @@ -3311,10 +3229,10 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'}) self.app.update_request(req) res = req.get_response(self.app) - self.assertEquals(res.status[:len(str(expected))], - str(expected)) - self.assertEquals(res.headers.get('last-modified'), - expected_timestamp) + self.assertEqual(res.status[:len(str(expected))], + str(expected)) + self.assertEqual(res.headers.get('last-modified'), + expected_timestamp) test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1', '2', '3'), '1') @@ -3340,7 +3258,7 @@ class TestObjectController(unittest.TestCase): 'X-Object-Meta-Foo': 'x' * limit}) self.app.update_request(req) res = req.get_response(self.app) - self.assertEquals(res.status_int, 202) + self.assertEqual(res.status_int, 202) set_http_connect(202, 202, 202) req = Request.blank( '/v1/a/c/o', {'REQUEST_METHOD': 'POST'}, @@ -3348,7 +3266,7 @@ class 
TestObjectController(unittest.TestCase): 'X-Object-Meta-Foo': 'x' * (limit + 1)}) self.app.update_request(req) res = req.get_response(self.app) - self.assertEquals(res.status_int, 400) + self.assertEqual(res.status_int, 400) def test_POST_as_copy_meta_val_len(self): with save_globals(): @@ -3360,7 +3278,7 @@ class TestObjectController(unittest.TestCase): 'X-Object-Meta-Foo': 'x' * limit}) self.app.update_request(req) res = req.get_response(self.app) - self.assertEquals(res.status_int, 202) + self.assertEqual(res.status_int, 202) set_http_connect(202, 202, 202) req = Request.blank( '/v1/a/c/o', {'REQUEST_METHOD': 'POST'}, @@ -3368,7 +3286,7 @@ class TestObjectController(unittest.TestCase): 'X-Object-Meta-Foo': 'x' * (limit + 1)}) self.app.update_request(req) res = req.get_response(self.app) - self.assertEquals(res.status_int, 400) + self.assertEqual(res.status_int, 400) def test_POST_meta_key_len(self): with save_globals(): @@ -3382,7 +3300,7 @@ class TestObjectController(unittest.TestCase): ('X-Object-Meta-' + 'x' * limit): 'x'}) self.app.update_request(req) res = req.get_response(self.app) - self.assertEquals(res.status_int, 202) + self.assertEqual(res.status_int, 202) set_http_connect(202, 202, 202) req = Request.blank( '/v1/a/c/o', {'REQUEST_METHOD': 'POST'}, @@ -3390,7 +3308,7 @@ class TestObjectController(unittest.TestCase): ('X-Object-Meta-' + 'x' * (limit + 1)): 'x'}) self.app.update_request(req) res = req.get_response(self.app) - self.assertEquals(res.status_int, 400) + self.assertEqual(res.status_int, 400) def test_POST_as_copy_meta_key_len(self): with save_globals(): @@ -3403,7 +3321,7 @@ class TestObjectController(unittest.TestCase): ('X-Object-Meta-' + 'x' * limit): 'x'}) self.app.update_request(req) res = req.get_response(self.app) - self.assertEquals(res.status_int, 202) + self.assertEqual(res.status_int, 202) set_http_connect(202, 202, 202) req = Request.blank( '/v1/a/c/o', {'REQUEST_METHOD': 'POST'}, @@ -3411,7 +3329,7 @@ class 
TestObjectController(unittest.TestCase): ('X-Object-Meta-' + 'x' * (limit + 1)): 'x'}) self.app.update_request(req) res = req.get_response(self.app) - self.assertEquals(res.status_int, 400) + self.assertEqual(res.status_int, 400) def test_POST_meta_count(self): with save_globals(): @@ -3424,7 +3342,7 @@ class TestObjectController(unittest.TestCase): headers=headers) self.app.update_request(req) res = req.get_response(self.app) - self.assertEquals(res.status_int, 400) + self.assertEqual(res.status_int, 400) def test_POST_meta_size(self): with save_globals(): @@ -3439,7 +3357,7 @@ class TestObjectController(unittest.TestCase): headers=headers) self.app.update_request(req) res = req.get_response(self.app) - self.assertEquals(res.status_int, 400) + self.assertEqual(res.status_int, 400) def test_PUT_not_autodetect_content_type(self): with save_globals(): @@ -3521,7 +3439,7 @@ class TestObjectController(unittest.TestCase): set_http_connect(200, 200, 201, 201, 201) # acct cont obj obj obj resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) self.app.client_timeout = 0.05 req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT', @@ -3532,7 +3450,7 @@ class TestObjectController(unittest.TestCase): set_http_connect(201, 201, 201) # obj obj obj resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 408) + self.assertEqual(resp.status_int, 408) def test_client_disconnect(self): with save_globals(): @@ -3567,7 +3485,7 @@ class TestObjectController(unittest.TestCase): set_http_connect(200, 200, 201, 201, 201) # acct cont obj obj obj resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 499) + self.assertEqual(resp.status_int, 499) def test_node_read_timeout(self): with save_globals(): @@ -3594,7 +3512,7 @@ class TestObjectController(unittest.TestCase): resp.body except ChunkReadTimeout: got_exc = True - self.assert_(not got_exc) + self.assertTrue(not got_exc) 
self.app.recoverable_node_timeout = 0.1 set_http_connect(200, 200, 200, slow=1.0) resp = req.get_response(self.app) @@ -3603,7 +3521,7 @@ class TestObjectController(unittest.TestCase): resp.body except ChunkReadTimeout: got_exc = True - self.assert_(got_exc) + self.assertTrue(got_exc) def test_node_read_timeout_retry(self): with save_globals(): @@ -3628,40 +3546,40 @@ class TestObjectController(unittest.TestCase): resp = req.get_response(self.app) got_exc = False try: - self.assertEquals('', resp.body) + self.assertEqual('', resp.body) except ChunkReadTimeout: got_exc = True - self.assert_(got_exc) + self.assertTrue(got_exc) set_http_connect(200, 200, 200, body='lalala', slow=[1.0, 1.0]) resp = req.get_response(self.app) got_exc = False try: - self.assertEquals(resp.body, 'lalala') + self.assertEqual(resp.body, 'lalala') except ChunkReadTimeout: got_exc = True - self.assert_(not got_exc) + self.assertTrue(not got_exc) set_http_connect(200, 200, 200, body='lalala', slow=[1.0, 1.0], etags=['a', 'a', 'a']) resp = req.get_response(self.app) got_exc = False try: - self.assertEquals(resp.body, 'lalala') + self.assertEqual(resp.body, 'lalala') except ChunkReadTimeout: got_exc = True - self.assert_(not got_exc) + self.assertTrue(not got_exc) set_http_connect(200, 200, 200, body='lalala', slow=[1.0, 1.0], etags=['a', 'b', 'a']) resp = req.get_response(self.app) got_exc = False try: - self.assertEquals(resp.body, 'lalala') + self.assertEqual(resp.body, 'lalala') except ChunkReadTimeout: got_exc = True - self.assert_(not got_exc) + self.assertTrue(not got_exc) req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'}) set_http_connect(200, 200, 200, body='lalala', @@ -3672,7 +3590,7 @@ class TestObjectController(unittest.TestCase): resp.body except ChunkReadTimeout: got_exc = True - self.assert_(got_exc) + self.assertTrue(got_exc) def test_node_write_timeout(self): with save_globals(): @@ -3697,7 +3615,7 @@ class TestObjectController(unittest.TestCase): 
self.app.update_request(req) set_http_connect(200, 200, 201, 201, 201, slow=0.1) resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) self.app.node_timeout = 0.1 req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -3707,14 +3625,14 @@ class TestObjectController(unittest.TestCase): self.app.update_request(req) set_http_connect(201, 201, 201, slow=1.0) resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 503) + self.assertEqual(resp.status_int, 503) def test_node_request_setting(self): baseapp = proxy_server.Application({'request_node_count': '3'}, FakeMemcache(), container_ring=FakeRing(), account_ring=FakeRing()) - self.assertEquals(baseapp.request_node_count(3), 3) + self.assertEqual(baseapp.request_node_count(3), 3) def test_iter_nodes(self): with save_globals(): @@ -3728,7 +3646,7 @@ class TestObjectController(unittest.TestCase): for node in self.app.iter_nodes(object_ring, partition): collected_nodes.append(node) - self.assertEquals(len(collected_nodes), 5) + self.assertEqual(len(collected_nodes), 5) object_ring.max_more_nodes = 20 self.app.request_node_count = lambda r: 20 @@ -3739,7 +3657,7 @@ class TestObjectController(unittest.TestCase): for node in self.app.iter_nodes(object_ring, partition): collected_nodes.append(node) - self.assertEquals(len(collected_nodes), 9) + self.assertEqual(len(collected_nodes), 9) # zero error-limited primary nodes -> no handoff warnings self.app.log_handoffs = True @@ -3752,9 +3670,9 @@ class TestObjectController(unittest.TestCase): collected_nodes = [] for node in self.app.iter_nodes(object_ring, partition): collected_nodes.append(node) - self.assertEquals(len(collected_nodes), 7) - self.assertEquals(self.app.logger.log_dict['warning'], []) - self.assertEquals(self.app.logger.get_increments(), []) + self.assertEqual(len(collected_nodes), 7) + self.assertEqual(self.app.logger.log_dict['warning'], []) + 
self.assertEqual(self.app.logger.get_increments(), []) # one error-limited primary node -> one handoff warning self.app.log_handoffs = True @@ -3767,11 +3685,11 @@ class TestObjectController(unittest.TestCase): collected_nodes = [] for node in self.app.iter_nodes(object_ring, partition): collected_nodes.append(node) - self.assertEquals(len(collected_nodes), 7) - self.assertEquals(self.app.logger.log_dict['warning'], [ + self.assertEqual(len(collected_nodes), 7) + self.assertEqual(self.app.logger.log_dict['warning'], [ (('Handoff requested (5)',), {})]) - self.assertEquals(self.app.logger.get_increments(), - ['handoff_count']) + self.assertEqual(self.app.logger.get_increments(), + ['handoff_count']) # two error-limited primary nodes -> two handoff warnings self.app.log_handoffs = True @@ -3785,13 +3703,13 @@ class TestObjectController(unittest.TestCase): collected_nodes = [] for node in self.app.iter_nodes(object_ring, partition): collected_nodes.append(node) - self.assertEquals(len(collected_nodes), 7) - self.assertEquals(self.app.logger.log_dict['warning'], [ + self.assertEqual(len(collected_nodes), 7) + self.assertEqual(self.app.logger.log_dict['warning'], [ (('Handoff requested (5)',), {}), (('Handoff requested (6)',), {})]) - self.assertEquals(self.app.logger.get_increments(), - ['handoff_count', - 'handoff_count']) + self.assertEqual(self.app.logger.get_increments(), + ['handoff_count', + 'handoff_count']) # all error-limited primary nodes -> four handoff warnings, # plus a handoff-all metric @@ -3807,18 +3725,18 @@ class TestObjectController(unittest.TestCase): collected_nodes = [] for node in self.app.iter_nodes(object_ring, partition): collected_nodes.append(node) - self.assertEquals(len(collected_nodes), 10) - self.assertEquals(self.app.logger.log_dict['warning'], [ + self.assertEqual(len(collected_nodes), 10) + self.assertEqual(self.app.logger.log_dict['warning'], [ (('Handoff requested (7)',), {}), (('Handoff requested (8)',), {}), (('Handoff requested 
(9)',), {}), (('Handoff requested (10)',), {})]) - self.assertEquals(self.app.logger.get_increments(), - ['handoff_count', - 'handoff_count', - 'handoff_count', - 'handoff_count', - 'handoff_all_count']) + self.assertEqual(self.app.logger.get_increments(), + ['handoff_count', + 'handoff_count', + 'handoff_count', + 'handoff_count', + 'handoff_all_count']) finally: object_ring.max_more_nodes = 0 @@ -3855,8 +3773,8 @@ class TestObjectController(unittest.TestCase): if not second_nodes: self.app.error_limit(node, 'test') second_nodes.append(node) - self.assertEquals(len(first_nodes), 6) - self.assertEquals(len(second_nodes), 7) + self.assertEqual(len(first_nodes), 6) + self.assertEqual(len(second_nodes), 7) def test_iter_nodes_with_custom_node_iter(self): object_ring = self.app.get_object_ring(None) @@ -3886,7 +3804,7 @@ class TestObjectController(unittest.TestCase): 'Object', headers=[{'X-Test': '1'}, {'X-Test': '2'}, {'X-Test': '3'}]) - self.assertEquals(resp.headers['X-Test'], '1') + self.assertEqual(resp.headers['X-Test'], '1') def test_best_response_sets_etag(self): controller = ReplicatedObjectController( @@ -3894,12 +3812,12 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'}) resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3, 'Object') - self.assertEquals(resp.etag, None) + self.assertEqual(resp.etag, None) resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3, 'Object', etag='68b329da9893e34099c7d8ad5cb9c940' ) - self.assertEquals(resp.etag, '68b329da9893e34099c7d8ad5cb9c940') + self.assertEqual(resp.etag, '68b329da9893e34099c7d8ad5cb9c940') def test_proxy_passes_content_type(self): with save_globals(): @@ -3907,16 +3825,16 @@ class TestObjectController(unittest.TestCase): self.app.update_request(req) set_http_connect(200, 200, 200) resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.content_type, 
'x-application/test') + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.content_type, 'x-application/test') set_http_connect(200, 200, 200) resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.content_length, 0) + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.content_length, 0) set_http_connect(200, 200, 200, slow=True) resp = req.get_response(self.app) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.content_length, 4) + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.content_length, 4) def test_proxy_passes_content_length_on_head(self): with save_globals(): @@ -3927,12 +3845,12 @@ class TestObjectController(unittest.TestCase): self.app, 'account', 'container', 'object') set_http_connect(200, 200, 200) resp = controller.HEAD(req) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.content_length, 0) + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.content_length, 0) set_http_connect(200, 200, 200, slow=True) resp = controller.HEAD(req) - self.assertEquals(resp.status_int, 200) - self.assertEquals(resp.content_length, 4) + self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.content_length, 4) def test_error_limiting(self): with save_globals(): @@ -3942,20 +3860,22 @@ class TestObjectController(unittest.TestCase): object_ring = controller.app.get_object_ring(None) self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200), 200) - self.assertEquals( + self.assertEqual( node_error_count(controller.app, object_ring.devs[0]), 2) - self.assert_(node_last_error(controller.app, object_ring.devs[0]) - is not None) + self.assertTrue( + node_last_error(controller.app, object_ring.devs[0]) + is not None) for _junk in range(self.app.error_suppression_limit): self.assert_status_map(controller.HEAD, (200, 200, 503, 503, 503), 503) - self.assertEquals( + self.assertEqual( node_error_count(controller.app, 
object_ring.devs[0]), self.app.error_suppression_limit + 1) self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200), 503) - self.assert_(node_last_error(controller.app, object_ring.devs[0]) - is not None) + self.assertTrue( + node_last_error(controller.app, object_ring.devs[0]) + is not None) self.assert_status_map(controller.PUT, (200, 200, 200, 201, 201, 201), 503) self.assert_status_map(controller.POST, @@ -3979,14 +3899,15 @@ class TestObjectController(unittest.TestCase): object_ring = controller.app.get_object_ring(None) self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200), 200) - self.assertEquals( + self.assertEqual( node_error_count(controller.app, object_ring.devs[0]), 2) - self.assert_(node_last_error(controller.app, object_ring.devs[0]) - is not None) + self.assertTrue( + node_last_error(controller.app, object_ring.devs[0]) + is not None) for _junk in range(self.app.error_suppression_limit): self.assert_status_map(controller.HEAD, (200, 200, 503, 503, 503), 503) - self.assertEquals( + self.assertEqual( node_error_count(controller.app, object_ring.devs[0]), self.app.error_suppression_limit + 1) @@ -4011,12 +3932,13 @@ class TestObjectController(unittest.TestCase): # 2, not 1, because assert_status_map() calls the method twice odevs = object_ring.devs - self.assertEquals(node_error_count(controller.app, odevs[0]), 2) - self.assertEquals(node_error_count(controller.app, odevs[1]), 0) - self.assertEquals(node_error_count(controller.app, odevs[2]), 0) - self.assert_(node_last_error(controller.app, odevs[0]) is not None) - self.assert_(node_last_error(controller.app, odevs[1]) is None) - self.assert_(node_last_error(controller.app, odevs[2]) is None) + self.assertEqual(node_error_count(controller.app, odevs[0]), 2) + self.assertEqual(node_error_count(controller.app, odevs[1]), 0) + self.assertEqual(node_error_count(controller.app, odevs[2]), 0) + self.assertTrue( + node_last_error(controller.app, odevs[0]) is not None) + 
self.assertTrue(node_last_error(controller.app, odevs[1]) is None) + self.assertTrue(node_last_error(controller.app, odevs[2]) is None) def test_PUT_error_limiting_last_node(self): with save_globals(): @@ -4030,12 +3952,13 @@ class TestObjectController(unittest.TestCase): # 2, not 1, because assert_status_map() calls the method twice odevs = object_ring.devs - self.assertEquals(node_error_count(controller.app, odevs[0]), 0) - self.assertEquals(node_error_count(controller.app, odevs[1]), 0) - self.assertEquals(node_error_count(controller.app, odevs[2]), 2) - self.assert_(node_last_error(controller.app, odevs[0]) is None) - self.assert_(node_last_error(controller.app, odevs[1]) is None) - self.assert_(node_last_error(controller.app, odevs[2]) is not None) + self.assertEqual(node_error_count(controller.app, odevs[0]), 0) + self.assertEqual(node_error_count(controller.app, odevs[1]), 0) + self.assertEqual(node_error_count(controller.app, odevs[2]), 2) + self.assertTrue(node_last_error(controller.app, odevs[0]) is None) + self.assertTrue(node_last_error(controller.app, odevs[1]) is None) + self.assertTrue( + node_last_error(controller.app, odevs[2]) is not None) def test_acc_or_con_missing_returns_404(self): with save_globals(): @@ -4048,7 +3971,7 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'DELETE'}) self.app.update_request(req) resp = getattr(controller, 'DELETE')(req) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) set_http_connect(404, 404, 404) # acct acct acct @@ -4056,7 +3979,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}) resp = getattr(controller, 'DELETE')(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) set_http_connect(503, 404, 404) # acct acct acct @@ -4064,7 +3987,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 
'DELETE'}) resp = getattr(controller, 'DELETE')(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) set_http_connect(503, 503, 404) # acct acct acct @@ -4072,7 +3995,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}) resp = getattr(controller, 'DELETE')(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) set_http_connect(503, 503, 503) # acct acct acct @@ -4080,7 +4003,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}) resp = getattr(controller, 'DELETE')(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) set_http_connect(200, 200, 204, 204, 204) # acct cont obj obj obj @@ -4088,7 +4011,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}) resp = getattr(controller, 'DELETE')(req) - self.assertEquals(resp.status_int, 204) + self.assertEqual(resp.status_int, 204) set_http_connect(200, 404, 404, 404) # acct cont cont cont @@ -4096,7 +4019,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}) resp = getattr(controller, 'DELETE')(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) set_http_connect(200, 503, 503, 503) # acct cont cont cont @@ -4104,7 +4027,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}) resp = getattr(controller, 'DELETE')(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) for dev in self.app.account_ring.devs: set_node_errors( @@ -4117,7 +4040,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}) resp = getattr(controller, 'DELETE')(req) - 
self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) for dev in self.app.account_ring.devs: set_node_errors(self.app, dev, 0, last_error=None) @@ -4132,7 +4055,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}) resp = getattr(controller, 'DELETE')(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_PUT_POST_requires_container_exist(self): with save_globals(): @@ -4146,7 +4069,7 @@ class TestObjectController(unittest.TestCase): environ={'REQUEST_METHOD': 'PUT'}) self.app.update_request(req) resp = controller.PUT(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) set_http_connect(200, 404, 404, 404, 200, 200) req = Request.blank('/v1/a/c/o', @@ -4154,7 +4077,7 @@ class TestObjectController(unittest.TestCase): headers={'Content-Type': 'text/plain'}) self.app.update_request(req) resp = controller.POST(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_PUT_POST_as_copy_requires_container_exist(self): with save_globals(): @@ -4165,7 +4088,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}) self.app.update_request(req) resp = controller.PUT(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) set_http_connect(200, 404, 404, 404, 200, 200, 200, 200, 200, 200) req = Request.blank('/v1/a/c/o', @@ -4173,7 +4096,7 @@ class TestObjectController(unittest.TestCase): headers={'Content-Type': 'text/plain'}) self.app.update_request(req) resp = controller.POST(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_bad_metadata(self): with save_globals(): @@ -4185,7 +4108,7 @@ class TestObjectController(unittest.TestCase): headers={'Content-Length': '0'}) self.app.update_request(req) resp = controller.PUT(req) - 
self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) set_http_connect(201, 201, 201) req = Request.blank( @@ -4195,7 +4118,7 @@ class TestObjectController(unittest.TestCase): 'a' * constraints.MAX_META_NAME_LENGTH): 'v'}) self.app.update_request(req) resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) set_http_connect(201, 201, 201) req = Request.blank( '/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4205,7 +4128,7 @@ class TestObjectController(unittest.TestCase): 'a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'}) self.app.update_request(req) resp = controller.PUT(req) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) set_http_connect(201, 201, 201) req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4214,7 +4137,7 @@ class TestObjectController(unittest.TestCase): constraints.MAX_META_VALUE_LENGTH}) self.app.update_request(req) resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) set_http_connect(201, 201, 201) req = Request.blank( '/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4223,7 +4146,7 @@ class TestObjectController(unittest.TestCase): (constraints.MAX_META_VALUE_LENGTH + 1)}) self.app.update_request(req) resp = controller.PUT(req) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) set_http_connect(201, 201, 201) headers = {'Content-Length': '0'} @@ -4233,7 +4156,7 @@ class TestObjectController(unittest.TestCase): headers=headers) self.app.update_request(req) resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) set_http_connect(201, 201, 201) headers = {'Content-Length': '0'} for x in range(constraints.MAX_META_COUNT + 1): @@ -4242,7 +4165,7 @@ class TestObjectController(unittest.TestCase): headers=headers) self.app.update_request(req) resp = controller.PUT(req) - 
self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) set_http_connect(201, 201, 201) headers = {'Content-Length': '0'} @@ -4261,7 +4184,7 @@ class TestObjectController(unittest.TestCase): headers=headers) self.app.update_request(req) resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) set_http_connect(201, 201, 201) headers['X-Object-Meta-a'] = \ 'a' * (constraints.MAX_META_OVERALL_SIZE - size) @@ -4269,7 +4192,7 @@ class TestObjectController(unittest.TestCase): headers=headers) self.app.update_request(req) resp = controller.PUT(req) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) @contextmanager def controller_context(self, req, *args, **kwargs): @@ -4299,8 +4222,8 @@ class TestObjectController(unittest.TestCase): # acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o') def test_basic_put_with_x_copy_from_account(self): req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4311,9 +4234,9 @@ class TestObjectController(unittest.TestCase): # acct cont acc1 con1 objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o') - self.assertEquals(resp.headers['x-copied-from-account'], 'a') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o') + self.assertEqual(resp.headers['x-copied-from-account'], 'a') def test_basic_put_with_x_copy_from_across_container(self): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4323,8 +4246,8 @@ class 
TestObjectController(unittest.TestCase): # acct cont conc objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c2/o') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c2/o') def test_basic_put_with_x_copy_from_across_container_and_account(self): req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4335,9 +4258,9 @@ class TestObjectController(unittest.TestCase): # acct cont acc1 con1 objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c2/o') - self.assertEquals(resp.headers['x-copied-from-account'], 'a') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c2/o') + self.assertEqual(resp.headers['x-copied-from-account'], 'a') def test_copy_non_zero_content_length(self): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4347,7 +4270,7 @@ class TestObjectController(unittest.TestCase): # acct cont with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_copy_non_zero_content_length_with_account(self): req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4358,7 +4281,7 @@ class TestObjectController(unittest.TestCase): # acct cont with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_copy_with_slashes_in_x_copy_from(self): # extra source path parsing @@ -4369,8 +4292,8 @@ class TestObjectController(unittest.TestCase): # acct cont objc objc objc obj obj obj with 
self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2') def test_copy_with_slashes_in_x_copy_from_and_account(self): # extra source path parsing @@ -4382,9 +4305,9 @@ class TestObjectController(unittest.TestCase): # acct cont acc1 con1 objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2') - self.assertEquals(resp.headers['x-copied-from-account'], 'a') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2') + self.assertEqual(resp.headers['x-copied-from-account'], 'a') def test_copy_with_spaces_in_x_copy_from(self): # space in soure path @@ -4395,8 +4318,8 @@ class TestObjectController(unittest.TestCase): # acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o%20o2') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o%20o2') def test_copy_with_spaces_in_x_copy_from_and_account(self): # space in soure path @@ -4408,9 +4331,9 @@ class TestObjectController(unittest.TestCase): # acct cont acc1 con1 objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o%20o2') - self.assertEquals(resp.headers['x-copied-from-account'], 'a') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o%20o2') + 
self.assertEqual(resp.headers['x-copied-from-account'], 'a') def test_copy_with_leading_slash_in_x_copy_from(self): # repeat tests with leading / @@ -4421,8 +4344,8 @@ class TestObjectController(unittest.TestCase): # acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o') def test_copy_with_leading_slash_in_x_copy_from_and_account(self): # repeat tests with leading / @@ -4434,9 +4357,9 @@ class TestObjectController(unittest.TestCase): # acct cont acc1 con1 objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o') - self.assertEquals(resp.headers['x-copied-from-account'], 'a') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o') + self.assertEqual(resp.headers['x-copied-from-account'], 'a') def test_copy_with_leading_slash_and_slashes_in_x_copy_from(self): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4446,8 +4369,8 @@ class TestObjectController(unittest.TestCase): # acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2') def test_copy_with_leading_slash_and_slashes_in_x_copy_from_acct(self): req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4458,9 +4381,9 @@ class TestObjectController(unittest.TestCase): # acct cont acc1 con1 objc objc objc obj obj obj with 
self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2') - self.assertEquals(resp.headers['x-copied-from-account'], 'a') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2') + self.assertEqual(resp.headers['x-copied-from-account'], 'a') def test_copy_with_no_object_in_x_copy_from(self): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4472,7 +4395,7 @@ class TestObjectController(unittest.TestCase): try: controller.PUT(req) except HTTPException as resp: - self.assertEquals(resp.status_int // 100, 4) # client error + self.assertEqual(resp.status_int // 100, 4) # client error else: raise self.fail('Invalid X-Copy-From did not raise ' 'client error') @@ -4488,7 +4411,7 @@ class TestObjectController(unittest.TestCase): try: controller.PUT(req) except HTTPException as resp: - self.assertEquals(resp.status_int // 100, 4) # client error + self.assertEqual(resp.status_int // 100, 4) # client error else: raise self.fail('Invalid X-Copy-From did not raise ' 'client error') @@ -4501,7 +4424,7 @@ class TestObjectController(unittest.TestCase): # acct cont objc objc objc with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 503) + self.assertEqual(resp.status_int, 503) def test_copy_server_error_reading_source_and_account(self): req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4512,7 +4435,7 @@ class TestObjectController(unittest.TestCase): # acct cont acct cont objc objc objc with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 503) + self.assertEqual(resp.status_int, 503) def test_copy_not_found_reading_source(self): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4523,7 +4446,7 @@ class 
TestObjectController(unittest.TestCase): # acct cont objc objc objc with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_copy_not_found_reading_source_and_account(self): req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4535,7 +4458,7 @@ class TestObjectController(unittest.TestCase): # acct cont acct cont objc objc objc with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_copy_with_some_missing_sources(self): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4545,7 +4468,7 @@ class TestObjectController(unittest.TestCase): # acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_copy_with_some_missing_sources_and_account(self): req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4556,7 +4479,7 @@ class TestObjectController(unittest.TestCase): # acct cont acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_copy_with_object_metadata(self): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4568,10 +4491,10 @@ class TestObjectController(unittest.TestCase): # acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers.get('x-object-meta-test'), 'testing') - self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay') - 
self.assertEquals(resp.headers.get('x-delete-at'), '9876543210') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers.get('x-object-meta-test'), 'testing') + self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay') + self.assertEqual(resp.headers.get('x-delete-at'), '9876543210') def test_copy_with_object_metadata_and_account(self): req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'}, @@ -4584,10 +4507,10 @@ class TestObjectController(unittest.TestCase): # acct cont acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.PUT(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers.get('x-object-meta-test'), 'testing') - self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay') - self.assertEquals(resp.headers.get('x-delete-at'), '9876543210') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers.get('x-object-meta-test'), 'testing') + self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay') + self.assertEqual(resp.headers.get('x-delete-at'), '9876543210') @_limit_max_file_size def test_copy_source_larger_than_max_file_size(self): @@ -4617,7 +4540,7 @@ class TestObjectController(unittest.TestCase): resp = controller.PUT(req) except HTTPException as resp: pass - self.assertEquals(resp.status_int, 413) + self.assertEqual(resp.status_int, 413) def test_basic_COPY(self): req = Request.blank('/v1/a/c/o', @@ -4627,8 +4550,8 @@ class TestObjectController(unittest.TestCase): # acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o') def test_basic_COPY_account(self): req = Request.blank('/v1/a/c/o', @@ -4639,9 +4562,9 @@ class 
TestObjectController(unittest.TestCase): # acct cont acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o') - self.assertEquals(resp.headers['x-copied-from-account'], 'a') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o') + self.assertEqual(resp.headers['x-copied-from-account'], 'a') def test_COPY_across_containers(self): req = Request.blank('/v1/a/c/o', @@ -4651,8 +4574,8 @@ class TestObjectController(unittest.TestCase): # acct cont c2 objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o') def test_COPY_source_with_slashes_in_name(self): req = Request.blank('/v1/a/c/o/o2', @@ -4662,8 +4585,8 @@ class TestObjectController(unittest.TestCase): # acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2') def test_COPY_account_source_with_slashes_in_name(self): req = Request.blank('/v1/a/c/o/o2', @@ -4674,9 +4597,9 @@ class TestObjectController(unittest.TestCase): # acct cont acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2') - self.assertEquals(resp.headers['x-copied-from-account'], 'a') + self.assertEqual(resp.status_int, 201) + 
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2') + self.assertEqual(resp.headers['x-copied-from-account'], 'a') def test_COPY_destination_leading_slash(self): req = Request.blank('/v1/a/c/o', @@ -4686,8 +4609,8 @@ class TestObjectController(unittest.TestCase): # acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o') def test_COPY_account_destination_leading_slash(self): req = Request.blank('/v1/a/c/o', @@ -4698,9 +4621,9 @@ class TestObjectController(unittest.TestCase): # acct cont acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o') - self.assertEquals(resp.headers['x-copied-from-account'], 'a') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o') + self.assertEqual(resp.headers['x-copied-from-account'], 'a') def test_COPY_source_with_slashes_destination_leading_slash(self): req = Request.blank('/v1/a/c/o/o2', @@ -4710,8 +4633,8 @@ class TestObjectController(unittest.TestCase): # acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2') def test_COPY_account_source_with_slashes_destination_leading_slash(self): req = Request.blank('/v1/a/c/o/o2', @@ -4722,9 +4645,9 @@ class TestObjectController(unittest.TestCase): # acct cont acct cont objc objc objc obj obj obj with self.controller_context(req, 
*status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2') - self.assertEquals(resp.headers['x-copied-from-account'], 'a') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2') + self.assertEqual(resp.headers['x-copied-from-account'], 'a') def test_COPY_no_object_in_destination(self): req = Request.blank('/v1/a/c/o', @@ -4751,7 +4674,7 @@ class TestObjectController(unittest.TestCase): # acct cont objc objc objc with self.controller_context(req, *status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 503) + self.assertEqual(resp.status_int, 503) def test_COPY_account_server_error_reading_source(self): req = Request.blank('/v1/a/c/o', @@ -4762,7 +4685,7 @@ class TestObjectController(unittest.TestCase): # acct cont acct cont objc objc objc with self.controller_context(req, *status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 503) + self.assertEqual(resp.status_int, 503) def test_COPY_not_found_reading_source(self): req = Request.blank('/v1/a/c/o', @@ -4772,7 +4695,7 @@ class TestObjectController(unittest.TestCase): # acct cont objc objc objc with self.controller_context(req, *status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_COPY_account_not_found_reading_source(self): req = Request.blank('/v1/a/c/o', @@ -4783,7 +4706,7 @@ class TestObjectController(unittest.TestCase): # acct cont acct cont objc objc objc with self.controller_context(req, *status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_COPY_with_some_missing_sources(self): req = Request.blank('/v1/a/c/o', @@ -4793,7 +4716,7 @@ class TestObjectController(unittest.TestCase): # acct cont objc objc objc 
obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_COPY_account_with_some_missing_sources(self): req = Request.blank('/v1/a/c/o', @@ -4804,7 +4727,7 @@ class TestObjectController(unittest.TestCase): # acct cont acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) def test_COPY_with_metadata(self): req = Request.blank('/v1/a/c/o', @@ -4815,11 +4738,11 @@ class TestObjectController(unittest.TestCase): # acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers.get('x-object-meta-test'), - 'testing') - self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay') - self.assertEquals(resp.headers.get('x-delete-at'), '9876543210') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers.get('x-object-meta-test'), + 'testing') + self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay') + self.assertEqual(resp.headers.get('x-delete-at'), '9876543210') def test_COPY_account_with_metadata(self): req = Request.blank('/v1/a/c/o', @@ -4831,11 +4754,11 @@ class TestObjectController(unittest.TestCase): # acct cont acct cont objc objc objc obj obj obj with self.controller_context(req, *status_list) as controller: resp = controller.COPY(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers.get('x-object-meta-test'), - 'testing') - self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay') - self.assertEquals(resp.headers.get('x-delete-at'), '9876543210') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers.get('x-object-meta-test'), + 'testing') + 
self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay') + self.assertEqual(resp.headers.get('x-delete-at'), '9876543210') @_limit_max_file_size def test_COPY_source_larger_than_max_file_size(self): @@ -4861,7 +4784,7 @@ class TestObjectController(unittest.TestCase): resp = controller.COPY(req) except HTTPException as resp: pass - self.assertEquals(resp.status_int, 413) + self.assertEqual(resp.status_int, 413) @_limit_max_file_size def test_COPY_account_source_larger_than_max_file_size(self): @@ -4888,7 +4811,7 @@ class TestObjectController(unittest.TestCase): resp = controller.COPY(req) except HTTPException as resp: pass - self.assertEquals(resp.status_int, 413) + self.assertEqual(resp.status_int, 413) def test_COPY_newest(self): with save_globals(): @@ -4900,14 +4823,14 @@ class TestObjectController(unittest.TestCase): req.account = 'a' controller.object_name = 'o' set_http_connect(200, 200, 200, 200, 200, 201, 201, 201, - #act cont objc objc objc obj obj obj + # act cont objc objc objc obj obj obj timestamps=('1', '1', '1', '3', '2', '4', '4', '4')) self.app.memcache.store = {} resp = controller.COPY(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from-last-modified'], - '3') + self.assertEqual(resp.status_int, 201) + self.assertEqual(resp.headers['x-copied-from-last-modified'], + '3') def test_COPY_account_newest(self): with save_globals(): @@ -4920,14 +4843,14 @@ class TestObjectController(unittest.TestCase): req.account = 'a' controller.object_name = 'o' set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201, - #act cont acct cont objc objc objc obj obj obj + # act cont acct cont objc objc objc obj obj obj timestamps=('1', '1', '1', '1', '3', '2', '1', '4', '4', '4')) self.app.memcache.store = {} resp = controller.COPY(req) - self.assertEquals(resp.status_int, 201) - self.assertEquals(resp.headers['x-copied-from-last-modified'], - '3') + self.assertEqual(resp.status_int, 201) + 
self.assertEqual(resp.headers['x-copied-from-last-modified'], + '3') def test_COPY_delete_at(self): with save_globals(): @@ -4952,8 +4875,8 @@ class TestObjectController(unittest.TestCase): for method, path, given_headers in backend_requests: if method != 'PUT': continue - self.assertEquals(given_headers.get('X-Delete-At'), - '9876543210') + self.assertEqual(given_headers.get('X-Delete-At'), + '9876543210') self.assertTrue('X-Delete-At-Host' in given_headers) self.assertTrue('X-Delete-At-Device' in given_headers) self.assertTrue('X-Delete-At-Partition' in given_headers) @@ -4983,8 +4906,8 @@ class TestObjectController(unittest.TestCase): for method, path, given_headers in backend_requests: if method != 'PUT': continue - self.assertEquals(given_headers.get('X-Delete-At'), - '9876543210') + self.assertEqual(given_headers.get('X-Delete-At'), + '9876543210') self.assertTrue('X-Delete-At-Host' in given_headers) self.assertTrue('X-Delete-At-Device' in given_headers) self.assertTrue('X-Delete-At-Partition' in given_headers) @@ -5024,21 +4947,21 @@ class TestObjectController(unittest.TestCase): self.app.memcache.store = {} self.app.update_request(req) res = controller.PUT(req) - self.assertEquals(res.status_int // 100, 2) # success + self.assertEqual(res.status_int // 100, 2) # success # test 413 entity to large set_http_connect(201, 201, 201, 201) req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'COPY'}, headers={'Transfer-Encoding': 'chunked', - 'Content-Type': 'foo/bar'}) + 'Content-Type': 'foo/bar'}) req.body_file = ChunkedFile(11) self.app.memcache.store = {} self.app.update_request(req) with mock.patch('swift.common.constraints.MAX_FILE_SIZE', 10): res = controller.PUT(req) - self.assertEquals(res.status_int, 413) + self.assertEqual(res.status_int, 413) @unpatch_policies def test_chunked_put_bad_version(self): @@ -5052,7 +4975,7 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 412' - 
self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) @unpatch_policies def test_chunked_put_bad_path(self): @@ -5066,7 +4989,7 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 404' - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) @unpatch_policies def test_chunked_put_bad_utf8(self): @@ -5081,7 +5004,7 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 412' - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) @unpatch_policies def test_chunked_put_bad_path_no_controller(self): @@ -5096,7 +5019,7 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 412' - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) @unpatch_policies def test_chunked_put_bad_method(self): @@ -5111,7 +5034,7 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 405' - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) @unpatch_policies def test_chunked_put_unhandled_exception(self): @@ -5134,7 +5057,7 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 500' - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) prosrv.update_request = orig_update_request @unpatch_policies @@ -5152,8 +5075,8 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 204' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('\r\nContent-Length: 0\r\n' in headers) + self.assertEqual(headers[:len(exp)], exp) + self.assertTrue('\r\nContent-Length: 0\r\n' in headers) @unpatch_policies def test_chunked_put_utf8_all_the_way_down(self): @@ -5176,7 +5099,7 @@ class 
TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) # List account with ustr container (test plain) sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -5186,9 +5109,9 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) containers = fd.read().split('\n') - self.assert_(ustr in containers) + self.assertTrue(ustr in containers) # List account with ustr container (test json) sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -5198,9 +5121,9 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) listing = json.loads(fd.read()) - self.assert_(ustr.decode('utf8') in [l['name'] for l in listing]) + self.assertTrue(ustr.decode('utf8') in [l['name'] for l in listing]) # List account with ustr container (test xml) sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -5210,8 +5133,8 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('%s' % ustr in fd.read()) + self.assertEqual(headers[:len(exp)], exp) + self.assertTrue('%s' % ustr in fd.read()) # Create ustr object with ustr metadata in ustr container sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -5223,7 +5146,7 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) # List ustr container with ustr object (test 
plain) sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -5233,9 +5156,9 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) objects = fd.read().split('\n') - self.assert_(ustr in objects) + self.assertTrue(ustr in objects) # List ustr container with ustr object (test json) sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -5246,9 +5169,9 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) listing = json.loads(fd.read()) - self.assertEquals(listing[0]['name'], ustr.decode('utf8')) + self.assertEqual(listing[0]['name'], ustr.decode('utf8')) # List ustr container with ustr object (test xml) sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -5259,8 +5182,8 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('%s' % ustr in fd.read()) + self.assertEqual(headers[:len(exp)], exp) + self.assertTrue('%s' % ustr in fd.read()) # Retrieve ustr object with ustr metadata sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -5271,9 +5194,9 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('\r\nX-Object-Meta-%s: %s\r\n' % - (quote(ustr_short).lower(), quote(ustr)) in headers) + self.assertEqual(headers[:len(exp)], exp) + self.assertTrue('\r\nX-Object-Meta-%s: %s\r\n' % + (quote(ustr_short).lower(), quote(ustr)) in headers) @unpatch_policies def test_chunked_put_chunked_put(self): @@ -5292,7 +5215,7 @@ class 
TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) # Ensure we get what we put sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -5301,396 +5224,9 @@ class TestObjectController(unittest.TestCase): fd.flush() headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) body = fd.read() - self.assertEquals(body, 'oh hai123456789abcdef') - - @unpatch_policies - def test_version_manifest(self, oc='versions', vc='vers', o='name'): - versions_to_create = 3 - # Create a container for our versioned object testing - (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, - obj2lis, obj3lis) = _test_sockets - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - pre = quote('%03x' % len(o)) - osub = '%s/sub' % o - presub = quote('%03x' % len(osub)) - osub = quote(osub) - presub = quote(presub) - oc = quote(oc) - vc = quote(vc) - fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Storage-Token: t\r\n' - 'Content-Length: 0\r\nX-Versions-Location: %s\r\n\r\n' - % (oc, vc)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # check that the header was set - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Storage-Token: t\r\n\r\n\r\n' % oc) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 2' # 2xx series response - self.assertEquals(headers[:len(exp)], exp) - self.assert_('X-Versions-Location: %s' % vc in headers) - # make the container for the object versions - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/%s 
HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Storage-Token: t\r\n' - 'Content-Length: 0\r\n\r\n' % vc) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # Create the versioned file - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Storage-Token: ' - 't\r\nContent-Length: 5\r\nContent-Type: text/jibberish0\r\n' - 'X-Object-Meta-Foo: barbaz\r\n\r\n00000\r\n' % (oc, o)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # Create the object versions - for segment in range(1, versions_to_create): - sleep(.01) # guarantee that the timestamp changes - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Storage-Token: ' - 't\r\nContent-Length: 5\r\nContent-Type: text/jibberish%s' - '\r\n\r\n%05d\r\n' % (oc, o, segment, segment)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # Ensure retrieving the manifest file gets the latest version - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n' - '\r\n' % (oc, o)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('Content-Type: text/jibberish%s' % segment in headers) - self.assert_('X-Object-Meta-Foo: barbaz' not in headers) - body = fd.read() - self.assertEquals(body, '%05d' % segment) - # Ensure we have the right number of versions saved - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: ' 
- 'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n' - % (vc, pre, o)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - body = fd.read() - versions = [x for x in body.split('\n') if x] - self.assertEquals(len(versions), versions_to_create - 1) - # copy a version and make sure the version info is stripped - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('COPY /v1/a/%s/%s HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: ' - 't\r\nDestination: %s/copied_name\r\n' - 'Content-Length: 0\r\n\r\n' % (oc, o, oc)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 2' # 2xx series response to the COPY - self.assertEquals(headers[:len(exp)], exp) - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/%s/copied_name HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n' - % oc) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - body = fd.read() - self.assertEquals(body, '%05d' % segment) - # post and make sure it's updated - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('POST /v1/a/%s/%s HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: ' - 't\r\nContent-Type: foo/bar\r\nContent-Length: 0\r\n' - 'X-Object-Meta-Bar: foo\r\n\r\n' % (oc, o)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 2' # 2xx series response to the POST - self.assertEquals(headers[:len(exp)], exp) - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n' - % (oc, o)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - 
self.assert_('Content-Type: foo/bar' in headers) - self.assert_('X-Object-Meta-Bar: foo' in headers) - body = fd.read() - self.assertEquals(body, '%05d' % segment) - # Delete the object versions - for segment in range(versions_to_create - 1, 0, -1): - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r' - '\nConnection: close\r\nX-Storage-Token: t\r\n\r\n' - % (oc, o)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 2' # 2xx series response - self.assertEquals(headers[:len(exp)], exp) - # Ensure retrieving the manifest file gets the latest version - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Auth-Token: t\r\n\r\n' - % (oc, o)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 200' - self.assertEquals(headers[:len(exp)], exp) - self.assert_('Content-Type: text/jibberish%s' % (segment - 1) - in headers) - body = fd.read() - self.assertEquals(body, '%05d' % (segment - 1)) - # Ensure we have the right number of versions saved - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r' - '\n' % (vc, pre, o)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 2' # 2xx series response - self.assertEquals(headers[:len(exp)], exp) - body = fd.read() - versions = [x for x in body.split('\n') if x] - self.assertEquals(len(versions), segment - 1) - # there is now one segment left (in the manifest) - # Ensure we have no saved versions - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n' - % (vc, pre, o)) - fd.flush() - headers = 
readuntil2crlfs(fd) - exp = 'HTTP/1.1 204 No Content' - self.assertEquals(headers[:len(exp)], exp) - # delete the last verision - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % (oc, o)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 2' # 2xx series response - self.assertEquals(headers[:len(exp)], exp) - # Ensure it's all gone - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n' - % (oc, o)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 404' - self.assertEquals(headers[:len(exp)], exp) - - # make sure dlo manifest files don't get versioned - for _junk in range(1, versions_to_create): - sleep(.01) # guarantee that the timestamp changes - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Storage-Token: ' - 't\r\nContent-Length: 0\r\n' - 'Content-Type: text/jibberish0\r\n' - 'Foo: barbaz\r\nX-Object-Manifest: %s/%s/\r\n\r\n' - % (oc, o, oc, o)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - - # Ensure we have no saved versions - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n' - % (vc, pre, o)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 204 No Content' - self.assertEquals(headers[:len(exp)], exp) - - # DELETE v1/a/c/obj shouldn't delete v1/a/c/obj/sub versions - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: ' - 
'localhost\r\nConnection: close\r\nX-Storage-Token: ' - 't\r\nContent-Length: 5\r\nContent-Type: text/jibberish0\r\n' - 'Foo: barbaz\r\n\r\n00000\r\n' % (oc, o)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Storage-Token: ' - 't\r\nContent-Length: 5\r\nContent-Type: text/jibberish0\r\n' - 'Foo: barbaz\r\n\r\n00001\r\n' % (oc, o)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Storage-Token: ' - 't\r\nContent-Length: 4\r\nContent-Type: text/jibberish0\r\n' - 'Foo: barbaz\r\n\r\nsub1\r\n' % (oc, osub)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Storage-Token: ' - 't\r\nContent-Length: 4\r\nContent-Type: text/jibberish0\r\n' - 'Foo: barbaz\r\n\r\nsub2\r\n' % (oc, osub)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % (oc, o)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 2' # 2xx series response - self.assertEquals(headers[:len(exp)], exp) - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: ' - 
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n' - % (vc, presub, osub)) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 2' # 2xx series response - self.assertEquals(headers[:len(exp)], exp) - body = fd.read() - versions = [x for x in body.split('\n') if x] - self.assertEquals(len(versions), 1) - - # Check for when the versions target container doesn't exist - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/%swhoops HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Storage-Token: t\r\n' - 'Content-Length: 0\r\nX-Versions-Location: none\r\n\r\n' % oc) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # Create the versioned file - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/%swhoops/foo HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Storage-Token: ' - 't\r\nContent-Length: 5\r\n\r\n00000\r\n' % oc) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - self.assertEquals(headers[:len(exp)], exp) - # Create another version - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/%swhoops/foo HTTP/1.1\r\nHost: ' - 'localhost\r\nConnection: close\r\nX-Storage-Token: ' - 't\r\nContent-Length: 5\r\n\r\n00001\r\n' % oc) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 412' - self.assertEquals(headers[:len(exp)], exp) - # Delete the object - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('DELETE /v1/a/%swhoops/foo HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % oc) - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 2' # 2xx response - self.assertEquals(headers[:len(exp)], exp) - - @unpatch_policies - def test_version_manifest_utf8(self): - oc = '0_oc_non_ascii\xc2\xa3' - vc = 
'0_vc_non_ascii\xc2\xa3' - o = '0_o_non_ascii\xc2\xa3' - self.test_version_manifest(oc, vc, o) - - @unpatch_policies - def test_version_manifest_utf8_container(self): - oc = '1_oc_non_ascii\xc2\xa3' - vc = '1_vc_ascii' - o = '1_o_ascii' - self.test_version_manifest(oc, vc, o) - - @unpatch_policies - def test_version_manifest_utf8_version_container(self): - oc = '2_oc_ascii' - vc = '2_vc_non_ascii\xc2\xa3' - o = '2_o_ascii' - self.test_version_manifest(oc, vc, o) - - @unpatch_policies - def test_version_manifest_utf8_containers(self): - oc = '3_oc_non_ascii\xc2\xa3' - vc = '3_vc_non_ascii\xc2\xa3' - o = '3_o_ascii' - self.test_version_manifest(oc, vc, o) - - @unpatch_policies - def test_version_manifest_utf8_object(self): - oc = '4_oc_ascii' - vc = '4_vc_ascii' - o = '4_o_non_ascii\xc2\xa3' - self.test_version_manifest(oc, vc, o) - - @unpatch_policies - def test_version_manifest_utf8_version_container_utf_object(self): - oc = '5_oc_ascii' - vc = '5_vc_non_ascii\xc2\xa3' - o = '5_o_non_ascii\xc2\xa3' - self.test_version_manifest(oc, vc, o) - - @unpatch_policies - def test_version_manifest_utf8_container_utf_object(self): - oc = '6_oc_non_ascii\xc2\xa3' - vc = '6_vc_ascii' - o = '6_o_non_ascii\xc2\xa3' - self.test_version_manifest(oc, vc, o) + self.assertEqual(body, 'oh hai123456789abcdef') @unpatch_policies def test_conditional_range_get(self): @@ -5706,7 +5242,7 @@ class TestObjectController(unittest.TestCase): fd.flush() exp = 'HTTP/1.1 201' headers = readuntil2crlfs(fd) - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) # put an object in it sock = connect_tcp(('localhost', prolis.getsockname()[1])) @@ -5722,7 +5258,7 @@ class TestObjectController(unittest.TestCase): fd.flush() exp = 'HTTP/1.1 201' headers = readuntil2crlfs(fd) - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) # request with both If-None-Match and Range etag = md5("abcdefghij").hexdigest() @@ -5738,7 +5274,7 @@ class 
TestObjectController(unittest.TestCase): fd.flush() exp = 'HTTP/1.1 304' headers = readuntil2crlfs(fd) - self.assertEquals(headers[:len(exp)], exp) + self.assertEqual(headers[:len(exp)], exp) def test_mismatched_etags(self): with save_globals(): @@ -5754,14 +5290,13 @@ class TestObjectController(unittest.TestCase): '68b329da9893e34099c7d8ad5cb9c940', '68b329da9893e34099c7d8ad5cb9c941']) resp = controller.PUT(req) - self.assertEquals(resp.status_int // 100, 5) # server error + self.assertEqual(resp.status_int // 100, 5) # server error # req supplies etag, object servers return 422 - mismatch + headers = {'Content-Length': '0', + 'ETag': '68b329da9893e34099c7d8ad5cb9c940'} req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers={ - 'Content-Length': '0', - 'ETag': '68b329da9893e34099c7d8ad5cb9c940', - }) + headers=headers) self.app.update_request(req) set_http_connect(200, 422, 422, 503, etags=['68b329da9893e34099c7d8ad5cb9c940', @@ -5769,7 +5304,7 @@ class TestObjectController(unittest.TestCase): None, None]) resp = controller.PUT(req) - self.assertEquals(resp.status_int // 100, 4) # client error + self.assertEqual(resp.status_int // 100, 4) # client error def test_response_get_accept_ranges_header(self): with save_globals(): @@ -5779,8 +5314,8 @@ class TestObjectController(unittest.TestCase): self.app, 'account', 'container', 'object') set_http_connect(200, 200, 200) resp = controller.GET(req) - self.assert_('accept-ranges' in resp.headers) - self.assertEquals(resp.headers['accept-ranges'], 'bytes') + self.assertTrue('accept-ranges' in resp.headers) + self.assertEqual(resp.headers['accept-ranges'], 'bytes') def test_response_head_accept_ranges_header(self): with save_globals(): @@ -5791,8 +5326,8 @@ class TestObjectController(unittest.TestCase): self.app, 'account', 'container', 'object') set_http_connect(200, 200, 200) resp = controller.HEAD(req) - self.assert_('accept-ranges' in resp.headers) - 
self.assertEquals(resp.headers['accept-ranges'], 'bytes') + self.assertTrue('accept-ranges' in resp.headers) + self.assertEqual(resp.headers['accept-ranges'], 'bytes') def test_GET_calls_authorize(self): called = [False] @@ -5808,7 +5343,7 @@ class TestObjectController(unittest.TestCase): req.environ['swift.authorize'] = authorize self.app.update_request(req) controller.GET(req) - self.assert_(called[0]) + self.assertTrue(called[0]) def test_HEAD_calls_authorize(self): called = [False] @@ -5824,7 +5359,7 @@ class TestObjectController(unittest.TestCase): req.environ['swift.authorize'] = authorize self.app.update_request(req) controller.HEAD(req) - self.assert_(called[0]) + self.assertTrue(called[0]) def test_POST_calls_authorize(self): called = [False] @@ -5843,7 +5378,7 @@ class TestObjectController(unittest.TestCase): req.environ['swift.authorize'] = authorize self.app.update_request(req) controller.POST(req) - self.assert_(called[0]) + self.assertTrue(called[0]) def test_POST_as_copy_calls_authorize(self): called = [False] @@ -5861,7 +5396,7 @@ class TestObjectController(unittest.TestCase): req.environ['swift.authorize'] = authorize self.app.update_request(req) controller.POST(req) - self.assert_(called[0]) + self.assertTrue(called[0]) def test_PUT_calls_authorize(self): called = [False] @@ -5878,7 +5413,7 @@ class TestObjectController(unittest.TestCase): req.environ['swift.authorize'] = authorize self.app.update_request(req) controller.PUT(req) - self.assert_(called[0]) + self.assertTrue(called[0]) def test_COPY_calls_authorize(self): called = [False] @@ -5896,7 +5431,7 @@ class TestObjectController(unittest.TestCase): req.environ['swift.authorize'] = authorize self.app.update_request(req) controller.COPY(req) - self.assert_(called[0]) + self.assertTrue(called[0]) def test_POST_converts_delete_after_to_delete_at(self): with save_globals(): @@ -5914,134 +5449,67 @@ class TestObjectController(unittest.TestCase): 'X-Delete-After': '60'}) 
self.app.update_request(req) res = controller.POST(req) - self.assertEquals(res.status, '202 Fake') - self.assertEquals(req.headers.get('x-delete-at'), - str(int(t + 60))) + self.assertEqual(res.status, '202 Fake') + self.assertEqual(req.headers.get('x-delete-at'), + str(int(t + 60))) finally: time.time = orig_time - @patch_policies([ - StoragePolicy(0, 'zero', False, object_ring=FakeRing()), - StoragePolicy(1, 'one', True, object_ring=FakeRing()) - ]) - def test_PUT_versioning_with_nonzero_default_policy(self): - # reset the router post patch_policies - self.app.obj_controller_router = proxy_server.ObjectControllerRouter() + @unpatch_policies + def test_ec_client_disconnect(self): + prolis = _test_sockets[0] - def test_connect(ipaddr, port, device, partition, method, path, - headers=None, query_string=None): - if method == "HEAD": - self.assertEquals(path, '/a/c/o.jpg') - self.assertNotEquals(None, - headers['X-Backend-Storage-Policy-Index']) - self.assertEquals(1, int(headers - ['X-Backend-Storage-Policy-Index'])) + # create connection + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() - def fake_container_info(account, container, req): - return {'status': 200, 'sync_key': None, 'storage_policy': '1', - 'meta': {}, 'cors': {'allow_origin': None, - 'expose_headers': None, - 'max_age': None}, - 'sysmeta': {}, 'read_acl': None, 'object_count': None, - 'write_acl': None, 'versions': 'c-versions', - 'partition': 1, 'bytes': None, - 'nodes': [{'zone': 0, 'ip': '10.0.0.0', 'region': 0, - 'id': 0, 'device': 'sda', 'port': 1000}, - {'zone': 1, 'ip': '10.0.0.1', 'region': 1, - 'id': 1, 'device': 'sdb', 'port': 1001}, - {'zone': 2, 'ip': '10.0.0.2', 'region': 0, - 'id': 2, 'device': 'sdc', 'port': 1002}]} - with save_globals(): - controller = ReplicatedObjectController( - self.app, 'a', 'c', 'o.jpg') + # create container + fd.write('PUT /v1/a/ec-discon HTTP/1.1\r\n' + 'Host: localhost\r\n' + 'Content-Length: 0\r\n' + 'X-Storage-Token: t\r\n' 
+ 'X-Storage-Policy: ec\r\n' + '\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 2' + self.assertEqual(headers[:len(exp)], exp) - controller.container_info = fake_container_info - set_http_connect(200, 200, 200, # head: for the last version - 200, 200, 200, # get: for the last version - 201, 201, 201, # put: move the current version - 201, 201, 201, # put: save the new version - give_connect=test_connect) - req = Request.blank('/v1/a/c/o.jpg', - environ={'REQUEST_METHOD': 'PUT'}, - headers={'Content-Length': '0'}) - self.app.update_request(req) - self.app.memcache.store = {} - res = controller.PUT(req) - self.assertEquals(201, res.status_int) + # create object + obj = 'a' * 4 * 64 * 2 ** 10 + fd.write('PUT /v1/a/ec-discon/test HTTP/1.1\r\n' + 'Host: localhost\r\n' + 'Content-Length: %d\r\n' + 'X-Storage-Token: t\r\n' + 'Content-Type: donuts\r\n' + '\r\n%s' % (len(obj), obj)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEqual(headers[:len(exp)], exp) - @patch_policies([ - StoragePolicy(0, 'zero', False, object_ring=FakeRing()), - StoragePolicy(1, 'one', True, object_ring=FakeRing()) - ]) - def test_cross_policy_DELETE_versioning(self): - # reset the router post patch_policies - self.app.obj_controller_router = proxy_server.ObjectControllerRouter() - requests = [] + # get object + fd.write('GET /v1/a/ec-discon/test HTTP/1.1\r\n' + 'Host: localhost\r\n' + 'Connection: close\r\n' + 'X-Storage-Token: t\r\n' + '\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 200' + self.assertEqual(headers[:len(exp)], exp) - def capture_requests(ipaddr, port, device, partition, method, path, - headers=None, query_string=None): - requests.append((method, path, headers)) + # read most of the object, and disconnect + fd.read(10) + fd.close() + sock.close() + sleep(0) - def fake_container_info(app, env, account, container, **kwargs): - info = {'status': 200, 'sync_key': None, 'storage_policy': None, - 'meta': {}, 
'cors': {'allow_origin': None, - 'expose_headers': None, - 'max_age': None}, - 'sysmeta': {}, 'read_acl': None, 'object_count': None, - 'write_acl': None, 'versions': None, - 'partition': 1, 'bytes': None, - 'nodes': [{'zone': 0, 'ip': '10.0.0.0', 'region': 0, - 'id': 0, 'device': 'sda', 'port': 1000}, - {'zone': 1, 'ip': '10.0.0.1', 'region': 1, - 'id': 1, 'device': 'sdb', 'port': 1001}, - {'zone': 2, 'ip': '10.0.0.2', 'region': 0, - 'id': 2, 'device': 'sdc', 'port': 1002}]} - if container == 'c': - info['storage_policy'] = '1' - info['versions'] = 'c-versions' - elif container == 'c-versions': - info['storage_policy'] = '0' - else: - self.fail('Unexpected call to get_info for %r' % container) - return info - container_listing = json.dumps([{'name': 'old_version'}]) - with save_globals(): - resp_status = ( - 200, 200, # listings for versions container - 200, 200, 200, # get: for the last version - 201, 201, 201, # put: move the last version - 200, 200, 200, # delete: for the last version - ) - body_iter = iter([container_listing] + [ - '' for x in range(len(resp_status) - 1)]) - set_http_connect(*resp_status, body_iter=body_iter, - give_connect=capture_requests) - req = Request.blank('/v1/a/c/current_version', method='DELETE') - self.app.update_request(req) - self.app.memcache.store = {} - with mock.patch('swift.proxy.controllers.base.get_info', - fake_container_info): - resp = self.app.handle_request(req) - self.assertEquals(200, resp.status_int) - expected = [('GET', '/a/c-versions')] * 2 + \ - [('GET', '/a/c-versions/old_version')] * 3 + \ - [('PUT', '/a/c/current_version')] * 3 + \ - [('DELETE', '/a/c-versions/old_version')] * 3 - self.assertEqual(expected, [(m, p) for m, p, h in requests]) - for method, path, headers in requests: - if 'current_version' in path: - expected_storage_policy = 1 - elif 'old_version' in path: - expected_storage_policy = 0 - else: - continue - storage_policy_index = \ - int(headers['X-Backend-Storage-Policy-Index']) - 
self.assertEqual( - expected_storage_policy, storage_policy_index, - 'Unexpected %s request for %s ' - 'with storage policy index %s' % ( - method, path, storage_policy_index)) + # check for disconnect message! + expected = ['Client disconnected on read'] * 2 + self.assertEqual( + _test_servers[0].logger.get_lines_for_level('warning'), + expected) @unpatch_policies def test_leak_1(self): @@ -6100,7 +5568,7 @@ class TestObjectController(unittest.TestCase): gc.collect() else: sleep(0) - self.assertEquals( + self.assertEqual( before_request_instances, len(_request_instances)) def test_OPTIONS(self): @@ -6117,7 +5585,7 @@ class TestObjectController(unittest.TestCase): headers={'Origin': 'http://foo.com', 'Access-Control-Request-Method': 'GET'}) resp = controller.OPTIONS(req) - self.assertEquals(401, resp.status_int) + self.assertEqual(401, resp.status_int) def my_empty_origin_container_info(*args): return {'cors': {'allow_origin': None}} @@ -6128,7 +5596,7 @@ class TestObjectController(unittest.TestCase): headers={'Origin': 'http://foo.com', 'Access-Control-Request-Method': 'GET'}) resp = controller.OPTIONS(req) - self.assertEquals(401, resp.status_int) + self.assertEqual(401, resp.status_int) def my_container_info(*args): return { @@ -6145,38 +5613,38 @@ class TestObjectController(unittest.TestCase): 'Access-Control-Request-Method': 'GET'}) req.content_length = 0 resp = controller.OPTIONS(req) - self.assertEquals(200, resp.status_int) - self.assertEquals( + self.assertEqual(200, resp.status_int) + self.assertEqual( 'https://foo.bar', resp.headers['access-control-allow-origin']) for verb in 'OPTIONS COPY GET POST PUT DELETE HEAD'.split(): self.assertTrue( verb in resp.headers['access-control-allow-methods']) - self.assertEquals( + self.assertEqual( len(resp.headers['access-control-allow-methods'].split(', ')), 7) - self.assertEquals('999', resp.headers['access-control-max-age']) + self.assertEqual('999', resp.headers['access-control-max-age']) req = Request.blank( 
'/v1/a/c/o.jpg', {'REQUEST_METHOD': 'OPTIONS'}, headers={'Origin': 'https://foo.bar'}) req.content_length = 0 resp = controller.OPTIONS(req) - self.assertEquals(401, resp.status_int) + self.assertEqual(401, resp.status_int) req = Request.blank('/v1/a/c/o.jpg', {'REQUEST_METHOD': 'OPTIONS'}) req.content_length = 0 resp = controller.OPTIONS(req) - self.assertEquals(200, resp.status_int) + self.assertEqual(200, resp.status_int) for verb in 'OPTIONS COPY GET POST PUT DELETE HEAD'.split(): self.assertTrue( verb in resp.headers['Allow']) - self.assertEquals(len(resp.headers['Allow'].split(', ')), 7) + self.assertEqual(len(resp.headers['Allow'].split(', ')), 7) req = Request.blank( '/v1/a/c/o.jpg', {'REQUEST_METHOD': 'OPTIONS'}, headers={'Origin': 'http://foo.com'}) resp = controller.OPTIONS(req) - self.assertEquals(401, resp.status_int) + self.assertEqual(401, resp.status_int) req = Request.blank( '/v1/a/c/o.jpg', {'REQUEST_METHOD': 'OPTIONS'}, @@ -6184,7 +5652,7 @@ class TestObjectController(unittest.TestCase): 'Access-Control-Request-Method': 'GET'}) controller.app.cors_allow_origin = ['http://foo.bar', ] resp = controller.OPTIONS(req) - self.assertEquals(200, resp.status_int) + self.assertEqual(200, resp.status_int) def my_container_info_wildcard(*args): return { @@ -6201,15 +5669,15 @@ class TestObjectController(unittest.TestCase): 'Access-Control-Request-Method': 'GET'}) req.content_length = 0 resp = controller.OPTIONS(req) - self.assertEquals(200, resp.status_int) - self.assertEquals('*', resp.headers['access-control-allow-origin']) + self.assertEqual(200, resp.status_int) + self.assertEqual('*', resp.headers['access-control-allow-origin']) for verb in 'OPTIONS COPY GET POST PUT DELETE HEAD'.split(): self.assertTrue( verb in resp.headers['access-control-allow-methods']) - self.assertEquals( + self.assertEqual( len(resp.headers['access-control-allow-methods'].split(', ')), 7) - self.assertEquals('999', resp.headers['access-control-max-age']) + 
self.assertEqual('999', resp.headers['access-control-max-age']) def test_CORS_valid(self): with save_globals(): @@ -6238,12 +5706,12 @@ class TestObjectController(unittest.TestCase): resp = cors_validation(objectGET)(controller, req) - self.assertEquals(200, resp.status_int) - self.assertEquals('http://foo.bar', - resp.headers['access-control-allow-origin']) - self.assertEquals('red', resp.headers['x-object-meta-color']) + self.assertEqual(200, resp.status_int) + self.assertEqual('http://foo.bar', + resp.headers['access-control-allow-origin']) + self.assertEqual('red', resp.headers['x-object-meta-color']) # X-Super-Secret is in the response, but not "exposed" - self.assertEquals('hush', resp.headers['x-super-secret']) + self.assertEqual('hush', resp.headers['x-super-secret']) self.assertTrue('access-control-expose-headers' in resp.headers) exposed = set( h.strip() for h in @@ -6252,7 +5720,7 @@ class TestObjectController(unittest.TestCase): 'content-type', 'expires', 'last-modified', 'pragma', 'etag', 'x-timestamp', 'x-trans-id', 'x-object-meta-color']) - self.assertEquals(expected_exposed, exposed) + self.assertEqual(expected_exposed, exposed) controller.app.strict_cors_mode = True req = Request.blank( @@ -6262,7 +5730,7 @@ class TestObjectController(unittest.TestCase): resp = cors_validation(objectGET)(controller, req) - self.assertEquals(200, resp.status_int) + self.assertEqual(200, resp.status_int) self.assertTrue('access-control-allow-origin' not in resp.headers) def test_CORS_valid_with_obj_headers(self): @@ -6293,11 +5761,11 @@ class TestObjectController(unittest.TestCase): resp = cors_validation(objectGET)(controller, req) - self.assertEquals(200, resp.status_int) - self.assertEquals('http://obj.origin', - resp.headers['access-control-allow-origin']) - self.assertEquals('x-trans-id', - resp.headers['access-control-expose-headers']) + self.assertEqual(200, resp.status_int) + self.assertEqual('http://obj.origin', + resp.headers['access-control-allow-origin']) 
+ self.assertEqual('x-trans-id', + resp.headers['access-control-expose-headers']) def _gather_x_container_headers(self, controller_call, req, *connect_args, **kwargs): @@ -6606,12 +6074,18 @@ class TestECMismatchedFA(unittest.TestCase): class TestObjectECRangedGET(unittest.TestCase): def setUp(self): + _test_servers[0].logger._clear() self.app = proxy_server.Application( None, FakeMemcache(), logger=debug_logger('proxy-ut'), account_ring=FakeRing(), container_ring=FakeRing()) + def tearDown(self): + prosrv = _test_servers[0] + self.assertFalse(prosrv.logger.get_lines_for_level('error')) + self.assertFalse(prosrv.logger.get_lines_for_level('warning')) + @classmethod def setUpClass(cls): cls.obj_name = 'range-get-test' @@ -6743,7 +6217,7 @@ class TestObjectECRangedGET(unittest.TestCase): self.assertEqual(gotten_obj, self.obj[3783:7879]) def test_aligned_left(self): - # Firts byte is aligned to a segment boundary, last byte is not + # First byte is aligned to a segment boundary, last byte is not status, headers, gotten_obj = self._get_obj("bytes=0-5500") self.assertEqual(status, 206) self.assertEqual(headers['Content-Length'], "5501") @@ -7108,9 +6582,10 @@ class TestContainerController(unittest.TestCase): 'Content-Type': 'text/plain'}) self.assertEqual(controller._convert_policy_to_index(req), None) # negative test - req = Request.blank('/a/c', headers={'Content-Length': '0', - 'Content-Type': 'text/plain', - 'X-Storage-Policy': 'nada'}) + req = Request.blank('/a/c', + headers={'Content-Length': '0', + 'Content-Type': 'text/plain', + 'X-Storage-Policy': 'nada'}) self.assertRaises(HTTPException, controller._convert_policy_to_index, req) # storage policy two is deprecated @@ -7186,14 +6661,14 @@ class TestContainerController(unittest.TestCase): 'Content-Type': 'text/plain'}) self.app.update_request(req) res = method(req) - self.assertEquals(res.status_int, expected) + self.assertEqual(res.status_int, expected) set_http_connect(*statuses, **kwargs) 
self.app.memcache.store = {} req = Request.blank('/v1/a/c/', headers={'Content-Length': '0', 'Content-Type': 'text/plain'}) self.app.update_request(req) res = method(req) - self.assertEquals(res.status_int, expected) + self.assertEqual(res.status_int, expected) def test_HEAD_GET(self): with save_globals(): @@ -7206,22 +6681,22 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/v1/a/c', {}) self.app.update_request(req) res = controller.HEAD(req) - self.assertEquals(res.status[:len(str(expected))], - str(expected)) + self.assertEqual(res.status[:len(str(expected))], + str(expected)) if expected < 400: - self.assert_('x-works' in res.headers) - self.assertEquals(res.headers['x-works'], 'yes') + self.assertTrue('x-works' in res.headers) + self.assertEqual(res.headers['x-works'], 'yes') if c_expected: self.assertTrue('swift.container/a/c' in res.environ) - self.assertEquals( + self.assertEqual( res.environ['swift.container/a/c']['status'], c_expected) else: self.assertTrue('swift.container/a/c' not in res.environ) if a_expected: self.assertTrue('swift.account/a' in res.environ) - self.assertEquals(res.environ['swift.account/a']['status'], - a_expected) + self.assertEqual(res.environ['swift.account/a']['status'], + a_expected) else: self.assertTrue('swift.account/a' not in res.environ) @@ -7230,22 +6705,22 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/v1/a/c', {}) self.app.update_request(req) res = controller.GET(req) - self.assertEquals(res.status[:len(str(expected))], - str(expected)) + self.assertEqual(res.status[:len(str(expected))], + str(expected)) if expected < 400: - self.assert_('x-works' in res.headers) - self.assertEquals(res.headers['x-works'], 'yes') + self.assertTrue('x-works' in res.headers) + self.assertEqual(res.headers['x-works'], 'yes') if c_expected: self.assertTrue('swift.container/a/c' in res.environ) - self.assertEquals( + self.assertEqual( res.environ['swift.container/a/c']['status'], 
c_expected) else: self.assertTrue('swift.container/a/c' not in res.environ) if a_expected: self.assertTrue('swift.account/a' in res.environ) - self.assertEquals(res.environ['swift.account/a']['status'], - a_expected) + self.assertEqual(res.environ['swift.account/a']['status'], + a_expected) else: self.assertTrue('swift.account/a' not in res.environ) # In all the following tests cache 200 for account @@ -7291,14 +6766,14 @@ class TestContainerController(unittest.TestCase): expected_policy = POLICIES.default res = req.get_response(self.app) if expected_policy.is_deprecated: - self.assertEquals(res.status_int, 400) + self.assertEqual(res.status_int, 400) self.assertEqual(0, len(backend_requests)) expected = 'is deprecated' self.assertTrue(expected in res.body, '%r did not include %r' % ( res.body, expected)) return - self.assertEquals(res.status_int, 201) + self.assertEqual(res.status_int, 201) self.assertEqual( expected_policy.object_ring.replicas, len(backend_requests)) @@ -7338,7 +6813,7 @@ class TestContainerController(unittest.TestCase): self.app.update_request(req) res = controller.PUT(req) expected = str(expected) - self.assertEquals(res.status[:len(expected)], expected) + self.assertEqual(res.status[:len(expected)], expected) test_status_map((200, 201, 201, 201), 201, missing_container=True) test_status_map((200, 201, 201, 500), 201, missing_container=True) @@ -7398,7 +6873,7 @@ class TestContainerController(unittest.TestCase): self.app.update_request(req) res = controller.PUT(req) expected = str(expected) - self.assertEquals(res.status[:len(expected)], expected) + self.assertEqual(res.status[:len(expected)], expected) self.app.account_autocreate = True calls = [] @@ -7437,7 +6912,7 @@ class TestContainerController(unittest.TestCase): self.app.update_request(req) res = controller.POST(req) expected = str(expected) - self.assertEquals(res.status[:len(expected)], expected) + self.assertEqual(res.status[:len(expected)], expected) test_status_map((200, 201, 201, 
201), 201, missing_container=True) test_status_map((200, 201, 201, 500), 201, missing_container=True) @@ -7520,28 +6995,28 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': meth}) self.app.update_request(req) resp = getattr(controller, meth)(req) - self.assertEquals(resp.status_int, 200) + self.assertEqual(resp.status_int, 200) set_http_connect(404, 404, 404, 200, 200, 200) # Make sure it is a blank request wthout env caching req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': meth}) resp = getattr(controller, meth)(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) set_http_connect(503, 404, 404) # Make sure it is a blank request wthout env caching req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': meth}) resp = getattr(controller, meth)(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) set_http_connect(503, 404, raise_exc=True) # Make sure it is a blank request wthout env caching req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': meth}) resp = getattr(controller, meth)(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) for dev in self.app.account_ring.devs: set_node_errors(self.app, dev, @@ -7552,7 +7027,7 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': meth}) resp = getattr(controller, meth)(req) - self.assertEquals(resp.status_int, 404) + self.assertEqual(resp.status_int, 404) def test_put_locking(self): @@ -7578,7 +7053,7 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'}) self.app.update_request(req) res = controller.PUT(req) - self.assertEquals(res.status_int, 201) + self.assertEqual(res.status_int, 201) def test_error_limiting(self): with save_globals(): @@ -7589,19 +7064,19 @@ class TestContainerController(unittest.TestCase): self.assert_status_map(controller.HEAD, 
(200, 503, 200, 200), 200, missing_container=False) - self.assertEquals( + self.assertEqual( node_error_count(controller.app, container_ring.devs[0]), 2) - self.assert_( + self.assertTrue( node_last_error(controller.app, container_ring.devs[0]) is not None) for _junk in range(self.app.error_suppression_limit): self.assert_status_map(controller.HEAD, (200, 503, 503, 503), 503) - self.assertEquals( + self.assertEqual( node_error_count(controller.app, container_ring.devs[0]), self.app.error_suppression_limit + 1) self.assert_status_map(controller.HEAD, (200, 200, 200, 200), 503) - self.assert_( + self.assertTrue( node_last_error(controller.app, container_ring.devs[0]) is not None) self.assert_status_map(controller.PUT, (200, 201, 201, 201), 503, @@ -7643,7 +7118,7 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/v1/a/c?format=json') self.app.update_request(req) res = controller.GET(req) - self.assert_('accept-ranges' in res.headers) + self.assertTrue('accept-ranges' in res.headers) self.assertEqual(res.headers['accept-ranges'], 'bytes') def test_response_head_accept_ranges_header(self): @@ -7654,7 +7129,7 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/v1/a/c?format=json') self.app.update_request(req) res = controller.HEAD(req) - self.assert_('accept-ranges' in res.headers) + self.assertTrue('accept-ranges' in res.headers) self.assertEqual(res.headers['accept-ranges'], 'bytes') def test_PUT_metadata(self): @@ -7700,7 +7175,7 @@ class TestContainerController(unittest.TestCase): headers={test_header: test_value}) self.app.update_request(req) getattr(controller, method)(req) - self.assertEquals(test_errors, []) + self.assertEqual(test_errors, []) def test_PUT_bad_metadata(self): self.bad_metadata_helper('PUT') @@ -7715,7 +7190,7 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method}) self.app.update_request(req) resp = getattr(controller, method)(req) - 
self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) set_http_connect(201, 201, 201) req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method}, @@ -7723,7 +7198,7 @@ class TestContainerController(unittest.TestCase): ('a' * constraints.MAX_META_NAME_LENGTH): 'v'}) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) set_http_connect(201, 201, 201) req = Request.blank( '/v1/a/c', environ={'REQUEST_METHOD': method}, @@ -7731,7 +7206,7 @@ class TestContainerController(unittest.TestCase): ('a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'}) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) set_http_connect(201, 201, 201) req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method}, @@ -7739,14 +7214,14 @@ class TestContainerController(unittest.TestCase): 'a' * constraints.MAX_META_VALUE_LENGTH}) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) set_http_connect(201, 201, 201) req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method}, headers={'X-Container-Meta-Too-Long': 'a' * (constraints.MAX_META_VALUE_LENGTH + 1)}) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) set_http_connect(201, 201, 201) headers = {} @@ -7756,7 +7231,7 @@ class TestContainerController(unittest.TestCase): headers=headers) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) set_http_connect(201, 201, 201) headers = {} for x in range(constraints.MAX_META_COUNT + 1): @@ -7765,7 +7240,7 @@ class TestContainerController(unittest.TestCase): 
headers=headers) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) set_http_connect(201, 201, 201) headers = {} @@ -7784,7 +7259,7 @@ class TestContainerController(unittest.TestCase): headers=headers) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) set_http_connect(201, 201, 201) headers['X-Container-Meta-a'] = \ 'a' * (constraints.MAX_META_OVERALL_SIZE - size) @@ -7792,7 +7267,7 @@ class TestContainerController(unittest.TestCase): headers=headers) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_POST_calls_clean_acl(self): called = [False] @@ -7809,7 +7284,7 @@ class TestContainerController(unittest.TestCase): req.environ['swift.clean_acl'] = clean_acl self.app.update_request(req) controller.POST(req) - self.assert_(called[0]) + self.assertTrue(called[0]) called[0] = False with save_globals(): set_http_connect(200, 201, 201, 201) @@ -7820,7 +7295,7 @@ class TestContainerController(unittest.TestCase): req.environ['swift.clean_acl'] = clean_acl self.app.update_request(req) controller.POST(req) - self.assert_(called[0]) + self.assertTrue(called[0]) def test_PUT_calls_clean_acl(self): called = [False] @@ -7837,7 +7312,7 @@ class TestContainerController(unittest.TestCase): req.environ['swift.clean_acl'] = clean_acl self.app.update_request(req) controller.PUT(req) - self.assert_(called[0]) + self.assertTrue(called[0]) called[0] = False with save_globals(): set_http_connect(200, 201, 201, 201) @@ -7848,7 +7323,7 @@ class TestContainerController(unittest.TestCase): req.environ['swift.clean_acl'] = clean_acl self.app.update_request(req) controller.PUT(req) - self.assert_(called[0]) + self.assertTrue(called[0]) def test_GET_no_content(self): with save_globals(): 
@@ -7858,10 +7333,10 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/v1/a/c') self.app.update_request(req) res = controller.GET(req) - self.assertEquals(res.status_int, 204) - self.assertEquals( + self.assertEqual(res.status_int, 204) + self.assertEqual( res.environ['swift.container/a/c']['status'], 204) - self.assertEquals(res.content_length, 0) + self.assertEqual(res.content_length, 0) self.assertTrue('transfer-encoding' not in res.headers) def test_GET_calls_authorize(self): @@ -7878,8 +7353,8 @@ class TestContainerController(unittest.TestCase): req.environ['swift.authorize'] = authorize self.app.update_request(req) res = controller.GET(req) - self.assertEquals(res.environ['swift.container/a/c']['status'], 201) - self.assert_(called[0]) + self.assertEqual(res.environ['swift.container/a/c']['status'], 201) + self.assertTrue(called[0]) def test_HEAD_calls_authorize(self): called = [False] @@ -7895,7 +7370,7 @@ class TestContainerController(unittest.TestCase): req.environ['swift.authorize'] = authorize self.app.update_request(req) controller.HEAD(req) - self.assert_(called[0]) + self.assertTrue(called[0]) def test_unauthorized_requests_when_account_not_found(self): # verify unauthorized container requests always return response @@ -8009,7 +7484,7 @@ class TestContainerController(unittest.TestCase): headers={'Origin': 'http://foo.com', 'Access-Control-Request-Method': 'GET'}) resp = controller.OPTIONS(req) - self.assertEquals(401, resp.status_int) + self.assertEqual(401, resp.status_int) def my_empty_origin_container_info(*args): return {'cors': {'allow_origin': None}} @@ -8020,7 +7495,7 @@ class TestContainerController(unittest.TestCase): headers={'Origin': 'http://foo.com', 'Access-Control-Request-Method': 'GET'}) resp = controller.OPTIONS(req) - self.assertEquals(401, resp.status_int) + self.assertEqual(401, resp.status_int) def my_container_info(*args): return { @@ -8037,39 +7512,39 @@ class TestContainerController(unittest.TestCase): 
'Access-Control-Request-Method': 'GET'}) req.content_length = 0 resp = controller.OPTIONS(req) - self.assertEquals(200, resp.status_int) - self.assertEquals( + self.assertEqual(200, resp.status_int) + self.assertEqual( 'https://foo.bar', resp.headers['access-control-allow-origin']) for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split(): self.assertTrue( verb in resp.headers['access-control-allow-methods']) - self.assertEquals( + self.assertEqual( len(resp.headers['access-control-allow-methods'].split(', ')), 6) - self.assertEquals('999', resp.headers['access-control-max-age']) + self.assertEqual('999', resp.headers['access-control-max-age']) req = Request.blank( '/v1/a/c', {'REQUEST_METHOD': 'OPTIONS'}, headers={'Origin': 'https://foo.bar'}) req.content_length = 0 resp = controller.OPTIONS(req) - self.assertEquals(401, resp.status_int) + self.assertEqual(401, resp.status_int) req = Request.blank('/v1/a/c', {'REQUEST_METHOD': 'OPTIONS'}) req.content_length = 0 resp = controller.OPTIONS(req) - self.assertEquals(200, resp.status_int) + self.assertEqual(200, resp.status_int) for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split(): self.assertTrue( verb in resp.headers['Allow']) - self.assertEquals(len(resp.headers['Allow'].split(', ')), 6) + self.assertEqual(len(resp.headers['Allow'].split(', ')), 6) req = Request.blank( '/v1/a/c', {'REQUEST_METHOD': 'OPTIONS'}, headers={'Origin': 'http://foo.bar', 'Access-Control-Request-Method': 'GET'}) resp = controller.OPTIONS(req) - self.assertEquals(401, resp.status_int) + self.assertEqual(401, resp.status_int) req = Request.blank( '/v1/a/c', {'REQUEST_METHOD': 'OPTIONS'}, @@ -8077,7 +7552,7 @@ class TestContainerController(unittest.TestCase): 'Access-Control-Request-Method': 'GET'}) controller.app.cors_allow_origin = ['http://foo.bar', ] resp = controller.OPTIONS(req) - self.assertEquals(200, resp.status_int) + self.assertEqual(200, resp.status_int) def my_container_info_wildcard(*args): return { @@ -8094,15 +7569,15 @@ class 
TestContainerController(unittest.TestCase): 'Access-Control-Request-Method': 'GET'}) req.content_length = 0 resp = controller.OPTIONS(req) - self.assertEquals(200, resp.status_int) - self.assertEquals('*', resp.headers['access-control-allow-origin']) + self.assertEqual(200, resp.status_int) + self.assertEqual('*', resp.headers['access-control-allow-origin']) for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split(): self.assertTrue( verb in resp.headers['access-control-allow-methods']) - self.assertEquals( + self.assertEqual( len(resp.headers['access-control-allow-methods'].split(', ')), 6) - self.assertEquals('999', resp.headers['access-control-max-age']) + self.assertEqual('999', resp.headers['access-control-max-age']) req = Request.blank( '/v1/a/c/o.jpg', @@ -8114,8 +7589,8 @@ class TestContainerController(unittest.TestCase): ) req.content_length = 0 resp = controller.OPTIONS(req) - self.assertEquals(200, resp.status_int) - self.assertEquals( + self.assertEqual(200, resp.status_int) + self.assertEqual( sortHeaderNames('x-foo, x-bar, x-auth-token'), sortHeaderNames(resp.headers['access-control-allow-headers'])) @@ -8144,12 +7619,12 @@ class TestContainerController(unittest.TestCase): resp = cors_validation(containerGET)(controller, req) - self.assertEquals(200, resp.status_int) - self.assertEquals('http://foo.bar', - resp.headers['access-control-allow-origin']) - self.assertEquals('red', resp.headers['x-container-meta-color']) + self.assertEqual(200, resp.status_int) + self.assertEqual('http://foo.bar', + resp.headers['access-control-allow-origin']) + self.assertEqual('red', resp.headers['x-container-meta-color']) # X-Super-Secret is in the response, but not "exposed" - self.assertEquals('hush', resp.headers['x-super-secret']) + self.assertEqual('hush', resp.headers['x-super-secret']) self.assertTrue('access-control-expose-headers' in resp.headers) exposed = set( h.strip() for h in @@ -8158,7 +7633,7 @@ class TestContainerController(unittest.TestCase): 
'content-type', 'expires', 'last-modified', 'pragma', 'etag', 'x-timestamp', 'x-trans-id', 'x-container-meta-color']) - self.assertEquals(expected_exposed, exposed) + self.assertEqual(expected_exposed, exposed) def _gather_x_account_headers(self, controller_call, req, *connect_args, **kwargs): @@ -8288,7 +7763,7 @@ class TestContainerController(unittest.TestCase): self.assertEqual(3, len(timestamps)) for timestamp in timestamps: self.assertEqual(timestamp, timestamps[0]) - self.assert_(re.match('[0-9]{10}\.[0-9]{5}', timestamp)) + self.assertTrue(re.match('[0-9]{10}\.[0-9]{5}', timestamp)) def test_DELETE_backed_x_timestamp_header(self): timestamps = [] @@ -8313,7 +7788,7 @@ class TestContainerController(unittest.TestCase): self.assertEqual(3, len(timestamps)) for timestamp in timestamps: self.assertEqual(timestamp, timestamps[0]) - self.assert_(re.match('[0-9]{10}\.[0-9]{5}', timestamp)) + self.assertTrue(re.match('[0-9]{10}\.[0-9]{5}', timestamp)) def test_node_read_timeout_retry_to_container(self): with save_globals(): @@ -8326,7 +7801,7 @@ class TestContainerController(unittest.TestCase): resp.body except ChunkReadTimeout: got_exc = True - self.assert_(got_exc) + self.assertTrue(got_exc) @patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())]) @@ -8345,18 +7820,18 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/v1/a', {}, headers=headers) self.app.update_request(req) res = method(req) - self.assertEquals(res.status_int, expected) + self.assertEqual(res.status_int, expected) if env_expected: - self.assertEquals(res.environ['swift.account/a']['status'], - env_expected) + self.assertEqual(res.environ['swift.account/a']['status'], + env_expected) set_http_connect(*statuses) req = Request.blank('/v1/a/', {}) self.app.update_request(req) res = method(req) - self.assertEquals(res.status_int, expected) + self.assertEqual(res.status_int, expected) if env_expected: - self.assertEquals(res.environ['swift.account/a']['status'], - 
env_expected) + self.assertEqual(res.environ['swift.account/a']['status'], + env_expected) def test_OPTIONS(self): with save_globals(): @@ -8365,11 +7840,11 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/v1/account', {'REQUEST_METHOD': 'OPTIONS'}) req.content_length = 0 resp = controller.OPTIONS(req) - self.assertEquals(200, resp.status_int) + self.assertEqual(200, resp.status_int) for verb in 'OPTIONS GET POST HEAD'.split(): self.assertTrue( verb in resp.headers['Allow']) - self.assertEquals(len(resp.headers['Allow'].split(', ')), 4) + self.assertEqual(len(resp.headers['Allow'].split(', ')), 4) # Test a CORS OPTIONS request (i.e. including Origin and # Access-Control-Request-Method headers) @@ -8381,22 +7856,22 @@ class TestAccountController(unittest.TestCase): 'Access-Control-Request-Method': 'GET'}) req.content_length = 0 resp = controller.OPTIONS(req) - self.assertEquals(200, resp.status_int) + self.assertEqual(200, resp.status_int) for verb in 'OPTIONS GET POST HEAD'.split(): self.assertTrue( verb in resp.headers['Allow']) - self.assertEquals(len(resp.headers['Allow'].split(', ')), 4) + self.assertEqual(len(resp.headers['Allow'].split(', ')), 4) self.app.allow_account_management = True controller = proxy_server.AccountController(self.app, 'account') req = Request.blank('/v1/account', {'REQUEST_METHOD': 'OPTIONS'}) req.content_length = 0 resp = controller.OPTIONS(req) - self.assertEquals(200, resp.status_int) + self.assertEqual(200, resp.status_int) for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split(): self.assertTrue( verb in resp.headers['Allow']) - self.assertEquals(len(resp.headers['Allow'].split(', ')), 6) + self.assertEqual(len(resp.headers['Allow'].split(', ')), 6) def test_GET(self): with save_globals(): @@ -8492,7 +7967,7 @@ class TestAccountController(unittest.TestCase): self.assert_status_map( controller.POST, (404, 404, 404, 202, 202, 202, 201, 201, 201), 201) - # account_info PUT account POST account + # account_info PUT 
account POST account self.assert_status_map( controller.POST, (404, 404, 503, 201, 201, 503, 204, 204, 504), 204) @@ -8537,7 +8012,7 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'}) self.app.update_request(req) resp = controller.HEAD(req) - self.assertEquals(resp.status_int, 503) + self.assertEqual(resp.status_int, 503) def test_other_socket_error(self): self.app.account_ring.get_nodes('account') @@ -8548,7 +8023,7 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'}) self.app.update_request(req) resp = controller.HEAD(req) - self.assertEquals(resp.status_int, 503) + self.assertEqual(resp.status_int, 503) def test_response_get_accept_ranges_header(self): with save_globals(): @@ -8557,7 +8032,7 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/v1/a?format=json') self.app.update_request(req) res = controller.GET(req) - self.assert_('accept-ranges' in res.headers) + self.assertTrue('accept-ranges' in res.headers) self.assertEqual(res.headers['accept-ranges'], 'bytes') def test_response_head_accept_ranges_header(self): @@ -8568,7 +8043,7 @@ class TestAccountController(unittest.TestCase): self.app.update_request(req) res = controller.HEAD(req) res.body - self.assert_('accept-ranges' in res.headers) + self.assertTrue('accept-ranges' in res.headers) self.assertEqual(res.headers['accept-ranges'], 'bytes') def test_PUT(self): @@ -8583,7 +8058,7 @@ class TestAccountController(unittest.TestCase): self.app.update_request(req) res = controller.PUT(req) expected = str(expected) - self.assertEquals(res.status[:len(expected)], expected) + self.assertEqual(res.status[:len(expected)], expected) test_status_map((201, 201, 201), 405) self.app.allow_account_management = True test_status_map((201, 201, 201), 201) @@ -8654,7 +8129,7 @@ class TestAccountController(unittest.TestCase): headers={test_header: test_value}) 
self.app.update_request(req) getattr(controller, method)(req) - self.assertEquals(test_errors, []) + self.assertEqual(test_errors, []) def test_PUT_bad_metadata(self): self.bad_metadata_helper('PUT') @@ -8670,7 +8145,7 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method}) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) set_http_connect(201, 201, 201) req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method}, @@ -8678,7 +8153,7 @@ class TestAccountController(unittest.TestCase): ('a' * constraints.MAX_META_NAME_LENGTH): 'v'}) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) set_http_connect(201, 201, 201) req = Request.blank( '/v1/a/c', environ={'REQUEST_METHOD': method}, @@ -8686,7 +8161,7 @@ class TestAccountController(unittest.TestCase): ('a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'}) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) set_http_connect(201, 201, 201) req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method}, @@ -8694,14 +8169,14 @@ class TestAccountController(unittest.TestCase): 'a' * constraints.MAX_META_VALUE_LENGTH}) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) set_http_connect(201, 201, 201) req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method}, headers={'X-Account-Meta-Too-Long': 'a' * (constraints.MAX_META_VALUE_LENGTH + 1)}) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) set_http_connect(201, 201, 201) headers = {} @@ -8711,7 +8186,7 
@@ class TestAccountController(unittest.TestCase): headers=headers) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) set_http_connect(201, 201, 201) headers = {} for x in range(constraints.MAX_META_COUNT + 1): @@ -8720,7 +8195,7 @@ class TestAccountController(unittest.TestCase): headers=headers) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) set_http_connect(201, 201, 201) headers = {} @@ -8739,7 +8214,7 @@ class TestAccountController(unittest.TestCase): headers=headers) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 201) + self.assertEqual(resp.status_int, 201) set_http_connect(201, 201, 201) headers['X-Account-Meta-a'] = \ 'a' * (constraints.MAX_META_OVERALL_SIZE - size) @@ -8747,7 +8222,7 @@ class TestAccountController(unittest.TestCase): headers=headers) self.app.update_request(req) resp = getattr(controller, method)(req) - self.assertEquals(resp.status_int, 400) + self.assertEqual(resp.status_int, 400) def test_DELETE(self): with save_globals(): @@ -8761,7 +8236,7 @@ class TestAccountController(unittest.TestCase): self.app.update_request(req) res = controller.DELETE(req) expected = str(expected) - self.assertEquals(res.status[:len(expected)], expected) + self.assertEqual(res.status[:len(expected)], expected) test_status_map((201, 201, 201), 405) self.app.allow_account_management = True test_status_map((201, 201, 201), 201) @@ -8785,7 +8260,7 @@ class TestAccountController(unittest.TestCase): self.app.update_request(req) res = controller.DELETE(req) expected = str(expected) - self.assertEquals(res.status[:len(expected)], expected) + self.assertEqual(res.status[:len(expected)], expected) test_status_map((201, 201, 201), 400) self.app.allow_account_management = True test_status_map((201, 201, 201), 
400) @@ -8905,7 +8380,7 @@ class TestAccountControllerFakeGetResponse(unittest.TestCase): # Not a swift_owner -- ACLs should NOT be in response header = 'X-Account-Access-Control' - self.assert_(header not in resp.headers, '%r was in %r' % ( + self.assertTrue(header not in resp.headers, '%r was in %r' % ( header, resp.headers)) # Same setup -- mock acct server will provide ACLs @@ -8915,7 +8390,7 @@ class TestAccountControllerFakeGetResponse(unittest.TestCase): resp = app.handle_request(req) # For a swift_owner, the ACLs *should* be in response - self.assert_(header in resp.headers, '%r not in %r' % ( + self.assertTrue(header in resp.headers, '%r not in %r' % ( header, resp.headers)) def test_account_acls_through_delegation(self): @@ -9079,7 +8554,7 @@ class TestProxyObjectPerformance(unittest.TestCase): # Small, fast for testing obj_len = 2 * 64 * 1024 # Use 1 GB or more for measurements - #obj_len = 2 * 512 * 1024 * 1024 + # obj_len = 2 * 512 * 1024 * 1024 self.path = '/v1/a/c/o.large' fd.write('PUT %s HTTP/1.1\r\n' 'Host: localhost\r\n' @@ -9122,7 +8597,7 @@ class TestProxyObjectPerformance(unittest.TestCase): self.assertEqual(total, self.obj_len) end = time.time() - print "Run %02d took %07.03f" % (i, end - start) + print("Run %02d took %07.03f" % (i, end - start)) @patch_policies([StoragePolicy(0, 'migrated', object_ring=FakeRing()), @@ -9181,6 +8656,465 @@ class TestSwiftInfo(unittest.TestCase): self.assertEqual(sorted_pols[2]['name'], 'migrated') +class TestSocketObjectVersions(unittest.TestCase): + + def setUp(self): + global _test_sockets + self.prolis = prolis = listen(('localhost', 0)) + self._orig_prolis = _test_sockets[0] + allowed_headers = ', '.join([ + 'content-encoding', + 'x-object-manifest', + 'content-disposition', + 'foo' + ]) + conf = {'devices': _testdir, 'swift_dir': _testdir, + 'mount_check': 'false', 'allowed_headers': allowed_headers} + prosrv = versioned_writes.VersionedWritesMiddleware( + proxy_logging.ProxyLoggingMiddleware( + 
_test_servers[0], conf, + logger=_test_servers[0].logger), + {}) + self.coro = spawn(wsgi.server, prolis, prosrv, NullLogger()) + # replace global prosrv with one that's filtered with version + # middleware + self.sockets = list(_test_sockets) + self.sockets[0] = prolis + _test_sockets = tuple(self.sockets) + + def tearDown(self): + self.coro.kill() + # put the global state back + global _test_sockets + self.sockets[0] = self._orig_prolis + _test_sockets = tuple(self.sockets) + + def test_version_manifest(self, oc='versions', vc='vers', o='name'): + versions_to_create = 3 + # Create a container for our versioned object testing + (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, + obj2lis, obj3lis) = _test_sockets + pre = quote('%03x' % len(o)) + osub = '%s/sub' % o + presub = quote('%03x' % len(osub)) + osub = quote(osub) + presub = quote(presub) + oc = quote(oc) + vc = quote(vc) + + def put_container(): + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Storage-Token: t\r\n' + 'Content-Length: 0\r\nX-Versions-Location: %s\r\n\r\n' + % (oc, vc)) + fd.flush() + headers = readuntil2crlfs(fd) + fd.read() + return headers + + headers = put_container() + exp = 'HTTP/1.1 201' + self.assertEqual(headers[:len(exp)], exp) + + def get_container(): + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\n' + 'X-Storage-Token: t\r\n\r\n\r\n' % oc) + fd.flush() + headers = readuntil2crlfs(fd) + body = fd.read() + return headers, body + + # check that the header was set + headers, body = get_container() + exp = 'HTTP/1.1 2' # 2xx series response + self.assertEqual(headers[:len(exp)], exp) + self.assertIn('X-Versions-Location: %s' % vc, headers) + + def put_version_container(): + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = 
sock.makefile() + fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Storage-Token: t\r\n' + 'Content-Length: 0\r\n\r\n' % vc) + fd.flush() + headers = readuntil2crlfs(fd) + fd.read() + return headers + + # make the container for the object versions + headers = put_version_container() + exp = 'HTTP/1.1 201' + self.assertEqual(headers[:len(exp)], exp) + + def put(version): + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Storage-Token: ' + 't\r\nContent-Length: 5\r\nContent-Type: text/jibberish%s' + '\r\n\r\n%05d\r\n' % (oc, o, version, version)) + fd.flush() + headers = readuntil2crlfs(fd) + fd.read() + return headers + + def get(container=oc, obj=o): + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n' + '\r\n' % (container, obj)) + fd.flush() + headers = readuntil2crlfs(fd) + body = fd.read() + return headers, body + + # Create the versioned file + headers = put(0) + exp = 'HTTP/1.1 201' + self.assertEqual(headers[:len(exp)], exp) + + # Create the object versions + for version in range(1, versions_to_create): + sleep(.01) # guarantee that the timestamp changes + headers = put(version) + exp = 'HTTP/1.1 201' + self.assertEqual(headers[:len(exp)], exp) + + # Ensure retrieving the manifest file gets the latest version + headers, body = get() + exp = 'HTTP/1.1 200' + self.assertEqual(headers[:len(exp)], exp) + self.assertIn('Content-Type: text/jibberish%s' % version, headers) + self.assertNotIn('X-Object-Meta-Foo: barbaz', headers) + self.assertEqual(body, '%05d' % version) + + def get_version_container(): + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\n' + 
'X-Storage-Token: t\r\n\r\n' % vc) + fd.flush() + headers = readuntil2crlfs(fd) + body = fd.read() + return headers, body + + # Ensure we have the right number of versions saved + headers, body = get_version_container() + exp = 'HTTP/1.1 200' + self.assertEqual(headers[:len(exp)], exp) + versions = [x for x in body.split('\n') if x] + self.assertEqual(len(versions), versions_to_create - 1) + + def delete(): + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r' + '\nConnection: close\r\nX-Storage-Token: t\r\n\r\n' + % (oc, o)) + fd.flush() + headers = readuntil2crlfs(fd) + fd.read() + return headers + + def copy(): + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('COPY /v1/a/%s/%s HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Auth-Token: ' + 't\r\nDestination: %s/copied_name\r\n' + 'Content-Length: 0\r\n\r\n' % (oc, o, oc)) + fd.flush() + headers = readuntil2crlfs(fd) + fd.read() + return headers + + # copy a version and make sure the version info is stripped + headers = copy() + exp = 'HTTP/1.1 2' # 2xx series response to the COPY + self.assertEqual(headers[:len(exp)], exp) + + def get_copy(): + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/%s/copied_name HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\n' + 'X-Auth-Token: t\r\n\r\n' % oc) + fd.flush() + headers = readuntil2crlfs(fd) + body = fd.read() + return headers, body + + headers, body = get_copy() + exp = 'HTTP/1.1 200' + self.assertEqual(headers[:len(exp)], exp) + self.assertEqual(body, '%05d' % version) + + def post(): + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('POST /v1/a/%s/%s HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Auth-Token: ' + 't\r\nContent-Type: foo/bar\r\nContent-Length: 0\r\n' + 'X-Object-Meta-Bar: foo\r\n\r\n' % 
(oc, o)) + fd.flush() + headers = readuntil2crlfs(fd) + fd.read() + return headers + + # post and make sure it's updated + headers = post() + exp = 'HTTP/1.1 2' # 2xx series response to the POST + self.assertEqual(headers[:len(exp)], exp) + + headers, body = get() + self.assertIn('Content-Type: foo/bar', headers) + self.assertIn('X-Object-Meta-Bar: foo', headers) + self.assertEqual(body, '%05d' % version) + + # check container listing + headers, body = get_container() + exp = 'HTTP/1.1 200' + self.assertEqual(headers[:len(exp)], exp) + + # Delete the object versions + for segment in range(versions_to_create - 1, 0, -1): + + headers = delete() + exp = 'HTTP/1.1 2' # 2xx series response + self.assertEqual(headers[:len(exp)], exp) + + # Ensure retrieving the manifest file gets the latest version + headers, body = get() + exp = 'HTTP/1.1 200' + self.assertEqual(headers[:len(exp)], exp) + self.assertIn('Content-Type: text/jibberish%s' % (segment - 1), + headers) + self.assertEqual(body, '%05d' % (segment - 1)) + # Ensure we have the right number of versions saved + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r' + '\n' % (vc, pre, o)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 2' # 2xx series response + self.assertEqual(headers[:len(exp)], exp) + body = fd.read() + versions = [x for x in body.split('\n') if x] + self.assertEqual(len(versions), segment - 1) + + # there is now one version left (in the manifest) + # Ensure we have no saved versions + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n' + % (vc, pre, o)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 204 No Content' + self.assertEqual(headers[:len(exp)], exp) + + # delete the 
last version + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % (oc, o)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 2' # 2xx series response + self.assertEqual(headers[:len(exp)], exp) + + # Ensure it's all gone + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n' + % (oc, o)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 404' + self.assertEqual(headers[:len(exp)], exp) + + # make sure manifest files will be ignored + for _junk in range(1, versions_to_create): + sleep(.01) # guarantee that the timestamp changes + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Storage-Token: ' + 't\r\nContent-Length: 0\r\n' + 'Content-Type: text/jibberish0\r\n' + 'Foo: barbaz\r\nX-Object-Manifest: %s/%s/\r\n\r\n' + % (oc, o, oc, o)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEqual(headers[:len(exp)], exp) + + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nhost: ' + 'localhost\r\nconnection: close\r\nx-auth-token: t\r\n\r\n' + % (vc, pre, o)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 204 No Content' + self.assertEqual(headers[:len(exp)], exp) + + # DELETE v1/a/c/obj shouldn't delete v1/a/c/obj/sub versions + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Storage-Token: ' + 't\r\nContent-Length: 5\r\nContent-Type: text/jibberish0\r\n' + 'Foo: barbaz\r\n\r\n00000\r\n' % (oc, o)) + 
fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEqual(headers[:len(exp)], exp) + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Storage-Token: ' + 't\r\nContent-Length: 5\r\nContent-Type: text/jibberish0\r\n' + 'Foo: barbaz\r\n\r\n00001\r\n' % (oc, o)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEqual(headers[:len(exp)], exp) + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Storage-Token: ' + 't\r\nContent-Length: 4\r\nContent-Type: text/jibberish0\r\n' + 'Foo: barbaz\r\n\r\nsub1\r\n' % (oc, osub)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEqual(headers[:len(exp)], exp) + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Storage-Token: ' + 't\r\nContent-Length: 4\r\nContent-Type: text/jibberish0\r\n' + 'Foo: barbaz\r\n\r\nsub2\r\n' % (oc, osub)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEqual(headers[:len(exp)], exp) + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % (oc, o)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 2' # 2xx series response + self.assertEqual(headers[:len(exp)], exp) + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n' + % (vc, presub, osub)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 2' # 2xx series response + 
self.assertEqual(headers[:len(exp)], exp) + body = fd.read() + versions = [x for x in body.split('\n') if x] + self.assertEqual(len(versions), 1) + + # Check for when the versions target container doesn't exist + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/%swhoops HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Storage-Token: t\r\n' + 'Content-Length: 0\r\nX-Versions-Location: none\r\n\r\n' % oc) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEqual(headers[:len(exp)], exp) + # Create the versioned file + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/%swhoops/foo HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Storage-Token: ' + 't\r\nContent-Length: 5\r\n\r\n00000\r\n' % oc) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEqual(headers[:len(exp)], exp) + # Create another version + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/%swhoops/foo HTTP/1.1\r\nHost: ' + 'localhost\r\nConnection: close\r\nX-Storage-Token: ' + 't\r\nContent-Length: 5\r\n\r\n00001\r\n' % oc) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 412' + self.assertEqual(headers[:len(exp)], exp) + # Delete the object + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('DELETE /v1/a/%swhoops/foo HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % oc) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 2' # 2xx response + self.assertEqual(headers[:len(exp)], exp) + + def test_version_manifest_utf8(self): + oc = '0_oc_non_ascii\xc2\xa3' + vc = '0_vc_non_ascii\xc2\xa3' + o = '0_o_non_ascii\xc2\xa3' + self.test_version_manifest(oc, vc, o) + + def test_version_manifest_utf8_container(self): + oc = '1_oc_non_ascii\xc2\xa3' + vc = '1_vc_ascii' + o 
= '1_o_ascii' + self.test_version_manifest(oc, vc, o) + + def test_version_manifest_utf8_version_container(self): + oc = '2_oc_ascii' + vc = '2_vc_non_ascii\xc2\xa3' + o = '2_o_ascii' + self.test_version_manifest(oc, vc, o) + + def test_version_manifest_utf8_containers(self): + oc = '3_oc_non_ascii\xc2\xa3' + vc = '3_vc_non_ascii\xc2\xa3' + o = '3_o_ascii' + self.test_version_manifest(oc, vc, o) + + def test_version_manifest_utf8_object(self): + oc = '4_oc_ascii' + vc = '4_vc_ascii' + o = '4_o_non_ascii\xc2\xa3' + self.test_version_manifest(oc, vc, o) + + def test_version_manifest_utf8_version_container_utf_object(self): + oc = '5_oc_ascii' + vc = '5_vc_non_ascii\xc2\xa3' + o = '5_o_non_ascii\xc2\xa3' + self.test_version_manifest(oc, vc, o) + + def test_version_manifest_utf8_container_utf_object(self): + oc = '6_oc_non_ascii\xc2\xa3' + vc = '6_vc_ascii' + o = '6_o_non_ascii\xc2\xa3' + self.test_version_manifest(oc, vc, o) + + if __name__ == '__main__': setup() try: diff --git a/test/unit/test_locale/test_locale.py b/test/unit/test_locale/test_locale.py index a0804ed0eb..fcaca52ef5 100644 --- a/test/unit/test_locale/test_locale.py +++ b/test/unit/test_locale/test_locale.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -#-*- coding:utf-8 -*- +# coding: utf-8 # Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import print_function import os import unittest -import string import sys import threading @@ -67,12 +67,12 @@ class TestTranslations(unittest.TestCase): def test_translations(self): path = ':'.join(sys.path) translated_message = check_output(['python', __file__, path]) - self.assertEquals(translated_message, 'prova mesaĝo\n') + self.assertEqual(translated_message, 'prova mesaĝo\n') if __name__ == "__main__": os.environ['LC_ALL'] = 'eo' os.environ['SWIFT_LOCALEDIR'] = os.path.dirname(__file__) - sys.path = string.split(sys.argv[1], ':') + sys.path = sys.argv[1].split(':') from swift import gettext_ as _ - print _('test message') + print(_('test message')) diff --git a/tox.ini b/tox.ini index 8b7061a026..e16056cc66 100644 --- a/tox.ini +++ b/tox.ini @@ -27,7 +27,7 @@ downloadcache = ~/cache/pip [testenv:pep8] commands = - flake8 swift test doc setup.py + flake8 {posargs:swift test doc setup.py} flake8 --filename=swift* bin [testenv:py3pep8] @@ -49,15 +49,25 @@ commands = {posargs} [testenv:docs] commands = python setup.py build_sphinx +[testenv:bandit] +deps = -r{toxinidir}/test-requirements.txt +commands = bandit -c bandit.yaml -r swift bin -n 5 -p gate + [flake8] -# it's not a bug that we aren't using all of hacking -# H102 -> apache2 license exists -# H103 -> license is apache -# H201 -> no bare excepts (unless marked with " # noqa") -# H231 -> Check for except statements to be Python 3.x compatible -# H501 -> don't use locals() for str formatting -# H903 -> \n not \r\n -ignore = H -select = F,E,W,H102,H103,H201,H231,H501,H903 +# it's not a bug that we aren't using all of hacking, ignore: +# F812: list comprehension redefines ... 
+# H101: Use TODO(NAME) +# H202: assertRaises Exception too broad +# H233: Python 3.x incompatible use of print operator +# H234: assertEquals is deprecated, use assertEqual +# H301: one import per line +# H306: imports not in alphabetical order (time, os) +# H401: docstring should not start with a space +# H403: multi line docstrings should end on a new line +# H404: multi line docstring should start without a leading new line +# H405: multi line docstring summary not separated with an empty line +# H501: Do not use self.__dict__ for string formatting +# H703: Multiple positional placeholders +ignore = F812,H101,H202,H233,H234,H301,H306,H401,H403,H404,H405,H501,H703 exclude = .venv,.tox,dist,doc,*egg show-source = True